Diffstat (limited to 'gcc-4.2.1-5666.3/gcc/config')
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/README | 5
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/README-interworking | 742
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/_fixdfdi.c | 84
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/_fixsfdi.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/_fixunsdfdi.c | 64
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/_fixunssfdi.c | 41
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/aof.h | 348
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/aout.h | 354
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm-cores.def | 123
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm-generic.md | 152
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm-modes.def | 70
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm-protos.h | 275
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm-tune.md | 6
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm.c | 23901
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm.h | 3196
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm.md | 11975
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm.opt | 187
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm1020e.md | 388
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm1026ejs.md | 241
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm1136jfs.md | 387
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm926ejs.md | 188
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/arm_neon.h | 12180
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/bpabi.S | 120
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/bpabi.c | 61
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/bpabi.h | 118
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/cirrus.md | 604
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/coff.h | 88
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/constraints.md | 281
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8-neon.md | 1308
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8.md | 275
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/cortex-r4.md | 289
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/crti.asm | 84
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/crtn.asm | 79
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/darwin.h | 384
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/darwin.opt | 9
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/ecos-elf.h | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/elf.h | 159
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/fpa.md | 945
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/freebsd.h | 68
-rwxr-xr-x gcc-4.2.1-5666.3/gcc/config/arm/gentune.sh | 12
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/host-arm-darwin.c | 33
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/hwdiv.md | 42
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/ieee754-df.S | 1869
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/ieee754-sf.S | 1398
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/iwmmxt.md | 1411
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/kaos-arm.h | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/kaos-strongarm.h | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/lib1funcs.asm | 1695
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/libgcc-bpabi.ver | 83
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/libgcc-iphoneos.ver | 38
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/libunwind.S | 238
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/linux-eabi.h | 85
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/linux-elf.h | 106
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/linux-gas.h | 55
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/mmintrin.h | 1257
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon-docgen.ml | 323
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon-gen.ml | 424
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon-schedgen.ml | 498
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon-testgen.ml | 274
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon.md | 4917
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/neon.ml | 1827
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/netbsd-elf.h | 158
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/netbsd.h | 153
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/pe.c | 269
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/pe.h | 149
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/pe.opt | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/pr-support.c | 409
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/predicates.md | 589
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/rtems-elf.h | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/semi.h | 76
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/semiaof.h | 40
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/strongarm-coff.h | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/strongarm-elf.h | 30
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/strongarm-pe.h | 23
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/symbian.h | 101
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/sync.md | 192
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-arm | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-arm-coff | 34
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-arm-elf | 92
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-bpabi | 16
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-darwin | 75
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-linux | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-linux-eabi | 14
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-netbsd | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-pe | 33
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-rtems | 10
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-semi | 37
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-slibgcc-iphoneos | 64
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-elf | 44
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-pe | 38
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-symbian | 32
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-vxworks | 10
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-wince-pe | 37
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-coff | 45
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-elf | 66
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/thumb2.md | 1164
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/uclinux-elf.h | 74
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/unaligned-funcs.c | 62
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/unknown-elf.h | 97
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.c | 1238
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.h | 271
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/vec-common.md | 108
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/vfp.md | 1068
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/vfp11.md | 94
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/vxworks.h | 95
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/wince-pe.h | 27
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/x-darwin | 7
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/xscale-coff.h | 34
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/arm/xscale-elf.h | 59
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/asm.h | 89
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/chorus.h | 41
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-64.c | 77
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-c.c | 1250
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-crt2.c | 158
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-crt3.c | 537
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-driver.c | 213
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-protos.h | 147
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin-sections.def | 140
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin.c | 2565
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin.h | 1429
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin.opt | 77
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin10.h | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/darwin9.h | 13
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/dbx.h | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/dbxcoff.h | 63
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/dbxelf.h | 64
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/dfp-bit.c | 541
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/dfp-bit.h | 513
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/divmod.c | 50
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/elfos.h | 498
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/floatunsidf.c | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/floatunsisf.c | 18
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/floatunsitf.c | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/floatunsixf.c | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/fp-bit.c | 1678
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/fp-bit.h | 543
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/freebsd-nthr.h | 22
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/freebsd-spec.h | 146
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/freebsd.h | 87
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/gnu.h | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/gofast.h | 81
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/host-darwin.c | 82
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/host-darwin.h | 28
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/host-hpux.c | 136
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/host-linux.c | 219
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/host-solaris.c | 79
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/ammintrin.h | 106
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/athlon.md | 874
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/biarch64.h | 25
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/constraints.md | 171
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.4.ver | 81
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.5.ver | 85
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/darwin.h | 452
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/darwin.opt | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/darwin64.h | 43
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/driver-i386.c | 300
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/emmintrin.h | 1981
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/gmm_malloc.h | 77
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/gstabs.h | 7
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/host-i386-darwin.c | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386-modes.def | 97
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386-protos.h | 261
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386.c | 23515
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386.h | 3230
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386.md | 21399
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/i386.opt | 262
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/k6.md | 268
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/lib1funcs.asm | 30
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/mm3dnow.h | 220
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/mmintrin.h | 1219
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/mmx.md | 1470
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/nmmintrin.h | 41
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/pentium.md | 312
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/pmmintrin.h | 172
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/ppro.md | 763
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/predicates.md | 1037
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/smmintrin.h | 836
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/sse.md | 6218
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/sync.md | 291
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/t-darwin | 22
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/t-darwin64 | 12
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/t-gmm_malloc | 6
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/tmmintrin.h | 304
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/x-darwin | 8
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/x-darwin-x86_64 | 8
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/x-i386 | 3
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/i386/xmmintrin.h | 1582
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/interix.h | 121
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/interix3.h | 35
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/kaos.h | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/kfreebsd-gnu.h | 38
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/knetbsd-gnu.h | 39
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/libgcc-glibc.ver | 23
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/libgloss.h | 38
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/linux.h | 129
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/linux.opt | 29
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/lynx.h | 178
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/lynx.opt | 32
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/memcmp.c | 16
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/memcpy.c | 12
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/memmove.c | 20
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/memset.c | 11
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/netbsd-aout.h | 206
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/netbsd-elf.h | 94
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/netbsd.h | 227
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/openbsd-oldgas.h | 23
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/openbsd.h | 311
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/ptx4.h | 223
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/40x.md | 119
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/440.md | 133
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/603.md | 142
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/6xx.md | 274
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/7450.md | 184
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/7xx.md | 183
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/8540.md | 249
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.h | 464
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.md | 2351
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/builtin.ops | 297
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/constraints.md | 162
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-asm.h | 61
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fallback.c | 471
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fpsave.asm | 102
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble-format | 84
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble.c | 438
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.4.ver | 76
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.5.ver | 89
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-tramp.asm | 135
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-unwind.h | 35
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-vecsave.asm | 165
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-world.asm | 269
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.h | 605
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.md | 532
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.opt | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin64.h | 36
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin7.h | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/darwin8.h | 33
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/host-darwin.c | 157
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/host-ppc64-darwin.c | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/libgcc-ppc64.ver | 7
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/mpc.md | 110
-rwxr-xr-x gcc-4.2.1-5666.3/gcc/config/rs6000/ops-to-gp | 620
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/power4.md | 410
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/power5.md | 321
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/ppc64-fp.c | 243
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/predicates.md | 1319
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rios1.md | 190
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rios2.md | 128
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-c.c | 2738
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-modes.def | 47
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-protos.h | 185
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.c | 22252
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.h | 3520
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.md | 14451
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.opt | 262
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/rs64.md | 153
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/spe.md | 2814
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/sync.md | 625
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin8 | 3
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/t-rs6000 | 22
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/vec.h | 4515
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/vec.ops | 1025
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin64 | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/rtems.h | 44
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/README | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/adddf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/addsf3.c | 50
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/addtf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/divdf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/divsf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/divtf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/double.h | 264
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/eqdf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/eqsf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/eqtf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/extenddftf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/extended.h | 431
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsfdf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsftf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfdi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfsi.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdidf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdisf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatditf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsidf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsisf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsitf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundidf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundisf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunditf.c | 47
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsidf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsisf.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsitf.c | 47
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/gedf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/gesf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/getf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/ledf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/lesf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/letf2.c | 51
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/muldf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/mulsf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/multf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/negdf2.c | 48
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/negsf2.c | 48
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/negtf2.c | 48
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/op-1.h | 302
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/op-2.h | 617
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/op-4.h | 688
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/op-8.h | 111
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/op-common.h | 1359
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/quad.h | 271
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/single.h | 151
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/soft-fp.h | 209
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/subdf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/subsf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/subtf3.c | 49
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/t-softfp | 108
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/truncdfsf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfdf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfsf2.c | 54
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/unorddf2.c | 45
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/unordsf2.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/soft-fp/unordtf2.c | 46
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2-10.h | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2-6.h | 27
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2-c.c | 272
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2-protos.h | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2.c | 120
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/sol2.h | 245
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/svr3.h | 161
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/svr4.h | 212
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-darwin | 53
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-dfprules | 10
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-freebsd | 5
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-freebsd-thread | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-gnu | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-libc-ok | 1
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-libgcc-pic | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-libunwind | 12
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-libunwind-elf | 31
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-linux | 13
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-lynx | 15
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-netbsd | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-openbsd | 2
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-openbsd-thread | 5
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-rtems | 7
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-slibgcc-darwin | 104
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-slibgcc-elf-ver | 36
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-slibgcc-nolc-override | 1
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-slibgcc-sld | 32
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-sol2 | 12
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-svr4 | 11
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/t-vxworks | 27
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/tm-dwarf2.h | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/udivmod.c | 14
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/udivmodsi4.c | 24
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/usegas.h | 21
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/vx-common.h | 88
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/vxlib.c | 386
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/vxworks.h | 103
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/vxworks.opt | 32
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/vxworksae.h | 58
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/windiss.h | 38
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/x-darwin | 3
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/x-hpux | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/x-interix | 3
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/x-linux | 4
-rw-r--r-- gcc-4.2.1-5666.3/gcc/config/x-solaris | 4
379 files changed, 238781 insertions, 0 deletions
diff --git a/gcc-4.2.1-5666.3/gcc/config/README b/gcc-4.2.1-5666.3/gcc/config/README
new file mode 100644
index 000000000..60328ec5b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/README
@@ -0,0 +1,5 @@
+This directory contains machine-specific files for the GNU C compiler.
+It has a subdirectory for each basic CPU type.
+The only files in this directory itself
+are some .h files that pertain to particular operating systems
+and are used for more than one CPU type.
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/README-interworking b/gcc-4.2.1-5666.3/gcc/config/arm/README-interworking
new file mode 100644
index 000000000..d221e1555
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/README-interworking
@@ -0,0 +1,742 @@
+ Arm / Thumb Interworking
+ ========================
+
+The Cygnus GNU Pro Toolkit for the ARM7T processor supports function
+calls between code compiled for the ARM instruction set and code
+compiled for the Thumb instruction set and vice versa. This document
+describes how that interworking support operates and explains the
+command line switches that should be used in order to produce working
+programs.
+
+Note: The Cygnus GNU Pro Toolkit does not support switching between
+compiling for the ARM instruction set and the Thumb instruction set
+on anything other than a per file basis. There are in fact two
+completely separate compilers, one that produces ARM assembler
+instructions and one that produces Thumb assembler instructions. The
+two compilers share the same assembler, linker and so on.
+
+
+1. Explicit interworking support for C and C++ files
+====================================================
+
+By default if a file is compiled without any special command line
+switches then the code produced will not support interworking.
+Provided that a program is made up entirely from object files and
+libraries produced in this way and which contain either exclusively
+ARM instructions or exclusively Thumb instructions then this will not
+matter and a working executable will be created. If an attempt is
+made to link together mixed ARM and Thumb object files and libraries,
+then warning messages will be produced by the linker and a non-working
+executable will be created.
+
+In order to produce code which does support interworking it should be
+compiled with the
+
+ -mthumb-interwork
+
+command line option. Provided that a program is made up entirely from
+object files and libraries built with this command line switch a
+working executable will be produced, even if both ARM and Thumb
+instructions are used by the various components of the program. (No
+warning messages will be produced by the linker either).
+
+Note that specifying -mthumb-interwork does result in slightly larger,
+slower code being produced. This is why interworking support must be
+specifically enabled by a switch.
+
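+For example, assuming separate arm-elf and thumb-elf toolchains (the
+target triplets here are illustrative), a mixed program could be built
+like this:
+
+        arm-elf-gcc -O2 -mthumb-interwork -c arm_part.c
+        thumb-elf-gcc -O2 -mthumb-interwork -c thumb_part.c
+        arm-elf-gcc -mthumb-interwork arm_part.o thumb_part.o -o program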
+
+2. Explicit interworking support for assembler files
+====================================================
+
+If assembler files are to be included into an interworking program
+then the following rules must be obeyed:
+
+ * Any externally visible functions must return by using the BX
+ instruction.
+
+ * Normal function calls can just use the BL instruction. The
+ linker will automatically insert code to switch between ARM
+ and Thumb modes as necessary.
+
+ * Calls via function pointers should use the BX instruction if
+ the call is made in ARM mode:
+
+ .code 32
+ mov lr, pc
+ bx rX
+
+          This code sequence will not work in Thumb mode however, since
+          the mov instruction will not set the bottom bit of the lr
+          register. Instead, a branch-and-link to one of the
+          _call_via_rX functions should be used:
+
+ .code 16
+ bl _call_via_rX
+
+ where rX is replaced by the name of the register containing
+ the function address.
+
+ * All externally visible functions which should be entered in
+ Thumb mode must have the .thumb_func pseudo op specified just
+ before their entry point. e.g.:
+
+ .code 16
+ .global function
+ .thumb_func
+ function:
+ ...start of function....
+
+ * All assembler files must be assembled with the switch
+ -mthumb-interwork specified on the command line. (If the file
+ is assembled by calling gcc it will automatically pass on the
+ -mthumb-interwork switch to the assembler, provided that it
+ was specified on the gcc command line in the first place.)
+
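+As a minimal sketch, here is a Thumb function which obeys all of the
+rules above (the function name is illustrative):
+
+        .code 16
+        .global double_it
+        .thumb_func
+double_it:
+        lsl r0, r0, #1    @ return value = argument * 2
+        bx  lr            @ return via BX so the caller's mode is restored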
+
+3. Support for old, non-interworking aware code.
+================================================
+
+If it is necessary to link together code produced by an older,
+non-interworking aware compiler, or code produced by the new compiler
+but without the -mthumb-interwork command line switch specified, then
+there are two command line switches that can be used to support this.
+
+The switch
+
+ -mcaller-super-interworking
+
+will allow calls via function pointers in Thumb mode to work,
+regardless of whether the function pointer points to old,
+non-interworking aware code or not. Specifying this switch does
+produce slightly slower code however.
+
+Note: There is no switch to allow calls via function pointers in ARM
+mode to be handled specially. Calls via function pointers from
+interworking aware ARM code to non-interworking aware ARM code work
+without any special considerations by the compiler. Calls via
+function pointers from interworking aware ARM code to non-interworking
+aware Thumb code however will not work. (Actually under some
+circumstances they may work, but there are no guarantees). This is
+because only the new compiler is able to produce Thumb code, and this
+compiler already has a command line switch to produce interworking
+aware code.
+
+
+The switch
+
+ -mcallee-super-interworking
+
+will allow non-interworking aware ARM or Thumb code to call Thumb
+functions, either directly or via function pointers. Specifying this
+switch does produce slightly larger, slower code however.
+
+Note: There is no switch to allow non-interworking aware ARM or Thumb
+code to call ARM functions. There is no need for any special handling
+of calls from non-interworking aware ARM code to interworking aware
+ARM functions, they just work normally. Calls from non-interworking
+aware Thumb functions to ARM code however, will not work. There is no
+option to support this, since it is always possible to recompile the
+Thumb code to be interworking aware.
+
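+For example, a Thumb library which may be called by old ARM code, and
+which itself calls through function pointers into code of unknown
+vintage, might be compiled like this (the toolchain prefix and file
+name are illustrative):
+
+        thumb-elf-gcc -O2 -mcallee-super-interworking \
+                -mcaller-super-interworking -c old_friendly_lib.c
+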
+As an alternative to the command line switch
+-mcallee-super-interworking, which affects all externally visible
+functions in a file, it is possible to specify an attribute or
+declspec for individual functions, indicating that that particular
+function should support being called by non-interworking aware code.
+The function should be defined like this:
+
+    int __attribute__((interfacearm)) function (void)
+ {
+ ... body of function ...
+ }
+
+or
+
+    int __declspec(interfacearm) function (void)
+ {
+ ... body of function ...
+ }
+
+
+
+4. Interworking support in dlltool
+==================================
+
+It is possible to create DLLs containing mixed ARM and Thumb code. It
+is also possible to call Thumb code in a DLL from an ARM program and
+vice versa. It is even possible to call ARM DLLs that have been compiled
+without interworking support (say by an older version of the compiler),
+from Thumb programs and still have things work properly.
+
+ A version of the `dlltool' program which supports the `--interwork'
+command line switch is needed, as well as the following special
+considerations when building programs and DLLs:
+
+*Use `-mthumb-interwork'*
+ When compiling files for a DLL or a program the `-mthumb-interwork'
+ command line switch should be specified if calling between ARM and
+ Thumb code can happen. If a program is being compiled and the
+ mode of the DLLs that it uses is not known, then it should be
+ assumed that interworking might occur and the switch used.
+
+*Use `-m thumb'*
+ If the exported functions from a DLL are all Thumb encoded then the
+ `-m thumb' command line switch should be given to dlltool when
+ building the stubs. This will make dlltool create Thumb encoded
+ stubs, rather than its default of ARM encoded stubs.
+
+ If the DLL consists of both exported Thumb functions and exported
+ ARM functions then the `-m thumb' switch should not be used.
+ Instead the Thumb functions in the DLL should be compiled with the
+ `-mcallee-super-interworking' switch, or with the `interfacearm'
+ attribute specified on their prototypes. In this way they will be
+ given ARM encoded prologues, which will work with the ARM encoded
+ stubs produced by dlltool.
+
+*Use `-mcaller-super-interworking'*
+ If it is possible for Thumb functions in a DLL to call
+ non-interworking aware code via a function pointer, then the Thumb
+ code must be compiled with the `-mcaller-super-interworking'
+ command line switch. This will force the function pointer calls
+ to use the _interwork_call_via_rX stub functions which will
+ correctly restore Thumb mode upon return from the called function.
+
+*Link with `libgcc.a'*
+ When the dll is built it may have to be linked with the GCC
+ library (`libgcc.a') in order to extract the _call_via_rX functions
+ or the _interwork_call_via_rX functions. This represents a partial
+ redundancy since the same functions *may* be present in the
+ application itself, but since they only take up 372 bytes this
+ should not be too much of a consideration.
+
+*Use `--support-old-code'*
+ When linking a program with an old DLL which does not support
+ interworking, the `--support-old-code' command line switch to the
+ linker should be used. This causes the linker to generate special
+ interworking stubs which can cope with old, non-interworking aware
+ ARM code, at the cost of generating bulkier code. The linker will
+ still generate a warning message along the lines of:
+ "Warning: input file XXX does not support interworking, whereas YYY does."
+ but this can now be ignored because the --support-old-code switch
+ has been used.
+
+
+
+5. How interworking support works
+=================================
+
+Switching between the ARM and Thumb instruction sets is accomplished
+via the BX instruction which takes as an argument a register name.
+Control is transferred to the address held in this register (with the
+bottom bit masked out), and if the bottom bit is set, then Thumb
+instruction processing is enabled, otherwise ARM instruction
+processing is enabled.
+
+When the -mthumb-interwork command line switch is specified, gcc
+arranges for all functions to return to their caller by using the BX
+instruction. Thus provided that the return address has the bottom bit
+correctly initialized to indicate the instruction set of the caller,
+correct operation will ensue.
+
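+For instance, a leaf function compiled for ARM mode with
+-mthumb-interwork returns with a sequence like this (the function
+name is illustrative):
+
+        .code 32
+_double:
+        add r0, r0, r0
+        bx  lr
+
+If the caller was in Thumb mode, the bottom bit of lr was set when the
+call was made, so the BX returns in Thumb mode; otherwise it returns
+in ARM mode.
+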
+When a function is called explicitly (rather than via a function
+pointer), the compiler generates a BL instruction to do this. The
+Thumb version of the BL instruction has the special property of
+setting the bottom bit of the LR register after it has stored the
+return address into it, so that a future BX instruction will correctly
+return to the instruction after the BL instruction, in Thumb mode.
+
+The BL instruction does not change modes itself however, so if an ARM
+function is calling a Thumb function, or vice versa, it is necessary
+to generate some extra instructions to handle this. This is done in
+the linker when it is storing the address of the referenced function
+into the BL instruction. If the BL instruction is an ARM style BL
+instruction, but the referenced function is a Thumb function, then the
+linker automatically generates a calling stub that converts from ARM
+mode to Thumb mode, puts the address of this stub into the BL
+instruction, and puts the address of the referenced function into the
+stub. Similarly if the BL instruction is a Thumb BL instruction, and
+the referenced function is an ARM function, the linker generates a
+stub which converts from Thumb to ARM mode, puts the address of this
+stub into the BL instruction, and the address of the referenced
+function into the stub.
+
+This is why it is necessary to mark Thumb functions with the
+.thumb_func pseudo op when creating assembler files. This pseudo op
+allows the assembler to distinguish between ARM functions and Thumb
+functions. (The Thumb version of GCC automatically generates these
+pseudo ops for any Thumb functions that it generates).
+
+Calls via function pointers work differently. Whenever the address of
+a function is taken, the linker examines the type of the function
+being referenced. If the function is a Thumb function, then it sets
+the bottom bit of the address. Technically this makes the address
+incorrect, since it is now one byte into the start of the function,
+but this is never a problem because:
+
+ a. with interworking enabled all calls via function pointer
+ are done using the BX instruction and this ignores the
+ bottom bit when computing where to go to.
+
+ b. the linker will always set the bottom bit when the address
+ of the function is taken, so it is never possible to take
+ the address of the function in two different places and
+ then compare them and find that they are not equal.
+
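+For instance, in the following (illustrative) C fragment both pointers
+compare equal even when thumb_func is a Thumb function, because the
+linker sets the bottom bit in every reference to its address, and the
+BX used for the call ignores that bit when computing the destination:
+
+    extern int thumb_func (void);
+
+    int (*p1) (void) = thumb_func;
+    int (*p2) (void) = thumb_func;
+    /* p1 == p2 always holds; (*p1) () enters thumb_func in Thumb mode. */
+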
+As already mentioned any call via a function pointer will use the BX
+instruction (provided that interworking is enabled). The only problem
+with this is computing the return address for the return from the
+called function. For ARM code this can easily be done by the code
+sequence:
+
+ mov lr, pc
+ bx rX
+
+(where rX is the name of the register containing the function
+pointer). This code does not work for the Thumb instruction set,
+since the MOV instruction will not set the bottom bit of the LR
+register, so that when the called function returns, it will return in
+ARM mode not Thumb mode. Instead the compiler generates this
+sequence:
+
+ bl _call_via_rX
+
+(again where rX is the name of the register containing the function
+pointer). The special call_via_rX functions look like this:
+
+ .thumb_func
+_call_via_r0:
+ bx r0
+ nop
+
+The BL instruction ensures that the correct return address is stored
+in the LR register and then the BX instruction jumps to the address
+stored in the function pointer, switching modes if necessary.
+
+
+6. How caller-super-interworking support works
+==============================================
+
+When the -mcaller-super-interworking command line switch is specified
+it changes the code produced by the Thumb compiler so that all calls
+via function pointers (including virtual function calls) now go via a
+different stub function. The code to call via a function pointer now
+looks like this:
+
+ bl _interwork_call_via_r0
+
+Note: The compiler does not insist that r0 be used to hold the
+function address. Any register will do, and there is a suite of stub
+functions, one for each possible register. The stub functions look
+like this:
+
+ .code 16
+ .thumb_func
+_interwork_call_via_r0:
+ bx pc
+ nop
+
+ .code 32
+ tst r0, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx r0
+
+The stub first switches to ARM mode, since it is a lot easier to
+perform the necessary operations using ARM instructions. It then
+tests the bottom bit of the register containing the address of the
+function to be called. If this bottom bit is set then the function
+being called uses Thumb instructions and the BX instruction to come
+will switch back into Thumb mode before calling this function. (Note
+that it does not matter how this called function chooses to return to
+its caller, since both the caller and callee are Thumb functions,
+and no mode switching is necessary). If the function being called is an
+ARM mode function however, the stub pushes the return address (with
+its bottom bit set) onto the stack, replaces the return address with
+the address of a piece of code called '_arm_return' and then
+performs a BX instruction to call the function.
+
+The '_arm_return' code looks like this:
+
+ .code 32
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+
+It simply retrieves the return address from the stack, and then
+performs a BX operation to return to the caller and switch back into
+Thumb mode.
+
+
+7. How callee-super-interworking support works
+==============================================
+
+When -mcallee-super-interworking is specified on the command line the
+Thumb compiler behaves as if every externally visible function that it
+compiles has had the (interfacearm) attribute specified for it. What
+this attribute does is to put a special, ARM mode header onto the
+function which forces a switch into Thumb mode:
+
+ without __attribute__((interfacearm)):
+
+ .code 16
+ .thumb_func
+ function:
+ ... start of function ...
+
+ with __attribute__((interfacearm)):
+
+ .code 32
+ function:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .thumb_func
+ .real_start_of_function:
+
+ ... start of function ...
+
+Note that since the function now expects to be entered in ARM mode, it
+no longer has the .thumb_func pseudo op specified for its name.
+Instead the pseudo op is attached to a new label .real_start_of_<name>
+(where <name> is the name of the function) which indicates the start
+of the Thumb code. This does have the interesting side effect that
+if this function is now called from a Thumb mode piece of code
+outside of the current file, the linker will generate a calling stub
+to switch from Thumb mode into ARM mode, and then this is immediately
+overridden by the function's header which switches back into Thumb
+mode.
+
+In addition the (interfacearm) attribute also forces the function to
+return by using the BX instruction, even if it has not been compiled with
+the -mthumb-interwork command line flag, so that the correct mode will
+be restored upon exit from the function.
+
+
+8. Some examples
+================
+
+ Given these two test files:
+
+ int arm (void) { return 1 + thumb (); }
+
+ int thumb (void) { return 2 + arm (); }
+
+ The following pieces of assembler are produced by the ARM and Thumb
+version of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, pc} pop {pc}
+
+ Note how the functions return without using the BX instruction. If
+these files were assembled and linked together they would fail to work
+because they do not change mode when returning to their caller.
+
+ `-O2 -mthumb-interwork':
+
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+ Now the functions use BX to return to their caller. They have grown
+by 4 and 2 bytes respectively, but they can now successfully be linked
+together and be expected to work. The linker will replace the
+destinations of the two BL instructions with the addresses of calling
+stubs which convert to the correct mode before jumping to the called
+function.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32 .code 32
+ .global _arm .global _thumb
+ _arm: _thumb:
+ orr r12, pc, #1
+ bx r12
+ mov ip, sp .code 16
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+ The Thumb function now has an ARM encoded prologue, and it no longer
+has the `.thumb_func' pseudo op attached to it. The linker will not
+generate a calling stub for the call from arm() to thumb(), but it will
+still have to generate a stub for the call from thumb() to arm(). Also
+note how specifying `-mcallee-super-interworking' automatically
+implies `-mthumb-interwork'.
+
+
+9. Some Function Pointer Examples
+=================================
+
+ Given this test file:
+
+ int func (void) { return 1; }
+
+ int call (int (* ptr)(void)) { return ptr (); }
+
+ The following varying pieces of assembler are produced by the Thumb
+version of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {pc}
+
+ Note how the two functions have different exit sequences. In
+particular call() uses pop {pc} to return, which would not work if the
+caller was in ARM mode. func() however, uses the BX instruction, even
+though `-mthumb-interwork' has not been specified, as this is the most
+efficient way to exit a function when the return address is held in the
+link register.
+
+ `-O2 -mthumb-interwork':
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ This time both functions return by using the BX instruction. This
+means that call() is now two bytes longer and several cycles slower
+than the previous version.
+
+ `-O2 -mcaller-super-interworking':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __interwork_call_via_r0
+ pop {pc}
+
+ Very similar to the first (non-interworking) version, except that a
+different stub is used to call via the function pointer. This new stub
+will work even if the called function is not interworking aware, and
+tries to return to call() in ARM mode. Note that the assembly code for
+call() is still not interworking aware itself, and so should not be
+called from ARM code.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32
+ .globl _func
+ _func:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_func
+ .thumb_func
+ .real_start_of_func:
+ mov r0, #1
+ bx lr
+
+ .code 32
+ .globl _call
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ Now both functions have an ARM coded prologue, and both functions
+return by using the BX instruction. These functions are therefore
+interworking aware and can safely be called from ARM code. The code for
+the call() function is now 10 bytes longer than the original, non
+interworking aware version, an increase of over 200%.
+
+ If a prototype for call() is added to the source code, and this
+prototype includes the `interfacearm' attribute:
+
+ int __attribute__((interfacearm)) call (int (* ptr)(void));
+
+ then this code is produced (with only -O2 specified on the command
+line):
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .code 32
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ So now both call() and func() can be safely called from
+non-interworking aware ARM code. If, when such a file is assembled,
+the assembler detects the fact that call() is being called by another
+function in the same file, it will automatically adjust the target of
+the BL instruction to point to .real_start_of_call. In this way there
+is no need for the linker to generate a Thumb-to-ARM calling stub so
+that call can be entered in ARM mode.
+
+
+10. How to use dlltool to build ARM/Thumb DLLs
+==============================================
+ Given a program (`prog.c') like this:
+
+ extern int func_in_dll (void);
+
+ int main (void) { return func_in_dll(); }
+
+ And a DLL source file (`dll.c') like this:
+
+ int func_in_dll (void) { return 1; }
+
+ Here is how to build the DLL and the program for a purely ARM based
+environment:
+
+*Step One
+ Build a `.def' file describing the DLL:
+
+ ; example.def
+ ; This file describes the contents of the DLL
+ LIBRARY example
+ HEAPSIZE 0x40000, 0x2000
+ EXPORTS
+ func_in_dll 1
+
+*Step Two
+ Compile the DLL source code:
+
+ arm-pe-gcc -O2 -c dll.c
+
+*Step Three
+ Use `dlltool' to create an exports file and a library file:
+
+ dlltool --def example.def --output-exp example.o --output-lib example.a
+
+*Step Four
+ Link together the complete DLL:
+
+ arm-pe-ld dll.o example.o -o example.dll
+
+*Step Five
+ Compile the program's source code:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file:
+
+ arm-pe-gcc prog.o example.a -o prog
+
+ If instead this was a Thumb DLL being called from an ARM program, the
+steps would look like this. (To save space only those steps that are
+different from the previous version are shown):
+
+*Step Two
+ Compile the DLL source code (using the Thumb compiler):
+
+ thumb-pe-gcc -O2 -c dll.c -mthumb-interwork
+
+*Step Three
+ Build the exports and library files (and support interworking):
+
+ dlltool -d example.def -z example.o -l example.a --interwork -m thumb
+
+*Step Five
+ Compile the program's source code (and support interworking):
+
+ arm-pe-gcc -O2 -c prog.c -mthumb-interwork
+
+ If instead, the DLL was an old, ARM DLL which does not support
+interworking, and which cannot be rebuilt, then these steps would be
+used.
+
+*Step One
+ Skip. If you do not have access to the sources of a DLL, there is
+ no point in building a `.def' file for it.
+
+*Step Two
+ Skip. With no DLL sources there is nothing to compile.
+
+*Step Three
+ Skip. Without a `.def' file you cannot use dlltool to build an
+ exports file or a library file.
+
+*Step Four
+ Skip. Without a set of DLL object files you cannot build the DLL.
+ Besides it has already been built for you by somebody else.
+
+*Step Five
+ Compile the program's source code, this is the same as before:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file, passing the
+ `--support-old-code' option to the linker:
+
+ arm-pe-gcc prog.o example.a -Wl,--support-old-code -o prog
+
+ Ignore the warning message about the input file not supporting
+ interworking as the --support-old-code switch has taken care of this.
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/_fixdfdi.c b/gcc-4.2.1-5666.3/gcc/config/arm/_fixdfdi.c
new file mode 100644
index 000000000..f88067192
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/_fixdfdi.c
@@ -0,0 +1,84 @@
+/* APPLE LOCAL file 5316398 improved float/double -> int64 functions */
+#include <stdint.h>
+
+int64_t
+__fixdfdi (double x)
+{
+ union { double d; uint64_t u; } u = {x};
+ uint64_t fabsx = u.u & 0x7fffffffffffffffULL;
+ uint32_t exp = fabsx >> 52;
+ int64_t result = 0;
+
+ /* for very large and reasonably small values, regular int converter
+ works fine */
+ if (exp >= 52U + 1023U) /* if( |x| >= 0x1.0p52 || isnan( x ) ) */
+ {
+ /* early out for error cases |x| >= 0x1.0p63 || isnan(x) */
+ if (exp >= 1023U + 63U)
+ {
+ /* special case for x == -0x1.0p63 */
+ if (-0x1.0p63 == x)
+ return 0x8000000000000000ULL;
+
+ /* huge, Inf, NaN */
+ result = (int32_t) x; /* grab sign bit */
+ result >>= 63; /* splat it across value */
+ /* return either 0x8000000000000000 or 0x7fffffffffffffff
+ according to sign bit */
+ result ^= 0x7fffffffffffffffULL;
+
+ return result;
+ }
+
+ /* 0x1.0p52 <= |x| < 0x1.0p63 always integer, but too big. Chop
+ off some of the top. */
+ u.u &= 0xFFFFFFFF00000000ULL; /* truncate off some low bits */
+ x -= u.d; /* get remainder */
+
+ /* accumulate the high part into result */
+ int32_t hi = u.d * 0x1.0p-32;
+ result += (int64_t) hi << 32;
+ }
+ else
+ { /* |x| < 0x1.0p52 */
+
+ /* early out for |x| < 0x1.0p31 -- use hardware 32-bit conversion */
+ if (exp < 1023U + 31U)
+ return (int64_t) ((int32_t) x);
+
+ /* The integer result fits in the significand, but there may be
+ some fractional bits. Value is too large to use 32-bit
+ hardware.
+
+ create a mask that covers the high 32-bit part of the number
+ and the whole integer part. */
+ uint64_t intMask = (int64_t) 0xFFF0000000000000LL >> (exp - 1023);
+
+ /* extract the full integer (round to integer in round to zero
+ rounding mode) */
+ u.u &= intMask;
+
+ /* find the fractional part */
+ double fraction = x - u.d;
+
+ /* save the integer part */
+ x = u.d;
+
+ /* set inexact as needed */
+ result = (int32_t) fraction; /* always 0 */
+ }
+
+  /* x is now an integer with |x| < 2**53. Convert to integer
+     representation: adding or subtracting 0x1.0p52 pushes the integer
+     bits of x into the low bits of the double's significand, where
+     they can be masked out directly. */
+ if (x < 0.0)
+ {
+ u.d = x - 0x1.0p52;
+ result -= u.u & 0x000FFFFFFFFFFFFFULL;
+ }
+ else
+ {
+ u.d = x + 0x1.0p52;
+ result += u.u & 0x000FFFFFFFFFFFFFULL;
+ }
+
+ return result;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/_fixsfdi.c b/gcc-4.2.1-5666.3/gcc/config/arm/_fixsfdi.c
new file mode 100644
index 000000000..f248c95a4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/_fixsfdi.c
@@ -0,0 +1,54 @@
+/* APPLE LOCAL file 5316398 improved float/double -> int64 functions */
+#include <stdint.h>
+
+int64_t
+__fixsfdi (float x)
+{
+ union { float f; uint32_t u; } u = {x};
+ uint32_t fabsx = u.u & 0x7fffffffU;
+ uint32_t exp = fabsx >> 23;
+ int64_t result = 0;
+
+  /* for small ints, overflow and NaN, the int32_t converter works fine
+     if( |x| < 0x1.0p31f || |x| >= 0x1.0p63f || isnan(x) ) unsigned
+     compare */
+ if (exp - (127U + 31U) >= (63U - 31U))
+ {
+ if (exp > (127 + 31))
+ {
+ if (x == -0x1.0p63f)
+ return 0x8000000000000000LL;
+
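+      /* |x| >= 0x1.0p63, Inf or NaN: convert via int32_t and widen
+         the resulting bit pattern to 64 bits, so overflows saturate
+         to INT64_MAX / INT64_MIN. */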
+ uint32_t r = (int32_t) x;
+ result = (int64_t) r << 32;
+ r = (r << 1) | (r & 1);
+ result |= r;
+ return result;
+ }
+
+ /* small number. Regular int32_t conversion will work fine here. */
+ result = (int32_t) x;
+ return result;
+ }
+
+ /* 0x1.0p31 <= |x| <0x1.0p64, x is always an integer in this range */
+
+ /* convert float to fixed */
+ result = (fabsx & 0x007fffffU) | 0x00800000;
+
+ /* signMask = x < 0.0f ? -1LL : 0 */
+ int64_t signMask = (int64_t) u.u << 32;
+ signMask >>= 63;
+
+ /* Calculate shift value to move fixed point to right place */
+ int32_t leftShift = exp - (127 + 23);
+
+ /* move the fixed point into place */
+ result <<= leftShift;
+
+ /* Fix sign */
+ result ^= signMask;
+ result -= signMask;
+
+ return result;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/_fixunsdfdi.c b/gcc-4.2.1-5666.3/gcc/config/arm/_fixunsdfdi.c
new file mode 100644
index 000000000..98fcc8f53
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/_fixunsdfdi.c
@@ -0,0 +1,64 @@
+/* APPLE LOCAL file 5316398 improved float/double -> int64 functions */
+#include <stdint.h>
+
+uint64_t
+__fixunsdfdi (double x)
+{
+ union { double d; uint64_t u; uint32_t u32[2]; } u = {x};
+ uint32_t hi = u.u >> 32;
+ uint32_t lo;
+
+ /* Early out for the common case: +0 <= x < 0x1.0p32 */
+ if (__builtin_expect (hi < 0x41f00000U, 1))
+ return (uint64_t) ((uint32_t) x);
+
+ /* 0x1.0p32 <= x < 0x1.0p64 */
+ if (__builtin_expect (hi < 0x43f00000U, 1))
+ {
+ /* if x < 0x1.0p52 */
+ if (__builtin_expect (hi < 0x43400000U, 1))
+ {
+ if (__builtin_expect (hi < 0x43300000U, 1))
+ {
+ uint32_t shift = (1023 + 52) - (hi >> 20);
+ uint32_t unitBit = 1U << shift;
+ uint32_t fractMask = unitBit - 1;
+ u.u32[0] = lo = (uint32_t) u.u & ~fractMask;
+ x -= u.d;
+ hi &= 0x000FFFFFU;
+ hi |= 0x00100000U;
+ lo = (lo >> shift) | (hi << (32 - shift));
+ /* (int32_t) x is always zero here. This sets the inexact
+ flag. */
+ hi = (hi >> shift) + (int32_t) x;
+ }
+ else
+ {
+ u.u &= 0x000FFFFFFFFFFFFFULL;
+ u.u |= 0x0010000000000000ULL;
+ return u.u;
+ }
+ }
+ else
+ {
+ uint32_t shift = (hi >> 20) - (1023 + 52);
+ hi &= 0x000FFFFFU;
+ lo = u.u;
+ hi |= 0x00100000U;
+
+ hi = (hi << shift) | (lo >> (32 - shift));
+ lo = lo << shift;
+ }
+
+ /* return the result; */
+ return ((uint64_t) hi << 32) | lo;
+ }
+
+ /* x <= -0 or x >= 0x1.0p64 or x is NaN. set invalid as necessary.
+ Pin according to ARM rules. */
+ hi = x;
+
+ /* promote to 64-bits */
+ lo = (hi << 1) | (hi & 1);
+ return ((uint64_t) hi << 32) | lo;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/_fixunssfdi.c b/gcc-4.2.1-5666.3/gcc/config/arm/_fixunssfdi.c
new file mode 100644
index 000000000..08ffc6a6e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/_fixunssfdi.c
@@ -0,0 +1,41 @@
+/* APPLE LOCAL file 5316398 improved float/double -> int64 functions */
+#include <stdint.h>
+
+uint64_t
+__fixunssfdi (float x)
+{
+ union { float f; uint32_t u; } u = {x};
+ uint32_t hi, lo;
+
+ /* early out for common small positive numbers. */
+ if (__builtin_expect (u.u < 0x4f800000U, 1))
+ return (uint64_t) ((uint32_t) x);
+
+ /* larger non-overflowing cases are all exact, so we just need to do
+ the conversion in integer code */
+ /* if( 0x1.0p32f <= x < 0x1.0p63f ) */
+ if (__builtin_expect (u.u < 0x5f800000U, 1))
+ {
+ uint32_t bits = (u.u & 0x007fffffU) | 0x00800000U;
+ uint32_t shift = (u.u >> 23) - (127 + 23);
+ if (shift < 32)
+ {
+ hi = bits >> (32 - shift);
+ lo = bits << shift;
+ }
+ else
+ {
+ hi = bits << (shift - 32);
+ lo = 0;
+ }
+ return ((uint64_t) hi << 32) | lo;
+ }
+
+ /* Overflow or NaN: convert value to unsigned int, set invalid as
+ necessary */
+ hi = x;
+
+ /* extend to 64-bits. */
+ lo = (hi << 1) | (hi & 1);
+ return ((uint64_t) hi << 32) | lo;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/aof.h b/gcc-4.2.1-5666.3/gcc/config/arm/aof.h
new file mode 100644
index 000000000..ce0548109
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/aof.h
@@ -0,0 +1,348 @@
+/* Definitions of target machine for GNU compiler, for Advanced RISC Machines
+ ARM compilation, AOF Assembler.
+ Copyright (C) 1995, 1996, 1997, 2000, 2003, 2004
+ Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+
+
+#define AOF_ASSEMBLER
+
+#define LINK_LIBGCC_SPECIAL 1
+
+#define LINK_SPEC "%{aof} %{bin} %{aif} %{ihf} %{shl,*} %{reent*} %{split} \
+ %{ov*} %{reloc*} -nodebug"
+
+#define STARTFILE_SPEC "crtbegin.o%s"
+
+#define ENDFILE_SPEC "crtend.o%s"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{g -g} -arch 4 -apcs 3/32bit"
+#endif
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+#endif
+
+#define LIBGCC_SPEC "libgcc.a%s"
+
+#define CTOR_LIST_BEGIN \
+ asm (CTORS_SECTION_ASM_OP); \
+ extern func_ptr __CTOR_END__[1]; \
+ func_ptr __CTOR_LIST__[1] = {__CTOR_END__};
+
+#define CTOR_LIST_END \
+ asm (CTORS_SECTION_ASM_OP); \
+ func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DO_GLOBAL_CTORS_BODY \
+ do \
+ { \
+ func_ptr *ptr = __CTOR_LIST__ + 1; \
+ \
+ while (*ptr) \
+ (*ptr++) (); \
+ } \
+ while (0)
+
+#define DTOR_LIST_BEGIN \
+ asm (DTORS_SECTION_ASM_OP); \
+ extern func_ptr __DTOR_END__[1]; \
+ func_ptr __DTOR_LIST__[1] = {__DTOR_END__};
+
+#define DTOR_LIST_END \
+ asm (DTORS_SECTION_ASM_OP); \
+ func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define DO_GLOBAL_DTORS_BODY \
+ do \
+ { \
+ func_ptr *ptr = __DTOR_LIST__ + 1; \
+ \
+ while (*ptr) \
+ (*ptr++) (); \
+ } \
+ while (0)
+
+/* We really want to put Thumb tables in a read-only data section, but
+ switching to another section during function output is not
+ possible. We could however do what the SPARC does and defer the
+ whole table generation until the end of the function. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#define TARGET_ASM_INIT_SECTIONS aof_asm_init_sections
+
+/* Some systems use __main in a way incompatible with its use in gcc; in these
+ cases use the macro NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define ASM_COMMENT_START ";"
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+
+#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
+{ \
+ int i; \
+ const char *ptr = (PTR); \
+ fprintf ((STREAM), "\tDCB"); \
+ for (i = 0; i < (long)(LEN); i++) \
+ fprintf ((STREAM), " &%02x%s", \
+ (unsigned ) *(ptr++), \
+ (i + 1 < (long)(LEN) \
+ ? ((i & 3) == 3 ? "\n\tDCB" : ",") \
+ : "\n")); \
+}
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
+
+/* Output of Uninitialized Variables. */
+
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (in_section = NULL, \
+ fprintf ((STREAM), "\tAREA "), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ", DATA, COMMON\n\t%% %d\t%s size=%d\n", \
+ (int)(ROUNDED), ASM_COMMENT_START, (int)(SIZE)))
+
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (zero_init_section (), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), "\n"), \
+ fprintf ((STREAM), "\t%% %d\t%s size=%d\n", \
+ (int)(ROUNDED), ASM_COMMENT_START, (int)(SIZE)))
+
+/* Output and Generation of Labels */
+extern int arm_main_function;
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\tEXPORT\t"
+
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+do { \
+ assemble_name (STREAM,NAME); \
+ fputs ("\n", STREAM); \
+} while (0)
+
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \
+{ \
+ if (TARGET_POKE_FUNCTION_NAME) \
+ arm_poke_function_name ((STREAM), (NAME)); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_DECLARE_OBJECT_NAME(STREAM,NAME,DECL) \
+{ \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \
+ aof_add_import ((NAME))
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(STREAM,SYMREF) \
+ (fprintf ((STREAM), "\tIMPORT\t"), \
+ assemble_name ((STREAM), XSTR ((SYMREF), 0)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "|%s|", NAME)
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*|%s..%ld|", (PREFIX), (long)(NUM))
+
+/* How initialization functions are handled. */
+
+#define CTORS_SECTION_ASM_OP "\tAREA\t|C$$gnu_ctorsvec|, DATA, READONLY"
+#define DTORS_SECTION_ASM_OP "\tAREA\t|C$$gnu_dtorsvec|, DATA, READONLY"
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output of Assembler Instructions. Note that the ?xx registers are
+ there so that VFPv3/NEON registers D16-D31 have the same spacing as D0-D15
+ (each of which is overlaid on two S registers), although there are no
+ actual single-precision registers which correspond to D16-D31. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#define REGISTER_NAMES \
+{ \
+ "a1", "a2", "a3", "a4", \
+ "v1", "v2", "v3", "v4", \
+ "v5", "v6", "sl", "fp", \
+ "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", \
+ "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp", \
+ "mv0", "mv1", "mv2", "mv3", \
+ "mv4", "mv5", "mv6", "mv7", \
+ "mv8", "mv9", "mv10", "mv11", \
+ "mv12", "mv13", "mv14", "mv15", \
+ "wcgr0", "wcgr1", "wcgr2", "wcgr3", \
+ "wr0", "wr1", "wr2", "wr3", \
+ "wr4", "wr5", "wr6", "wr7", \
+ "wr8", "wr9", "wr10", "wr11", \
+ "wr12", "wr13", "wr14", "wr15", \
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", \
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", \
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", \
+ "d16", "?16", "d17", "?17", "d18", "?18", "d19", "?19", \
+ "d20", "?20", "d21", "?21", "d22", "?22", "d23", "?23", \
+ "d24", "?24", "d25", "?25", "d26", "?26", "d27", "?27", \
+ "d28", "?28", "d29", "?29", "d30", "?30", "d31", "?31", \
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ "vfpcc" \
+}
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"r0", 0}, {"a1", 0}, \
+ {"r1", 1}, {"a2", 1}, \
+ {"r2", 2}, {"a3", 2}, \
+ {"r3", 3}, {"a4", 3}, \
+ {"r4", 4}, {"v1", 4}, \
+ {"r5", 5}, {"v2", 5}, \
+ {"r6", 6}, {"v3", 6}, \
+ {"r7", 7}, {"wr", 7}, \
+ {"r8", 8}, {"v5", 8}, \
+ {"r9", 9}, {"v6", 9}, \
+ {"r10", 10}, {"sl", 10}, {"v7", 10}, \
+ {"r11", 11}, {"fp", 11}, \
+ {"r12", 12}, {"ip", 12}, \
+ {"r13", 13}, {"sp", 13}, \
+ {"r14", 14}, {"lr", 14}, \
+ {"r15", 15}, {"pc", 15}, \
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ {"d0", 63}, {"q0", 63}, \
+ {"d1", 65}, \
+ {"d2", 67}, {"q1", 67}, \
+ {"d3", 69}, \
+ {"d4", 71}, {"q2", 71}, \
+ {"d5", 73}, \
+ {"d6", 75}, {"q3", 75}, \
+ {"d7", 77}, \
+ {"d8", 79}, {"q4", 79}, \
+ {"d9", 81}, \
+ {"d10", 83}, {"q5", 83}, \
+ {"d11", 85}, \
+ {"d12", 87}, {"q6", 87}, \
+ {"d13", 89}, \
+ {"d14", 91}, {"q7", 91}, \
+ {"d15", 93}, \
+ {"q8", 95}, \
+ {"q9", 99}, \
+ {"q10", 103}, \
+ {"q11", 107}, \
+ {"q12", 111}, \
+ {"q13", 115}, \
+ {"q14", 119}, \
+ {"q15", 123} \
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+}
+
+#define REGISTER_PREFIX "__"
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX ""
+
+/* AOF does not prefix user function names with an underscore. */
+#define ARM_MCOUNT_NAME "_mcount"
+
+/* Output of Dispatch Tables. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ fprintf ((STREAM), "\tb\t|L..%d|\n", (VALUE)); \
+ else if (TARGET_THUMB1) \
+ fprintf ((STREAM), "\tDCD\t|L..%d| - |L..%d|\n", (VALUE), (REL)); \
+ else /* Thumb-2 */ \
+ { \
+ switch (GET_MODE (BODY)) \
+ { \
+ case QImode: /* TBB */ \
+ asm_fprintf (STREAM, "\tDCB\t(|L..%d| - |L..%d|)/2\n", \
+ VALUE, REL); \
+ break; \
+ case HImode: /* TBH */ \
+ asm_fprintf (STREAM, "\tDCW\t|L..%d| - |L..%d|)/2\n", \
+ VALUE, REL); \
+ break; \
+ case SImode: \
+ if (flag_pic) \
+ asm_fprintf (STREAM, "\tDCD\t|L..%d| + 1 - |L..%d|\n", \
+ VALUE, REL); \
+ else \
+ asm_fprintf (STREAM, "\tDCD\t|L..%d| + 1\n", VALUE); \
+ break; \
+ default: \
+ gcc_unreachable(); \
+ } \
+ } \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ do \
+ { \
+ gcc_assert (!TARGET_THUMB2); \
+ fprintf ((STREAM), "\tDCD\t|L..%d|\n", (VALUE)); \
+ } \
+ while (0)
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* A label marking the start of a jump table is a data label. */
+#define ASM_OUTPUT_CASE_LABEL(STREAM, PREFIX, NUM, TABLE) \
+ fprintf ((STREAM), "\tALIGN\n|%s..%d|\n", (PREFIX), (NUM))
+
+/* Assembler Commands for Alignment. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t%%\t%d\n", (int) (NBYTES))
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ int amount = 1 << (POWER); \
+ \
+ if (amount == 2) \
+ fprintf ((STREAM), "\tALIGN 2\n"); \
+ else if (amount == 4) \
+ fprintf ((STREAM), "\tALIGN\n"); \
+ else \
+ fprintf ((STREAM), "\tALIGN %d\n", amount); \
+ } \
+ while (0)
+
+#undef DBX_DEBUGGING_INFO
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/aout.h b/gcc-4.2.1-5666.3/gcc/config/arm/aout.h
new file mode 100644
index 000000000..587d951b4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/aout.h
@@ -0,0 +1,354 @@
+/* Definitions of target machine for GNU compiler, for ARM with a.out
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2004
+ Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON ""
+#endif
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF ""
+#endif
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.bss"
+
+/* Note: If USER_LABEL_PREFIX or LOCAL_LABEL_PREFIX are changed,
+ make sure that this change is reflected in the function
+ coff_arm_is_local_label_name() in bfd/coff-arm.c. */
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX ""
+#endif
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* The assembler's names for the registers. Note that the ?xx registers are
+ there so that VFPv3/NEON registers D16-D31 have the same spacing as D0-D15
+ (each of which is overlaid on two S registers), although there are no
+ actual single-precision registers which correspond to D16-D31. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp", \
+ "mv0", "mv1", "mv2", "mv3", \
+ "mv4", "mv5", "mv6", "mv7", \
+ "mv8", "mv9", "mv10", "mv11", \
+ "mv12", "mv13", "mv14", "mv15", \
+ "wcgr0", "wcgr1", "wcgr2", "wcgr3", \
+ "wr0", "wr1", "wr2", "wr3", \
+ "wr4", "wr5", "wr6", "wr7", \
+ "wr8", "wr9", "wr10", "wr11", \
+ "wr12", "wr13", "wr14", "wr15", \
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", \
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", \
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", \
+ "d16", "?16", "d17", "?17", "d18", "?18", "d19", "?19", \
+ "d20", "?20", "d21", "?21", "d22", "?22", "d23", "?23", \
+ "d24", "?24", "d25", "?25", "d26", "?26", "d27", "?27", \
+ "d28", "?28", "d29", "?29", "d30", "?30", "d31", "?31", \
+ "vfpcc" \
+}
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"rfp", 9}, /* Gcc used to call it this */ \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15}, /* pc */ \
+ {"mvf0", 27}, \
+ {"mvf1", 28}, \
+ {"mvf2", 29}, \
+ {"mvf3", 30}, \
+ {"mvf4", 31}, \
+ {"mvf5", 32}, \
+ {"mvf6", 33}, \
+ {"mvf7", 34}, \
+ {"mvf8", 35}, \
+ {"mvf9", 36}, \
+ {"mvf10", 37}, \
+ {"mvf11", 38}, \
+ {"mvf12", 39}, \
+ {"mvf13", 40}, \
+ {"mvf14", 41}, \
+ {"mvf15", 42}, \
+ {"mvd0", 27}, \
+ {"mvd1", 28}, \
+ {"mvd2", 29}, \
+ {"mvd3", 30}, \
+ {"mvd4", 31}, \
+ {"mvd5", 32}, \
+ {"mvd6", 33}, \
+ {"mvd7", 34}, \
+ {"mvd8", 35}, \
+ {"mvd9", 36}, \
+ {"mvd10", 37}, \
+ {"mvd11", 38}, \
+ {"mvd12", 39}, \
+ {"mvd13", 40}, \
+ {"mvd14", 41}, \
+ {"mvd15", 42}, \
+ {"mvfx0", 27}, \
+ {"mvfx1", 28}, \
+ {"mvfx2", 29}, \
+ {"mvfx3", 30}, \
+ {"mvfx4", 31}, \
+ {"mvfx5", 32}, \
+ {"mvfx6", 33}, \
+ {"mvfx7", 34}, \
+ {"mvfx8", 35}, \
+ {"mvfx9", 36}, \
+ {"mvfx10", 37}, \
+ {"mvfx11", 38}, \
+ {"mvfx12", 39}, \
+ {"mvfx13", 40}, \
+ {"mvfx14", 41}, \
+ {"mvfx15", 42}, \
+ {"mvdx0", 27}, \
+ {"mvdx1", 28}, \
+ {"mvdx2", 29}, \
+ {"mvdx3", 30}, \
+ {"mvdx4", 31}, \
+ {"mvdx5", 32}, \
+ {"mvdx6", 33}, \
+ {"mvdx7", 34}, \
+ {"mvdx8", 35}, \
+ {"mvdx9", 36}, \
+ {"mvdx10", 37}, \
+ {"mvdx11", 38}, \
+ {"mvdx12", 39}, \
+ {"mvdx13", 40}, \
+ {"mvdx14", 41}, \
+ {"mvdx15", 42}, \
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */ \
+ {"d0", 63}, {"q0", 63}, \
+ {"d1", 65}, \
+ {"d2", 67}, {"q1", 67}, \
+ {"d3", 69}, \
+ {"d4", 71}, {"q2", 71}, \
+ {"d5", 73}, \
+ {"d6", 75}, {"q3", 75}, \
+ {"d7", 77}, \
+ {"d8", 79}, {"q4", 79}, \
+ {"d9", 81}, \
+ {"d10", 83}, {"q5", 83}, \
+ {"d11", 85}, \
+ {"d12", 87}, {"q6", 87}, \
+ {"d13", 89}, \
+ {"d14", 91}, {"q7", 91}, \
+ {"d15", 93}, \
+ {"q8", 95}, \
+ {"q9", 99}, \
+ {"q10", 103}, \
+ {"q11", 107}, \
+ {"q12", 111}, \
+ {"q13", 115}, \
+ {"q14", 119}, \
+ {"q15", 123} \
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */ \
+}
+#endif
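+
+/* A worked reading of the VFP/NEON entries above: counting the
+   REGISTER_NAMES table, s0 lands at hard register 63, so the aliases
+   follow
+
+       d<n> = 63 + 2*n   (d0 = 63, d1 = 65, ..., d15 = 93)
+       q<n> = 63 + 4*n   (q0 = 63, q8 = 95, ..., q15 = 123)
+
+   This is why REGISTER_NAMES pads D16-D31 with the "?16" ... "?31"
+   dummy names: they preserve the two-slots-per-D-register spacing even
+   though no single-precision registers overlay D16-D31. */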
+
+/* Arm Assembler barfs on dollars. */
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#ifndef NO_DOLLAR_IN_LABEL
+#define NO_DOLLAR_IN_LABEL 1
+#endif
+
+/* Generate DBX debugging information. riscix.h will undefine this because
+ the native assembler does not support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/* Acorn dbx moans about continuation chars, so don't use any. */
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
+
+/* Output a function label definition. */
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ ARM_DECLARE_FUNCTION_NAME (STREAM, NAME, DECL); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ while (0)
+#endif
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* Make an internal label into a string. */
+#ifndef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*%s%s%u", LOCAL_LABEL_PREFIX, PREFIX, (unsigned int)(NUM))
+#endif
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ do \
+ { \
+ gcc_assert (!TARGET_THUMB2); \
+ asm_fprintf (STREAM, "\t.word\t%LL%d\n", VALUE); \
+ } \
+ while (0)
+
+
+/* Thumb-2 always uses addr_diff_elt so that the Table Branch instructions
+ can be used. For non-pic code where the offsets are not suitable for
+ TBB/TBH, the elements are output as absolute labels. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ asm_fprintf (STREAM, "\tb\t%LL%d\n", VALUE); \
+ else if (TARGET_THUMB1) \
+ asm_fprintf (STREAM, "\t.word\t%LL%d-%LL%d\n", VALUE, REL); \
+ else /* Thumb-2 */ \
+ { \
+ switch (GET_MODE (BODY)) \
+ { \
+ case QImode: /* TBB */ \
+ asm_fprintf (STREAM, "\t.byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case HImode: /* TBH */ \
+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case SImode: \
+ if (flag_pic) \
+ asm_fprintf (STREAM, "\t.word\t%LL%d+1-%LL%d\n", VALUE, REL); \
+ else \
+ asm_fprintf (STREAM, "\t.word\t%LL%d+1\n", VALUE); \
+ break; \
+ default: \
+ gcc_unreachable(); \
+ } \
+ } \
+ } \
+ while (0)
+/* APPLE LOCAL end v7 support. Merge from mainline */
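+
+/* For example (with made-up labels: L2 as the table base, L5 as a case
+   target), the Thumb-2 QImode case above emits
+
+       .byte  (L5-L2)/2
+
+   The division by 2 reflects that TBB/TBH double the loaded offset
+   (Thumb instructions are at least 2-byte aligned); the "+1" in the
+   SImode case sets bit 0 of the address to mark the target as Thumb
+   code. */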
+
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
+ output_ascii_pseudo_op (STREAM, (const unsigned char *) (PTR), LEN)
+
+/* Output a gap. In fact we fill it with nulls. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf (STREAM, "\t.space\t%d\n", (int) (NBYTES))
+
+/* Align output to a power of two. Horrible /bin/as. */
+#ifndef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ register int amount = 1 << (POWER); \
+ \
+ if (amount == 2) \
+ fprintf (STREAM, "\t.even\n"); \
+ else if (amount != 1) \
+ fprintf (STREAM, "\t.align\t%d\n", amount - 4); \
+ } \
+ while (0)
+#endif
+
+/* Output a common block. */
+#ifndef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ do \
+ { \
+ fprintf (STREAM, "\t.comm\t"); \
+ assemble_name (STREAM, NAME); \
+ asm_fprintf (STREAM, ", %d\t%@ %d\n", \
+ (int)(ROUNDED), (int)(SIZE)); \
+ } \
+ while (0)
+#endif
+
+/* Output a local common block. /bin/as can't do this, so hack a
+ `.space' into the bss segment. Note that this is *bad* practice,
+ which is guaranteed NOT to work since it doesn't define STATIC
+ COMMON space but merely STATIC BSS space. */
+#ifndef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_ALIGN (STREAM, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ fprintf (STREAM, "\t.space\t%d\n", (int)(SIZE)); \
+ } \
+ while (0)
+#endif
+
+/* Output a zero-initialized block. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* Output a #ident directive. */
+#ifndef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(STREAM,STRING) \
+ asm_fprintf (STREAM, "%@ - - - ident %s\n", STRING)
+#endif
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* This works for GAS and some other assemblers. */
+#define SET_ASM_OP "\t.set\t"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm-cores.def b/gcc-4.2.1-5666.3/gcc/config/arm/arm-cores.def
new file mode 100644
index 000000000..f4715ec59
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm-cores.def
@@ -0,0 +1,123 @@
+/* ARM CPU Cores
+ Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ Written by CodeSourcery, LLC
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+/* Before using #include to read this file, define a macro:
+
+ ARM_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
+
+ The CORE_NAME is the name of the core, represented as a string constant.
+ The CORE_IDENT is the name of the core, represented as an identifier.
+ ARCH is the architecture revision implemented by the chip.
+ FLAGS are the bitwise-or of the traits that apply to that core.
+ This need not include flags implied by the architecture.
+ COSTS is the name of the rtx_costs routine to use.
+
+ If you update this table, you must update the "tune" attribute in
+ arm.md.
+
+ Some tools assume no whitespace up to the first "," in each entry. */
+
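+/* Illustrative consumer sketch (the enum tag core_id is a made-up name;
+   arm.h builds its processor_type enum in essentially this way):
+
+       #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) IDENT,
+       enum core_id
+       {
+       #include "arm-cores.def"
+         num_cores
+       };
+       #undef ARM_CORE
+
+   Each ARM_CORE line below then expands to a single enumerator. */
+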
+/* V2/V2A Architecture Processors */
+ARM_CORE("arm2", arm2, 2, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm250", arm250, 2, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm3", arm3, 2, FL_CO_PROC | FL_MODE26, slowmul)
+
+/* V3 Architecture Processors */
+ARM_CORE("arm6", arm6, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm60", arm60, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm600", arm600, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm610", arm610, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm620", arm620, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7", arm7, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm7d", arm7d, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm7di", arm7di, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm70", arm70, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm700", arm700, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm700i", arm700i, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm710", arm710, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm720", arm720, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm710c", arm710c, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7100", arm7100, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7500", arm7500, 3, FL_MODE26 | FL_WBUF, slowmul)
+/* Doesn't have an external co-proc, but does have embedded fpa. */
+ARM_CORE("arm7500fe", arm7500fe, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+
+/* V3M Architecture Processors */
+/* arm7m doesn't exist on its own, only as the D and I variants (arm7dm,
+ arm7dmi), but those features don't alter the generated code, so arm7m
+ is sometimes used. */
+ARM_CORE("arm7m", arm7m, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+ARM_CORE("arm7dm", arm7dm, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+ARM_CORE("arm7dmi", arm7dmi, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+
+/* V4 Architecture Processors */
+ARM_CORE("arm8", arm8, 4, FL_MODE26 | FL_LDSCHED, fastmul)
+ARM_CORE("arm810", arm810, 4, FL_MODE26 | FL_LDSCHED, fastmul)
+ARM_CORE("strongarm", strongarm, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, fastmul)
+ARM_CORE("strongarm110", strongarm110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, fastmul)
+ARM_CORE("strongarm1100", strongarm1100, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, fastmul)
+ARM_CORE("strongarm1110", strongarm1110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, fastmul)
+
+/* V4T Architecture Processors */
+ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC , fastmul)
+ARM_CORE("arm7tdmi-s", arm7tdmis, 4T, FL_CO_PROC , fastmul)
+ARM_CORE("arm710t", arm710t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm720t", arm720t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm740t", arm740t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm9", arm9, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm9tdmi", arm9tdmi, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm920", arm920, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm920t", arm920t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm922t", arm922t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm940t", arm940t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("ep9312", ep9312, 4T, FL_LDSCHED | FL_CIRRUS, fastmul)
+
+/* V5T Architecture Processors */
+ARM_CORE("arm10tdmi", arm10tdmi, 5T, FL_LDSCHED, fastmul)
+ARM_CORE("arm1020t", arm1020t, 5T, FL_LDSCHED, fastmul)
+
+/* V5TE Architecture Processors */
+ARM_CORE("arm9e", arm9e, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm946e-s", arm946es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm966e-s", arm966es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm968e-s", arm968es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm10e", arm10e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("arm1020e", arm1020e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("arm1022e", arm1022e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("xscale", xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale)
+ARM_CORE("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
+
+/* V5TEJ Architecture Processors */
+ARM_CORE("arm926ej-s", arm926ejs, 5TEJ, FL_LDSCHED, 9e)
+ARM_CORE("arm1026ej-s", arm1026ejs, 5TEJ, FL_LDSCHED, 9e)
+
+/* V6 Architecture Processors */
+ARM_CORE("arm1136j-s", arm1136js, 6J, FL_LDSCHED, 9e)
+ARM_CORE("arm1136jf-s", arm1136jfs, 6J, FL_LDSCHED | FL_VFPV2, 9e)
+ARM_CORE("arm1176jz-s", arm1176jzs, 6ZK, FL_LDSCHED, 9e)
+ARM_CORE("arm1176jzf-s", arm1176jzfs, 6ZK, FL_LDSCHED | FL_VFPV2, 9e)
+ARM_CORE("mpcorenovfp", mpcorenovfp, 6K, FL_LDSCHED, 9e)
+ARM_CORE("mpcore", mpcore, 6K, FL_LDSCHED | FL_VFPV2, 9e)
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ARM_CORE("arm1156t2-s", arm1156t2s, 6T2, FL_LDSCHED, 9e)
+ARM_CORE("cortex-a8", cortexa8, 7A, FL_LDSCHED, 9e)
+ARM_CORE("cortex-r4", cortexr4, 7R, FL_LDSCHED, 9e)
+ARM_CORE("cortex-m3", cortexm3, 7M, FL_LDSCHED, 9e)
+/* APPLE LOCAL end v7 support. Merge from mainline */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm-generic.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm-generic.md
new file mode 100644
index 000000000..611648648
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm-generic.md
@@ -0,0 +1,152 @@
+;; Generic ARM Pipeline Description
+;; Copyright (C) 2003 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+(define_automaton "arm")
+
+;; Write buffer
+;
+; Strictly, we should model a 4-deep write buffer for ARM7xx based chips
+;
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly. When writing the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of 1MCLK
+; cycle to add as well.
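+;
+; Concretely, the storeN reservations below encode this: a store of N
+; words holds write_buf for N+2 cycles and write_blockage for 2N+3
+; cycles (store1: 3 and 5, store2: 4 and 7, store3: 5 and 9, store4:
+; 6 and 11), i.e. about 2 cycles per word plus a fixed startup cost.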
+(define_cpu_unit "write_buf" "arm")
+
+;; Write blockage unit
+;
+; The write_blockage unit models (partially), the fact that reads will stall
+; until the write buffer empties.
+; The f_mem_r and r_mem_f could also block, but they are to the stack,
+; so we don't model them here
+(define_cpu_unit "write_blockage" "arm")
+
+;; Core
+;
+(define_cpu_unit "core" "arm")
+
+(define_insn_reservation "r_mem_f_wbuf" 5
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "r_mem_f")))
+ "core+write_buf*3")
+
+(define_insn_reservation "store_wbuf" 5
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")))
+ "core+write_buf*3+write_blockage*5")
+
+(define_insn_reservation "store2_wbuf" 7
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")))
+ "core+write_buf*4+write_blockage*7")
+
+(define_insn_reservation "store3_wbuf" 9
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")))
+ "core+write_buf*5+write_blockage*9")
+
+(define_insn_reservation "store4_wbuf" 11
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")))
+ "core+write_buf*6+write_blockage*11")
+
+(define_insn_reservation "store2" 3
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "no")
+ (eq_attr "type" "store2")))
+ "core*3")
+
+(define_insn_reservation "store3" 4
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "no")
+ (eq_attr "type" "store3")))
+ "core*4")
+
+(define_insn_reservation "store4" 5
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "model_wbuf" "no")
+ (eq_attr "type" "store4")))
+ "core*5")
+
+(define_insn_reservation "store_ldsched" 1
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "yes")
+ (eq_attr "type" "store1")))
+ "core")
+
+(define_insn_reservation "load_ldsched_xscale" 3
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "yes")
+ (and (eq_attr "type" "load_byte,load1")
+ (eq_attr "is_xscale" "yes"))))
+ "core")
+
+(define_insn_reservation "load_ldsched" 2
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "yes")
+ (and (eq_attr "type" "load_byte,load1")
+ (eq_attr "is_xscale" "no"))))
+ "core")
+
+(define_insn_reservation "load_or_store" 2
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "!yes")
+ (eq_attr "type" "load_byte,load1,load2,load3,load4,store1")))
+ "core*2")
+
+(define_insn_reservation "mult" 16
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")))
+ "core*16")
+
+(define_insn_reservation "mult_ldsched_strongarm" 3
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "yes")
+ (and (eq_attr "is_strongarm" "yes")
+ (eq_attr "type" "mult"))))
+ "core*2")
+
+(define_insn_reservation "mult_ldsched" 4
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "ldsched" "yes")
+ (and (eq_attr "is_strongarm" "no")
+ (eq_attr "type" "mult"))))
+ "core*4")
+
+(define_insn_reservation "multi_cycle" 32
+ (and (eq_attr "generic_sched" "yes")
+ (and (eq_attr "core_cycles" "multi")
+ (eq_attr "type" "!mult,load_byte,load1,load2,load3,load4,store1,store2,store3,store4")))
+ "core*32")
+
+(define_insn_reservation "single_cycle" 1
+ (and (eq_attr "generic_sched" "yes")
+ (eq_attr "core_cycles" "single"))
+ "core")
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm-modes.def b/gcc-4.2.1-5666.3/gcc/config/arm/arm-modes.def
new file mode 100644
index 000000000..c64a5faa5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm-modes.def
@@ -0,0 +1,70 @@
+/* Definitions of target machine for GNU compiler, for ARM.
+ Copyright (C) 2002, 2004 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com)
+ Minor hacks by Nick Clifton (nickc@cygnus.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Extended precision floating point.
+ FIXME What format is this? */
+FLOAT_MODE (XF, 12, 0);
+
+/* CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CC_Nmode should be used if only the N (sign) flag is set correctly
+ CCmode should be used otherwise. */
+
+CC_MODE (CC_NOOV);
+CC_MODE (CC_Z);
+CC_MODE (CC_SWP);
+CC_MODE (CCFP);
+CC_MODE (CCFPE);
+CC_MODE (CC_DNE);
+CC_MODE (CC_DEQ);
+CC_MODE (CC_DLE);
+CC_MODE (CC_DLT);
+CC_MODE (CC_DGE);
+CC_MODE (CC_DGT);
+CC_MODE (CC_DLEU);
+CC_MODE (CC_DLTU);
+CC_MODE (CC_DGEU);
+CC_MODE (CC_DGTU);
+CC_MODE (CC_C);
+CC_MODE (CC_N);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Opaque integer modes for 3, 4, 6 or 8 Neon double registers (2 is
+ TImode). */
+INT_MODE (EI, 24);
+INT_MODE (OI, 32);
+INT_MODE (CI, 48);
+/* ??? This should actually have 512 bits, but the mode's precision field
+ is only 9 bits wide, so the largest representable precision is 511. */
+FRACTIONAL_INT_MODE (XI, 511, 64);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm-protos.h b/gcc-4.2.1-5666.3/gcc/config/arm/arm-protos.h
new file mode 100644
index 000000000..9543ba096
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm-protos.h
@@ -0,0 +1,275 @@
+/* Prototypes for exported functions defined in arm.c and pe.c
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@arm.com)
+ Minor hacks by Nick Clifton (nickc@cygnus.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#ifndef GCC_ARM_PROTOS_H
+#define GCC_ARM_PROTOS_H
+
+/* APPLE LOCAL ARM darwin optimization defaults */
+extern void optimization_options (int, int);
+/* APPLE LOCAL begin ARM compact switch tables */
+extern void arm_adjust_insn_length (rtx, int *);
+extern void register_switch8_libfunc (void);
+extern void register_switchu8_libfunc (void);
+extern void register_switch16_libfunc (void);
+extern void register_switch32_libfunc (void);
+extern int count_thumb_unexpanded_prologue (void);
+extern int arm_label_align (rtx);
+/* APPLE LOCAL end ARM compact switch tables */
+/* APPLE LOCAL ARM prefer SP to FP */
+extern HOST_WIDE_INT arm_local_debug_offset (rtx);
+extern void arm_override_options (void);
+extern int use_return_insn (int, rtx);
+extern int arm_regno_class (int);
+extern void arm_load_pic_register (unsigned long);
+extern int arm_volatile_func (void);
+extern const char *arm_output_epilogue (rtx);
+extern void arm_expand_prologue (void);
+extern const char *arm_strip_name_encoding (const char *);
+extern void arm_asm_output_labelref (FILE *, const char *);
+/* APPLE LOCAL v7 support. Merge from mainline */
+extern void thumb2_asm_output_opcode (FILE *);
+extern unsigned long arm_current_func_type (void);
+extern HOST_WIDE_INT arm_compute_initial_elimination_offset (unsigned int,
+ unsigned int);
+extern HOST_WIDE_INT thumb_compute_initial_elimination_offset (unsigned int,
+ unsigned int);
+extern unsigned int arm_dbx_register_number (unsigned int);
+extern void arm_output_fn_unwind (FILE *, bool);
+
+
+#ifdef TREE_CODE
+extern int arm_return_in_memory (tree);
+extern void arm_encode_call_attribute (tree, int);
+#endif
+#ifdef RTX_CODE
+extern bool arm_vector_mode_supported_p (enum machine_mode);
+extern int arm_hard_regno_mode_ok (unsigned int, enum machine_mode);
+extern int const_ok_for_arm (HOST_WIDE_INT);
+/* APPLE LOCAL begin 5831562 long long constants */
+extern bool const64_ok_for_arm_immediate (rtx);
+extern bool const64_ok_for_arm_add (rtx);
+/* APPLE LOCAL end 5831562 long long constants */
+extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx,
+ HOST_WIDE_INT, rtx, rtx, int);
+/* APPLE LOCAL 6258536 atomic builtins */
+extern void arm_split_compare_and_swap(rtx, rtx, rtx, rtx, rtx);
+extern RTX_CODE arm_canonicalize_comparison (RTX_CODE, enum machine_mode,
+ rtx *);
+extern int legitimate_pic_operand_p (rtx);
+extern rtx legitimize_pic_address (rtx, enum machine_mode, rtx);
+extern rtx legitimize_tls_address (rtx, rtx);
+extern int arm_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, int);
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+extern int thumb1_legitimate_address_p (enum machine_mode, rtx, int);
+extern int thumb2_legitimate_address_p (enum machine_mode, rtx, int);
+/* APPLE LOCAL end v7 support. Merge from mainline */
+extern int thumb_legitimate_offset_p (enum machine_mode, HOST_WIDE_INT);
+extern rtx arm_legitimize_address (rtx, rtx, enum machine_mode);
+extern rtx thumb_legitimize_address (rtx, rtx, enum machine_mode);
+extern rtx thumb_legitimize_reload_address (rtx *, enum machine_mode, int, int,
+ int);
+extern int arm_const_double_rtx (rtx);
+extern int neg_const_double_rtx_ok_for_fpa (rtx);
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+extern int vfp3_const_double_rtx (rtx);
+extern int neon_immediate_valid_for_move (rtx, enum machine_mode, rtx *, int *);
+extern int neon_immediate_valid_for_logic (rtx, enum machine_mode, int, rtx *,
+ int *);
+extern char *neon_output_logic_immediate (const char *, rtx *,
+ enum machine_mode, int, int);
+extern void neon_pairwise_reduce (rtx, rtx, enum machine_mode,
+ rtx (*) (rtx, rtx, rtx));
+extern void neon_expand_vector_init (rtx, rtx);
+extern void neon_reinterpret (rtx, rtx);
+extern void neon_emit_pair_result_insn (enum machine_mode,
+ rtx (*) (rtx, rtx, rtx, rtx),
+ rtx, rtx, rtx);
+extern void neon_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+extern enum reg_class coproc_secondary_reload_class (enum machine_mode, rtx,
+ bool);
+extern bool arm_tls_referenced_p (rtx);
+
+extern int cirrus_memory_offset (rtx);
+extern int arm_coproc_mem_operand (rtx, bool);
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+extern int neon_vector_mem_operand (rtx, bool);
+extern int neon_struct_mem_operand (rtx);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+extern int arm_no_early_store_addr_dep (rtx, rtx);
+extern int arm_no_early_alu_shift_dep (rtx, rtx);
+extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
+extern int arm_no_early_mul_dep (rtx, rtx);
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+extern int arm_mac_accumulator_is_mul_result (rtx, rtx);
+
+extern int tls_mentioned_p (rtx);
+extern int symbol_mentioned_p (rtx);
+/* APPLE LOCAL ARM -mdynamic-no-pic support */
+extern int non_local_symbol_mentioned_p (rtx);
+extern int label_mentioned_p (rtx);
+extern RTX_CODE minmax_code (rtx);
+extern int adjacent_mem_locations (rtx, rtx);
+extern int load_multiple_sequence (rtx *, int, int *, int *, HOST_WIDE_INT *);
+extern const char *emit_ldm_seq (rtx *, int);
+extern int store_multiple_sequence (rtx *, int, int *, int *, HOST_WIDE_INT *);
+extern const char * emit_stm_seq (rtx *, int);
+extern rtx arm_gen_load_multiple (int, int, rtx, int, int,
+ rtx, HOST_WIDE_INT *);
+extern rtx arm_gen_store_multiple (int, int, rtx, int, int,
+ rtx, HOST_WIDE_INT *);
+extern int arm_gen_movmemqi (rtx *);
+extern enum machine_mode arm_select_cc_mode (RTX_CODE, rtx, rtx);
+extern enum machine_mode arm_select_dominance_cc_mode (rtx, rtx,
+ HOST_WIDE_INT);
+extern rtx arm_gen_compare_reg (RTX_CODE, rtx, rtx);
+extern rtx arm_gen_return_addr_mask (void);
+extern void arm_reload_in_hi (rtx *);
+extern void arm_reload_out_hi (rtx *);
+extern int arm_const_double_inline_cost (rtx);
+extern bool arm_const_double_by_parts (rtx);
+extern const char *fp_immediate_constant (rtx);
+extern const char *output_call (rtx *);
+extern const char *output_call_mem (rtx *);
+extern const char *output_mov_long_double_fpa_from_arm (rtx *);
+extern const char *output_mov_long_double_arm_from_fpa (rtx *);
+extern const char *output_mov_long_double_arm_from_arm (rtx *);
+extern const char *output_mov_double_fpa_from_arm (rtx *);
+extern const char *output_mov_double_arm_from_fpa (rtx *);
+extern const char *output_move_double (rtx *);
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+extern const char *output_move_quad (rtx *);
+extern const char *output_move_vfp (rtx *operands);
+extern const char *output_move_neon (rtx *operands);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+extern const char *output_add_immediate (rtx *);
+extern const char *arithmetic_instr (rtx, int);
+extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
+extern const char *output_return_instruction (rtx, int, int);
+extern void arm_poke_function_name (FILE *, const char *);
+extern void arm_print_operand (FILE *, rtx, int);
+extern void arm_print_operand_address (FILE *, rtx);
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Removed line */
+extern void arm_final_prescan_insn (rtx);
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Removed line */
+extern int arm_debugger_arg_offset (int, rtx);
+extern int arm_is_longcall_p (rtx, int, int);
+extern int arm_emit_vector_const (FILE *, rtx);
+extern const char * arm_output_load_gr (rtx *);
+/* APPLE LOCAL v7 support. Merge from mainline */
+extern const char *vfp_output_fstmd (rtx *);
+extern void arm_set_return_address (rtx, rtx);
+extern int arm_eliminable_register (rtx);
+/* APPLE LOCAL v7 support. Merge from mainline */
+extern const char *arm_output_shift(rtx *, int);
+
+extern bool arm_output_addr_const_extra (FILE *, rtx);
+
+#if defined TREE_CODE
+extern rtx arm_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
+extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
+extern bool arm_pad_arg_upward (enum machine_mode, tree);
+extern bool arm_pad_reg_upward (enum machine_mode, tree, int);
+extern bool arm_needs_doubleword_align (enum machine_mode, tree);
+extern rtx arm_function_value(tree, tree);
+#endif
+extern int arm_apply_result_size (void);
+
+#if defined AOF_ASSEMBLER
+extern rtx aof_pic_entry (rtx);
+extern void aof_add_import (const char *);
+extern void aof_delete_import (const char *);
+extern void zero_init_section (void);
+#endif /* AOF_ASSEMBLER */
+
+#endif /* RTX_CODE */
+
+extern int arm_float_words_big_endian (void);
+
+/* Thumb functions. */
+extern void arm_init_expanders (void);
+extern const char *thumb_unexpanded_epilogue (void);
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+extern void thumb1_expand_prologue (void);
+extern void thumb1_expand_epilogue (void);
+#ifdef TREE_CODE
+extern int is_called_in_ARM_mode (tree);
+#endif
+extern int thumb_shiftable_const (unsigned HOST_WIDE_INT);
+#ifdef RTX_CODE
+extern void thumb1_final_prescan_insn (rtx);
+extern void thumb2_final_prescan_insn (rtx);
+extern const char *thumb_load_double_from_address (rtx *);
+extern const char *thumb_output_move_mem_multiple (int, rtx *);
+extern const char *thumb_call_via_reg (rtx);
+extern void thumb_expand_movmemqi (rtx *);
+extern rtx arm_return_addr (int, rtx);
+extern void thumb_reload_out_hi (rtx *);
+extern void thumb_reload_in_hi (rtx *);
+extern void thumb_set_return_address (rtx, rtx);
+extern const char *thumb2_output_casesi(rtx *);
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* APPLE LOCAL begin ARM enhance conditional insn generation */
+#ifdef BB_HEAD
+extern void arm_ifcvt_modify_multiple_tests (ce_if_block_t *, basic_block, rtx *, rtx*);
+#endif
+/* APPLE LOCAL end ARM enhance conditional insn generation */
+
+/* Defined in pe.c. */
+extern int arm_dllexport_name_p (const char *);
+extern int arm_dllimport_name_p (const char *);
+
+#ifdef TREE_CODE
+extern void arm_pe_unique_section (tree, int);
+extern void arm_pe_encode_section_info (tree, rtx, int);
+extern int arm_dllexport_p (tree);
+extern int arm_dllimport_p (tree);
+extern void arm_mark_dllexport (tree);
+extern void arm_mark_dllimport (tree);
+#endif
+
+extern void arm_pr_long_calls (struct cpp_reader *);
+extern void arm_pr_no_long_calls (struct cpp_reader *);
+extern void arm_pr_long_calls_off (struct cpp_reader *);
+/* APPLE LOCAL 5946347 ms_struct support */
+extern int arm_field_ms_struct_align (tree);
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+extern const char *arm_mangle_type (tree);
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* APPLE LOCAL v7 support. Fix compact switch tables */
+extern void arm_asm_output_addr_diff_vec (FILE *file, rtx LABEL, rtx BODY);
+
+/* APPLE LOCAL begin 6160917 */
+extern void neon_reload_in (rtx *, enum machine_mode);
+extern void neon_reload_out (rtx *, enum machine_mode);
+/* APPLE LOCAL end 6160917 */
+/* APPLE LOCAL 5571707 Allow R9 as caller-saved register */
+void arm_darwin_subtarget_conditional_register_usage (void);
+
+#endif /* ! GCC_ARM_PROTOS_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm-tune.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm-tune.md
new file mode 100644
index 000000000..57b4d62ec
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm-tune.md
@@ -0,0 +1,6 @@
+;; -*- buffer-read-only: t -*-
+;; Generated automatically by gentune.sh from arm-cores.def
+(define_attr "tune"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,cortexa8,cortexr4,cortexm3"
+ (const (symbol_ref "arm_tune")))
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm.c b/gcc-4.2.1-5666.3/gcc/config/arm/arm.c
new file mode 100644
index 000000000..d431ecb12
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm.c
@@ -0,0 +1,23901 @@
+/* Output routines for GCC for ARM.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+ 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* APPLE LOCAL begin 6902792 Q register clobbers in inline asm */
+#include <stdlib.h>
+#include <ctype.h>
+/* APPLE LOCAL end 6902792 Q register clobbers in inline asm */
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "obstack.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "toplev.h"
+#include "recog.h"
+#include "ggc.h"
+#include "except.h"
+#include "c-pragma.h"
+#include "integrate.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "debug.h"
+#include "langhooks.h"
+/* APPLE LOCAL ARM needed for SUBSUBTARGET_OVERRIDE_OPTIONS */
+#include "../../libcpp/internal.h"
+/* APPLE LOCAL ARM needed for set_param_value */
+#include "params.h"
+
+/* Forward definitions of types. */
+typedef struct minipool_node Mnode;
+typedef struct minipool_fixup Mfix;
+
+const struct attribute_spec arm_attribute_table[];
+
+/* Forward function declarations. */
+static arm_stack_offsets *arm_get_frame_offsets (void);
+static void arm_add_gc_roots (void);
+static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
+ HOST_WIDE_INT, rtx, rtx, int, int);
+static unsigned bit_count (unsigned long);
+static int arm_address_register_rtx_p (rtx, int);
+static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+static int thumb2_legitimate_index_p (enum machine_mode, rtx, int);
+static int thumb1_base_register_rtx_p (rtx, enum machine_mode, int);
+inline static int thumb1_index_register_rtx_p (rtx, int);
+static int thumb_far_jump_used_p (void);
+static bool thumb_force_lr_save (void);
+static unsigned long thumb1_compute_save_reg_mask (void);
+/* APPLE LOCAL end v7 support. Merge from mainline */
+static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
+static rtx emit_sfm (int, int);
+static int arm_size_return_regs (void);
+#ifndef AOF_ASSEMBLER
+static bool arm_assemble_integer (rtx, unsigned int, int);
+#endif
+static const char *fp_const_from_val (REAL_VALUE_TYPE *);
+static arm_cc get_arm_condition_code (rtx);
+static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
+static rtx is_jump_table (rtx);
+static const char *output_multi_immediate (rtx *, const char *, const char *,
+ int, HOST_WIDE_INT);
+static const char *shift_op (rtx, HOST_WIDE_INT *);
+static struct machine_function *arm_init_machine_status (void);
+/* APPLE LOCAL begin compact switch tables */
+static int handle_thumb_unexpanded_prologue (FILE *, bool);
+static int handle_thumb_unexpanded_epilogue (bool);
+static int handle_thumb_exit (FILE *, int, bool);
+static int handle_thumb_pushpop (FILE *, unsigned long, int, int *, unsigned long, bool);
+/* APPLE LOCAL end compact switch tables */
+static HOST_WIDE_INT get_jump_table_size (rtx);
+static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
+static Mnode *add_minipool_forward_ref (Mfix *);
+static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
+static Mnode *add_minipool_backward_ref (Mfix *);
+static void assign_minipool_offsets (Mfix *);
+static void arm_print_value (FILE *, rtx);
+static void dump_minipool (rtx);
+static int arm_barrier_cost (rtx);
+static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
+static void push_minipool_barrier (rtx, HOST_WIDE_INT);
+static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
+ rtx);
+static void arm_reorg (void);
+static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
+static int current_file_function_operand (rtx);
+static unsigned long arm_compute_save_reg0_reg12_mask (void);
+static unsigned long arm_compute_save_reg_mask (void);
+static unsigned long arm_isr_value (tree);
+static unsigned long arm_compute_func_type (void);
+static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
+static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
+#endif
+static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
+/* APPLE LOCAL v7 support. Merge from mainline */
+static void thumb1_output_function_prologue (FILE *, HOST_WIDE_INT);
+static int arm_comp_type_attributes (tree, tree);
+static void arm_set_default_type_attributes (tree);
+static int arm_adjust_cost (rtx, rtx, rtx, int);
+static int count_insns_for_constant (HOST_WIDE_INT, int);
+static int arm_get_strip_length (int);
+static bool arm_function_ok_for_sibcall (tree, tree);
+static void arm_internal_label (FILE *, const char *, unsigned long);
+static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
+ tree);
+static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
+static bool arm_size_rtx_costs (rtx, int, int, int *);
+static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
+static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
+static bool arm_xscale_rtx_costs (rtx, int, int, int *);
+static bool arm_9e_rtx_costs (rtx, int, int, int *);
+static int arm_address_cost (rtx);
+static bool arm_memory_load_p (rtx);
+static bool arm_cirrus_insn_p (rtx);
+static void cirrus_reorg (rtx);
+static void arm_init_builtins (void);
+static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+static void arm_init_iwmmxt_builtins (void);
+static rtx safe_vector_operand (rtx, enum machine_mode);
+static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
+static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
+static void emit_constant_insn (rtx cond, rtx pattern);
+static rtx emit_set_insn (rtx, rtx);
+static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+
+#ifdef OBJECT_FORMAT_ELF
+static void arm_elf_asm_constructor (rtx, int);
+#endif
+/* APPLE LOCAL begin ARM darwin section_info */
+#if TARGET_MACHO
+static void arm_darwin_encode_section_info (tree, rtx, int);
+#elif !defined(ARM_PE)
+static void arm_encode_section_info (tree, rtx, int);
+#endif
+/* APPLE LOCAL end ARM darwin section_info */
+
+static void arm_file_end (void);
+/* APPLE LOCAL v7 support. Merge from mainline */
+static void arm_file_start (void);
+
+/* APPLE LOCAL begin ARM asm file hooks */
+#if TARGET_MACHO
+static void arm_darwin_file_start (void);
+static void arm_darwin_file_end (void);
+#endif
+/* APPLE LOCAL end ARM asm file hooks */
+
+#ifdef AOF_ASSEMBLER
+static void aof_globalize_label (FILE *, const char *);
+static void aof_dump_imports (FILE *);
+static void aof_dump_pic_table (FILE *);
+static void aof_file_start (void);
+static void aof_file_end (void);
+static void aof_asm_init_sections (void);
+#endif
+static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int *, int);
+static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
+ enum machine_mode, tree, bool);
+static bool arm_promote_prototypes (tree);
+static bool arm_default_short_enums (void);
+static bool arm_align_anon_bitfield (void);
+static bool arm_return_in_msb (tree);
+static bool arm_must_pass_in_stack (enum machine_mode, tree);
+#ifdef TARGET_UNWIND_INFO
+static void arm_unwind_emit (FILE *, rtx);
+static bool arm_output_ttype (rtx);
+#endif
+/* APPLE LOCAL v7 support. Merge from mainline */
+static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+static rtx arm_dwarf_register_span(rtx);
+
+static tree arm_cxx_guard_type (void);
+static bool arm_cxx_guard_mask_bit (void);
+static tree arm_get_cookie_size (tree);
+static bool arm_cookie_has_size (void);
+static bool arm_cxx_cdtor_returns_this (void);
+static bool arm_cxx_key_method_may_be_inline (void);
+static void arm_cxx_determine_class_data_visibility (tree);
+static bool arm_cxx_class_data_always_comdat (void);
+static bool arm_cxx_use_aeabi_atexit (void);
+static void arm_init_libfuncs (void);
+static bool arm_handle_option (size_t, const char *, int);
+static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
+static bool arm_cannot_copy_insn_p (rtx);
+static bool arm_tls_symbol_p (rtx x);
+/* APPLE LOCAL begin ARM -mdynamic-no-pic support */
+static int symbol_mentioned_with_filter (rtx, int);
+static bool arm_cannot_force_const_mem (rtx x);
+/* APPLE LOCAL end ARM -mdynamic-no-pic support */
+/* APPLE LOCAL ARM reliable backtraces */
+static rtx arm_builtin_setjmp_frame_value (void);
+/* APPLE LOCAL begin ARM darwin local binding */
+#if TARGET_MACHO
+static bool arm_binds_local_p (tree);
+#endif
+/* APPLE LOCAL end ARM darwin local binding */
+/* APPLE LOCAL begin 5946347 ms_struct support */
+static tree arm_handle_ms_struct_attribute (tree *, tree, tree, int, bool *);
+static tree arm_handle_gcc_struct_attribute (tree *, tree, tree, int, bool *);
+static bool arm_ms_bitfield_layout_p (tree);
+/* APPLE LOCAL end 5946347 ms_struct support */
+/* APPLE LOCAL ARM 6008578 */
+static HOST_WIDE_INT get_label_pad (rtx, HOST_WIDE_INT);
+
+/* APPLE LOCAL 6902792 Q register clobbers in inline asm */
+static tree arm_md_asm_clobbers (tree, tree, tree);
+
+/* Initialize the GCC target structure. */
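+/* Each #undef/#define pair below overrides one field of the default
+   target vector; after the overrides, arm.c instantiates the vector in
+   the usual GCC pattern:
+
+       struct gcc_target targetm = TARGET_INITIALIZER;
+
+   so every TARGET_* macro left at its default keeps the generic hook. */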
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+#undef TARGET_MERGE_DECL_ATTRIBUTES
+#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
+#endif
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE arm_attribute_table
+
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END arm_file_end
+
+/* APPLE LOCAL begin ARM asm file hooks */
+#if TARGET_MACHO
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START arm_darwin_file_start
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END arm_darwin_file_end
+#endif
+/* APPLE LOCAL end ARM asm file hooks */
+
+#ifdef AOF_ASSEMBLER
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\tDCB\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
+#undef TARGET_ASM_GLOBALIZE_LABEL
+#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START aof_file_start
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END aof_file_end
+#else
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP NULL
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER arm_assemble_integer
+#endif
+
+/* APPLE LOCAL begin ARM MACH assembler */
+#ifdef OBJECT_FORMAT_MACHO
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.long\t"
+#endif
+/* APPLE LOCAL end ARM MACH assembler */
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+/* APPLE LOCAL 6216388 Don't schedule prologue by default */
+#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT)
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION arm_handle_option
+
+#undef TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
+
+#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
+#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST arm_adjust_cost
+
+#undef TARGET_ENCODE_SECTION_INFO
+#ifdef ARM_PE
+#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
+/* APPLE LOCAL begin ARM darwin section_info */
+#elif TARGET_MACHO
+#define TARGET_ENCODE_SECTION_INFO arm_darwin_encode_section_info
+/* APPLE LOCAL end ARM darwin section_info */
+#else
+#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
+#endif
+
+#undef TARGET_STRIP_NAME_ENCODING
+#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
+
+#undef TARGET_ASM_INTERNAL_LABEL
+#define TARGET_ASM_INTERNAL_LABEL arm_internal_label
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
+
+/* This will be overridden in arm_override_options. */
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST arm_address_cost
+
+#undef TARGET_SHIFT_TRUNCATION_MASK
+#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS arm_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN arm_expand_builtin
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS arm_init_libfuncs
+
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
+
+#undef TARGET_DEFAULT_SHORT_ENUMS
+#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
+
+#undef TARGET_ALIGN_ANON_BITFIELD
+#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
+
+#undef TARGET_NARROW_VOLATILE_BITFIELD
+#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
+
+#undef TARGET_CXX_GUARD_TYPE
+#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
+
+#undef TARGET_CXX_GUARD_MASK_BIT
+#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
+
+#undef TARGET_CXX_GET_COOKIE_SIZE
+#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
+
+#undef TARGET_CXX_COOKIE_HAS_SIZE
+#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
+
+#undef TARGET_CXX_CDTOR_RETURNS_THIS
+#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
+
+#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
+#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
+
+#undef TARGET_CXX_USE_AEABI_ATEXIT
+#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
+
+#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
+#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
+ arm_cxx_determine_class_data_visibility
+
+#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
+#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB arm_return_in_msb
+
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
+
+#ifdef TARGET_UNWIND_INFO
+#undef TARGET_UNWIND_EMIT
+#define TARGET_UNWIND_EMIT arm_unwind_emit
+
+/* EABI unwinding tables use a different format for the typeinfo tables. */
+#undef TARGET_ASM_TTYPE
+#define TARGET_ASM_TTYPE arm_output_ttype
+
+#undef TARGET_ARM_EABI_UNWINDER
+#define TARGET_ARM_EABI_UNWINDER true
+#endif /* TARGET_UNWIND_INFO */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
+#define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+#undef TARGET_CANNOT_COPY_INSN_P
+#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS true
+#endif
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+/* APPLE LOCAL ARM -mdynamic-no-pic support */
+#define TARGET_CANNOT_FORCE_CONST_MEM arm_cannot_force_const_mem
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 4095
+
+/* The minimum is set such that the total size of the block
+ for a particular anchor is 4088 + 1 + 4095 bytes, which is
+ divisible by eight, ensuring natural spacing of anchors. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -4088
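+/* (That is, the block spans 4095 - (-4088) + 1 = 8184 bytes, and
+ 8184 = 8 * 1023, hence the divisibility claim above.) */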
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* APPLE LOCAL begin ARM darwin local binding */
+#if TARGET_MACHO
+#undef TARGET_BINDS_LOCAL_P
+#define TARGET_BINDS_LOCAL_P arm_binds_local_p
+#endif
+/* APPLE LOCAL end ARM darwin local binding */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE arm_mangle_type
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* APPLE LOCAL begin ARM reliable backtraces */
+#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
+#define TARGET_BUILTIN_SETJMP_FRAME_VALUE arm_builtin_setjmp_frame_value
+/* APPLE LOCAL end ARM reliable backtraces */
+
+/* APPLE LOCAL begin 5946347 ms_struct support */
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P arm_ms_bitfield_layout_p
+/* APPLE LOCAL end 5946347 ms_struct support */
+
+/* APPLE LOCAL begin 6902792 Q register clobbers in inline asm */
+#undef TARGET_MD_ASM_CLOBBERS
+#define TARGET_MD_ASM_CLOBBERS arm_md_asm_clobbers
+/* APPLE LOCAL end 6902792 Q register clobbers in inline asm */
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Obstack for minipool constant handling. */
+static struct obstack minipool_obstack;
+static char * minipool_startobj;
+
+/* The maximum number of insns skipped which
+ will be conditionalised if possible. */
+static int max_insns_skipped = 5;
+
+extern FILE * asm_out_file;
+
+/* APPLE LOCAL begin 6879229 disallow -fasm-blocks */
+extern int flag_iasm_blocks;
+/* APPLE LOCAL end 6879229 disallow -fasm-blocks */
+
+/* True if we are currently building a constant table. */
+int making_const_table;
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+rtx arm_compare_op0, arm_compare_op1;
+
+/* The processor for which instructions should be scheduled. */
+enum processor_type arm_tune = arm_none;
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* The default processor used if not overridden on the command line. */
+static enum processor_type arm_default_cpu = arm_none;
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Which floating point model to use. */
+enum arm_fp_model arm_fp_model;
+
+/* Which floating point hardware is available. */
+enum fputype arm_fpu_arch;
+
+/* Which floating point hardware to schedule for. */
+enum fputype arm_fpu_tune;
+
+/* Whether to use floating point hardware. */
+enum float_abi_type arm_float_abi;
+
+/* Which ABI to use. */
+enum arm_abi_type arm_abi;
+
+/* Which thread pointer model to use. */
+enum arm_tp_type target_thread_pointer = TP_AUTO;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
+
+/* Used for Thumb call_via trampolines. */
+rtx thumb_call_via_label[14];
+static int thumb_call_reg_needed;
+
+/* APPLE LOCAL 5571707 Allow R9 as caller-saved register */
+static int darwin_reserve_r9_on_v6 = 0;
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* Keeps track of which *_switch* functions we've used, so we
+ can emit the right stubs. */
+static GTY(()) rtx switch8_libfunc;
+static GTY(()) rtx switchu8_libfunc;
+static GTY(()) rtx switch16_libfunc;
+static GTY(()) rtx switch32_libfunc;
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
+#define FL_ARCH3M (1 << 1) /* Extended multiply */
+#define FL_MODE26 (1 << 2) /* 26-bit mode support */
+#define FL_MODE32 (1 << 3) /* 32-bit mode support */
+#define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
+#define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
+#define FL_THUMB (1 << 6) /* Thumb aware */
+#define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
+#define FL_STRONG (1 << 8) /* StrongARM */
+#define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
+#define FL_XSCALE (1 << 10) /* XScale */
+#define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
+#define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
+ media instructions. */
+#define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
+#define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
+ Note: ARM6 & 7 derivatives only. */
+#define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define FL_THUMB2 (1 << 16) /* Thumb-2. */
+#define FL_NOTM (1 << 17) /* Instructions not present in the 'M'
+ profile. */
+#define FL_DIV (1 << 18) /* Hardware divide. */
+#define FL_VFPV3 (1 << 19) /* Vector Floating Point V3. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+#define FL_NEON (1 << 20) /* Neon instructions. */
+
+#define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define FL_FOR_ARCH2 FL_NOTM
+#define FL_FOR_ARCH3 (FL_FOR_ARCH2 | FL_MODE32)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
+#define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
+#define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
+#define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
+#define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
+#define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
+#define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
+#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
+#define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
+#define FL_FOR_ARCH6J FL_FOR_ARCH6
+#define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
+#define FL_FOR_ARCH6Z FL_FOR_ARCH6
+#define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define FL_FOR_ARCH6T2 (FL_FOR_ARCH6 | FL_THUMB2)
+#define FL_FOR_ARCH7 (FL_FOR_ARCH6T2 &~ FL_NOTM)
+/* APPLE LOCAL 6093388 -mfpu=neon default for v7a */
+#define FL_FOR_ARCH7A (FL_FOR_ARCH7 | FL_NOTM | FL_NEON)
+#define FL_FOR_ARCH7R (FL_FOR_ARCH7A | FL_DIV)
+#define FL_FOR_ARCH7M (FL_FOR_ARCH7 | FL_DIV)
+/* APPLE LOCAL end v7 support. Merge from mainline */
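+/* Reading one chain as an example: FL_FOR_ARCH7 strips FL_NOTM from the
+ v6T2 flag set to form the M-profile baseline, and FL_FOR_ARCH7A then
+ restores FL_NOTM and adds FL_NEON on top of it. */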
+
+/* The bits in this mask specify which
+ instructions we are allowed to generate. */
+static unsigned long insn_flags = 0;
+
+/* The bits in this mask specify which instruction scheduling options should
+ be used. */
+static unsigned long tune_flags = 0;
+
+/* The following are used in the arm.md file as equivalents to bits
+ in the above two flag variables. */
+
+/* Nonzero if this chip supports the ARM Architecture 3M extensions. */
+int arm_arch3m = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions. */
+int arm_arch4 = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4t extensions. */
+int arm_arch4t = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 5 extensions. */
+int arm_arch5 = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 5E extensions. */
+int arm_arch5e = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 6 extensions. */
+int arm_arch6 = 0;
+
+/* Nonzero if this chip supports the ARM 6K extensions. */
+int arm_arch6k = 0;
+
+/* APPLE LOCAL begin 6093388 -mfpu=neon default for v7a */
+/* Nonzero if this chip supports the ARM 7A extensions. */
+int arm_arch7a = 0;
+/* APPLE LOCAL end 6093388 -mfpu=neon default for v7a */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Nonzero if instructions not present in the 'M' profile can be used. */
+int arm_arch_notm = 0;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+int arm_ld_sched = 0;
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Nonzero if this chip is a StrongARM. */
+int arm_tune_strongarm = 0;
+
+/* Nonzero if this chip is a Cirrus variant. */
+int arm_arch_cirrus = 0;
+
+/* Nonzero if this chip supports Intel Wireless MMX technology. */
+int arm_arch_iwmmxt = 0;
+
+/* Nonzero if this chip is an XScale. */
+int arm_arch_xscale = 0;
+
+/* Nonzero if tuning for XScale. */
+int arm_tune_xscale = 0;
+
+/* Nonzero if we want to tune for stores that access the write-buffer.
+ This typically means an ARM6 or ARM7 with MMU or MPU. */
+int arm_tune_wbuf = 0;
+
+/* Nonzero if generating Thumb instructions. */
+int thumb_code = 0;
+
+/* Nonzero if we should define __THUMB_INTERWORK__ in the
+ preprocessor.
+ XXX This is a bit of a hack, it's intended to help work around
+ problems in GLD which doesn't understand that armv5t code is
+ interworking clean. */
+int arm_cpp_interwork = 0;
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Nonzero if chip supports Thumb 2. */
+int arm_arch_thumb2;
+
+/* Nonzero if chip supports integer division instruction. */
+int arm_arch_hwdiv;
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
+ must report the mode of the memory reference from PRINT_OPERAND to
+ PRINT_OPERAND_ADDRESS. */
+enum machine_mode output_memory_reference_mode;
+
+/* The register number to be used for the PIC offset register. */
+unsigned arm_pic_register = INVALID_REGNUM;
+
+/* Set to 1 when a return insn is output; this means that the epilogue
+ is not needed. */
+int return_used_this_function;
+
+/* Set to 1 after arm_reorg has started. Reset at the start of
+ the next function. */
+static int after_arm_reorg = 0;
+
+/* The maximum number of insns to be used when loading a constant. */
+static int arm_constant_limit = 3;
+
+/* For an explanation of these variables, see final_prescan_insn below. */
+int arm_ccfsm_state;
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* arm_current_cc is also used for Thumb-2 cond_exec blocks. */
+enum arm_cond_code arm_current_cc;
+rtx arm_target_insn;
+int arm_target_label;
+/* The number of conditionally executed insns, including the current insn. */
+int arm_condexec_count = 0;
+/* A bitmask specifying the patterns for the IT block.
+ Zero means do not output an IT block before this insn. */
+int arm_condexec_mask = 0;
+/* The number of bits used in arm_condexec_mask. */
+int arm_condexec_masklen = 0;
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* The condition codes of the ARM, and the inverse function. */
+static const char * const arm_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
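+/* Note the pairing: the inverse of the condition at index i is at index
+ i ^ 1 (eq/ne, cs/cc, mi/pl, ...), which is the property the
+ ARM_INVERSE_CONDITION_CODE macro in arm.h relies on. */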
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
+#define streq(string1, string2) (strcmp (string1, string2) == 0)
+
+#define THUMB2_WORK_REGS (0xff & ~( (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
+ | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
+ | (1 << PIC_OFFSET_TABLE_REGNUM)))
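+/* That is, the low registers r0-r7 minus the Thumb frame pointer, stack
+ pointer, program counter and PIC base register (the SP and PC bits lie
+ above bit 7, so those terms are purely defensive). */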
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Initialization code. */
+
+struct processors
+{
+ const char *const name;
+ enum processor_type core;
+ const char *arch;
+ const unsigned long flags;
+ bool (* rtx_costs) (rtx, int, int, int *);
+};
+
+/* Not all of these give usefully different compilation alternatives,
+ but there is no simple way of generalizing them. */
+static const struct processors all_cores[] =
+{
+ /* ARM Cores */
+#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
+#include "arm-cores.def"
+#undef ARM_CORE
+ {NULL, arm_none, NULL, 0, NULL}
+};
+
+static const struct processors all_architectures[] =
+{
+ /* ARM Architectures */
+ /* We don't specify rtx_costs here as it will be figured out
+ from the core. */
+
+ {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
+ {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
+ {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
+ {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
+ {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
+ /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
+ implementations that support it, so we will leave it out for now. */
+/* APPLE LOCAL begin ARM custom architectures */
+#if TARGET_MACHO
+ {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4, NULL},
+#else
+ {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
+#endif
+/* APPLE LOCAL end ARM custom architectures */
+ {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
+ {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
+ {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
+ {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
+/* APPLE LOCAL begin ARM custom architectures */
+#if TARGET_MACHO
+ {"armv5tej",arm926ejs, "5TEJ",FL_CO_PROC | FL_FOR_ARCH5TEJ, NULL},
+ {"xscale", xscale, "5TE", FL_CO_PROC | FL_XSCALE | FL_FOR_ARCH5TE, NULL},
+ {"armv6", arm1136jfs, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
+ {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
+ {"armv6k", arm1136jfs, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
+#else
+ {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
+ {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
+ {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
+#endif
+/* APPLE LOCAL end ARM custom architectures */
+ {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
+ {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC | FL_FOR_ARCH6T2, NULL},
+ {"armv7", cortexa8, "7", FL_CO_PROC | FL_FOR_ARCH7, NULL},
+ {"armv7a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL},
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ {"armv7r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL},
+ {"armv7m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ {"armv7-a", cortexa8, "7A", FL_CO_PROC | FL_FOR_ARCH7A, NULL},
+ {"armv7-r", cortexr4, "7R", FL_CO_PROC | FL_FOR_ARCH7R, NULL},
+ {"armv7-m", cortexm3, "7M", FL_CO_PROC | FL_FOR_ARCH7M, NULL},
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
+ {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
+ {NULL, arm_none, NULL, 0 , NULL}
+};
+
+struct arm_cpu_select
+{
+ const char * string;
+ const char * name;
+ const struct processors * processors;
+};
+
+/* This is a magic structure. The 'string' field is magically filled in
+ with a pointer to the value specified by the user on the command line,
+ assuming that the user has specified such a value. */
+
+static struct arm_cpu_select arm_select[] =
+{
+ /* string name processors */
+ { NULL, "-mcpu=", all_cores },
+ { NULL, "-march=", all_architectures },
+ { NULL, "-mtune=", all_cores }
+};
+
+/* Defines representing the indexes into the above table. */
+#define ARM_OPT_SET_CPU 0
+#define ARM_OPT_SET_ARCH 1
+#define ARM_OPT_SET_TUNE 2
+
+/* The name of the preprocessor macro to define for this architecture. */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#define ARM_ARCH_NAME_SIZE 25
+char arm_arch_name[ARM_ARCH_NAME_SIZE] = "__ARM_ARCH_0UNK__";
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
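+/* Filled in by arm_override_options below; e.g. -march=armv6k makes this
+ "__ARM_ARCH_6K__", which is then used when setting up the target
+ preprocessor defines. */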
+
+struct fpu_desc
+{
+ const char * name;
+ enum fputype fpu;
+};
+
+
+/* Available values for -mfpu=. */
+
+static const struct fpu_desc all_fpus[] =
+{
+ {"fpa", FPUTYPE_FPA},
+ {"fpe2", FPUTYPE_FPA_EMU2},
+ {"fpe3", FPUTYPE_FPA_EMU2},
+ {"maverick", FPUTYPE_MAVERICK},
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ {"vfp", FPUTYPE_VFP},
+ {"vfp3", FPUTYPE_VFP3},
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ {"neon", FPUTYPE_NEON}
+};
+
+
+/* Floating point models used by the different hardware.
+ See fputype in arm.h. */
+
+static const enum fputype fp_model_for_fpu[] =
+{
+ /* No FP hardware. */
+ ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
+ ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
+ ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
+ ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
+ ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
+/* APPLE LOCAL v7 support. Merge from mainline */
+ ARM_FP_MODEL_VFP, /* FPUTYPE_VFP */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ ARM_FP_MODEL_VFP, /* FPUTYPE_VFP3 */
+ ARM_FP_MODEL_VFP /* FPUTYPE_NEON */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+};
+
+
+struct float_abi
+{
+ const char * name;
+ enum float_abi_type abi_type;
+};
+
+
+/* Available values for -mfloat-abi=. */
+
+static const struct float_abi all_float_abis[] =
+{
+ {"soft", ARM_FLOAT_ABI_SOFT},
+ {"softfp", ARM_FLOAT_ABI_SOFTFP},
+ {"hard", ARM_FLOAT_ABI_HARD}
+};
+
+
+struct abi_name
+{
+ const char *name;
+ enum arm_abi_type abi_type;
+};
+
+
+/* Available values for -mabi=. */
+
+static const struct abi_name arm_all_abis[] =
+{
+ {"apcs-gnu", ARM_ABI_APCS},
+ {"atpcs", ARM_ABI_ATPCS},
+ {"aapcs", ARM_ABI_AAPCS},
+ {"iwmmxt", ARM_ABI_IWMMXT},
+ {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
+};
+
+/* Supported TLS relocations. */
+
+enum tls_reloc {
+ TLS_GD32,
+ TLS_LDM32,
+ TLS_LDO32,
+ TLS_IE32,
+ TLS_LE32
+};
+
+/* Emit an insn that's a simple single-set. Both the operands must be known
+ to be valid. */
+inline static rtx
+emit_set_insn (rtx x, rtx y)
+{
+ return emit_insn (gen_rtx_SET (VOIDmode, x, y));
+}
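+/* For example, emit_set_insn (dest, const0_rtx) emits the single insn
+ (set dest (const_int 0)). */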
+
+/* Return the number of bits set in VALUE. */
+static unsigned
+bit_count (unsigned long value)
+{
+ unsigned long count = 0;
+
+ while (value)
+ {
+ count++;
+ value &= value - 1; /* Clear the least-significant set bit. */
+ }
+
+ return count;
+}
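+/* This is Kernighan's method: each iteration clears the lowest set bit,
+ so e.g. value = 0x29 (binary 101001) loops exactly three times and
+ returns 3. */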
+
+/* APPLE LOCAL begin ARM custom frame layout */
+/* Generate a bitmask that has bits end:start (inclusive) set. */
+static unsigned long
+inclusive_bitmask (int start, int end)
+{
+ return (((unsigned long) 1 << start) - 1)
+ ^ (((unsigned long) 1 << (end + 1)) - 1);
+}
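+/* For example, inclusive_bitmask (4, 7) computes 0x0f ^ 0xff = 0xf0,
+ i.e. bits 4 through 7 set. */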
+/* APPLE LOCAL end ARM custom frame layout */
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* These are library functions, but calls to them are not
+ represented as calls in the RTL because they do not have
+ normal function-call semantics. We generate the
+ Mach-O stuff lazily in this case. */
+
+void register_switch8_libfunc (void)
+{
+#if TARGET_MACHO
+ if (switch8_libfunc == NULL)
+ switch8_libfunc = gen_rtx_SYMBOL_REF (Pmode,
+ ggc_alloc_string ("__switch8", sizeof ("__switch8")));
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ machopic_validate_stub_or_non_lazy_ptr
+ (machopic_indirection_name (switch8_libfunc, true));
+#endif
+}
+
+void register_switchu8_libfunc (void)
+{
+#if TARGET_MACHO
+ if (switchu8_libfunc == NULL)
+ switchu8_libfunc = gen_rtx_SYMBOL_REF (Pmode,
+ ggc_alloc_string ("__switchu8", sizeof ("__switchu8")));
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ machopic_validate_stub_or_non_lazy_ptr
+ (machopic_indirection_name (switchu8_libfunc, true));
+#endif
+}
+
+void register_switch16_libfunc (void)
+{
+#if TARGET_MACHO
+ if (switch16_libfunc == NULL)
+ switch16_libfunc = gen_rtx_SYMBOL_REF (Pmode,
+ ggc_alloc_string ("__switch16", sizeof ("__switch16")));
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ machopic_validate_stub_or_non_lazy_ptr
+ (machopic_indirection_name (switch16_libfunc, true));
+#endif
+}
+
+void register_switch32_libfunc (void)
+{
+#if TARGET_MACHO
+ if (switch32_libfunc == NULL)
+ switch32_libfunc = gen_rtx_SYMBOL_REF (Pmode,
+ ggc_alloc_string ("__switch32", sizeof ("__switch32")));
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ machopic_validate_stub_or_non_lazy_ptr
+ (machopic_indirection_name (switch32_libfunc, true));
+#endif
+}
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* Set up library functions unique to ARM. */
+
+static void
+arm_init_libfuncs (void)
+{
+ /* APPLE LOCAL begin ARM 4702983 Thumb VFP math */
+ if (TARGET_MACHO && TARGET_THUMB && !TARGET_SOFT_FLOAT
+ && (flag_pic || MACHO_DYNAMIC_NO_PIC_P))
+ {
+ /* Double-precision floating-point arithmetic. */
+ set_optab_libfunc (add_optab, DFmode, "__adddf3vfp");
+ set_optab_libfunc (sdiv_optab, DFmode, "__divdf3vfp");
+ set_optab_libfunc (smul_optab, DFmode, "__muldf3vfp");
+ set_optab_libfunc (neg_optab, DFmode, NULL);
+ set_optab_libfunc (sub_optab, DFmode, "__subdf3vfp");
+
+ /* Double-precision comparisons. */
+ set_optab_libfunc (eq_optab, DFmode, "__eqdf2vfp");
+ set_optab_libfunc (ne_optab, DFmode, "__nedf2vfp");
+ set_optab_libfunc (lt_optab, DFmode, "__ltdf2vfp");
+ set_optab_libfunc (le_optab, DFmode, "__ledf2vfp");
+ set_optab_libfunc (ge_optab, DFmode, "__gedf2vfp");
+ set_optab_libfunc (gt_optab, DFmode, "__gtdf2vfp");
+ set_optab_libfunc (unord_optab, DFmode, "__unorddf2vfp");
+
+ /* Single-precision floating-point arithmetic. */
+ set_optab_libfunc (add_optab, SFmode, "__addsf3vfp");
+ set_optab_libfunc (sdiv_optab, SFmode, "__divsf3vfp");
+ set_optab_libfunc (smul_optab, SFmode, "__mulsf3vfp");
+ set_optab_libfunc (neg_optab, SFmode, NULL);
+ set_optab_libfunc (sub_optab, SFmode, "__subsf3vfp");
+
+ /* Single-precision comparisons. */
+ set_optab_libfunc (eq_optab, SFmode, "__eqsf2vfp");
+ set_optab_libfunc (ne_optab, SFmode, "__nesf2vfp");
+ set_optab_libfunc (lt_optab, SFmode, "__ltsf2vfp");
+ set_optab_libfunc (le_optab, SFmode, "__lesf2vfp");
+ set_optab_libfunc (ge_optab, SFmode, "__gesf2vfp");
+ set_optab_libfunc (gt_optab, SFmode, "__gtsf2vfp");
+ set_optab_libfunc (unord_optab, SFmode, "__unordsf2vfp");
+
+ /* Floating-point to integer conversions. */
+ /* DImode conversions are done via library routines even
+ when generating VFP instructions, so use the same ones. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__fixdfsivfp");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__fixunsdfsivfp");
+ set_conv_libfunc (sfix_optab, SImode, SFmode, "__fixsfsivfp");
+ set_conv_libfunc (ufix_optab, SImode, SFmode, "__fixunssfsivfp");
+
+ /* Conversions between floating types. */
+ set_conv_libfunc (trunc_optab, SFmode, DFmode, "__truncdfsf2vfp");
+ set_conv_libfunc (sext_optab, DFmode, SFmode, "__extendsfdf2vfp");
+
+ /* Integer to floating-point conversions. */
+ /* DImode conversions are done via library routines even
+ when generating VFP instructions, so use the same ones. */
+ set_conv_libfunc (sfloat_optab, DFmode, SImode, "__floatsidfvfp");
+ set_conv_libfunc (ufloat_optab, DFmode, SImode, "__floatunssidfvfp");
+ set_conv_libfunc (sfloat_optab, SFmode, SImode, "__floatsisfvfp");
+ set_conv_libfunc (ufloat_optab, SFmode, SImode, "__floatunssisfvfp");
+ return;
+ }
+ /* APPLE LOCAL end ARM 4702983 Thumb VFP math */
+
+ /* There are no special library functions unless we are using the
+ ARM BPABI. */
+ if (!TARGET_BPABI)
+ return;
+
+ /* The functions below are described in Section 4 of the "Run-Time
+ ABI for the ARM architecture", Version 1.0. */
+
+ /* Double-precision floating-point arithmetic. Table 2. */
+ set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
+ set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
+ set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
+ set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
+ set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
+
+ /* Double-precision comparisons. Table 3. */
+ set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
+ set_optab_libfunc (ne_optab, DFmode, NULL);
+ set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
+ set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
+ set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
+ set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
+ set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
+
+ /* Single-precision floating-point arithmetic. Table 4. */
+ set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
+ set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
+ set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
+ set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
+ set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
+
+ /* Single-precision comparisons. Table 5. */
+ set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
+ set_optab_libfunc (ne_optab, SFmode, NULL);
+ set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
+ set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
+ set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
+ set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
+ set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
+
+ /* Floating-point to integer conversions. Table 6. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
+ set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
+ set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
+ set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
+ set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
+ set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
+ set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
+
+ /* Conversions between floating types. Table 7. */
+ set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
+ set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
+
+ /* Integer to floating-point conversions. Table 8. */
+ set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
+ set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
+ set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
+ set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
+ set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
+ set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
+ set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
+ set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
+
+ /* Long long. Table 9. */
+ set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
+ set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
+ set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
+ set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
+ set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
+ set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
+ set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
+ set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
+
+ /* Integer (32/32->32) division. \S 4.3.1. */
+ set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
+ set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
+
+ /* The divmod functions are designed so that they can be used for
+ plain division, even though they return both the quotient and the
+ remainder. The quotient is returned in the usual location (i.e.,
+ r0 for SImode, {r0, r1} for DImode), just as would be expected
+ for an ordinary division routine. Because the AAPCS calling
+ conventions specify that all of { r0, r1, r2, r3 } are
+ call-clobbered registers, there is no need to tell the compiler
+ explicitly that those registers are clobbered by these
+ routines. */
+ set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
+ set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
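+/* So a plain division such as `long long q = a / b;' becomes a call to
+ __aeabi_ldivmod; the quotient comes back in {r0, r1} as usual, and the
+ remainder, ignored here, in {r2, r3}. */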
+
+ /* For SImode division the ABI provides div-without-mod routines,
+ which are faster. */
+ set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
+ set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
+
+ /* We don't have mod libcalls. Fortunately gcc knows how to use the
+ divmod libcalls instead. */
+ set_optab_libfunc (smod_optab, DImode, NULL);
+ set_optab_libfunc (umod_optab, DImode, NULL);
+ set_optab_libfunc (smod_optab, SImode, NULL);
+ set_optab_libfunc (umod_optab, SImode, NULL);
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ case OPT_march_:
+ arm_select[1].string = arg;
+ return true;
+
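+ /* e.g. -mcpu=arm926ej-s stores "arm926ej-s" here, to be matched
+ against all_cores by arm_override_options. */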
+ case OPT_mcpu_:
+ arm_select[0].string = arg;
+ return true;
+
+ case OPT_mhard_float:
+ target_float_abi_name = "hard";
+ return true;
+
+ case OPT_msoft_float:
+ target_float_abi_name = "soft";
+ return true;
+
+ case OPT_mtune_:
+ arm_select[2].string = arg;
+ return true;
+
+ default:
+ return true;
+ }
+}
+
+/* Fix up any incompatible options that the user has specified.
+ This has now turned into a maze. */
+void
+arm_override_options (void)
+{
+ unsigned i;
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ int len;
+ enum processor_type target_arch_cpu = arm_none;
+
+ /* Set up the flags based on the cpu/architecture selected by the user. */
+ for (i = ARRAY_SIZE (arm_select); i--;)
+ {
+ struct arm_cpu_select * ptr = arm_select + i;
+
+ if (ptr->string != NULL && ptr->string[0] != '\0')
+ {
+ const struct processors * sel;
+
+ for (sel = ptr->processors; sel->name != NULL; sel++)
+ if (streq (ptr->string, sel->name))
+ {
+ /* Set the architecture define. */
+ if (i != ARM_OPT_SET_TUNE)
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ {
+ len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE,
+ "__ARM_ARCH_%s__", sel->arch);
+ gcc_assert (len < ARM_ARCH_NAME_SIZE);
+ }
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+ /* Determine the processor core for which we should
+ tune code-generation. */
+ if (/* -mcpu= is a sensible default. */
+ i == ARM_OPT_SET_CPU
+ /* -mtune= overrides -mcpu= and -march=. */
+ || i == ARM_OPT_SET_TUNE)
+ arm_tune = (enum processor_type) (sel - ptr->processors);
+
+ /* Remember the CPU associated with this architecture.
+ If no other option is used to set the CPU type,
+ we'll use this to guess the most suitable tuning
+ options. */
+ if (i == ARM_OPT_SET_ARCH)
+ target_arch_cpu = sel->core;
+
+ if (i != ARM_OPT_SET_TUNE)
+ {
+/* APPLE LOCAL begin ARM darwin driver */
+#if !TARGET_MACHO
+ /* If we have been given an architecture and a processor
+ make sure that they are compatible. We only generate
+ a warning though, and we prefer the CPU over the
+ architecture. */
+ if (insn_flags != 0 && (insn_flags ^ sel->flags))
+ warning (0, "switch -mcpu=%s conflicts with -march= switch",
+ ptr->string);
+#else
+ /* More likely the -march was inherited from -arch which
+ had to be given to the darwin driver to get the correct
+ compiler. So, make it relatively painless to specify
+ -mcpu=... by not warning that it supersedes -march. */
+#endif
+/* APPLE LOCAL end ARM darwin driver */
+
+ insn_flags = sel->flags;
+ }
+
+ break;
+ }
+
+ if (sel->name == NULL)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* Guess the tuning options from the architecture if necessary. */
+ if (arm_tune == arm_none)
+ arm_tune = target_arch_cpu;
+
+ /* If the user did not specify a processor, choose one for them. */
+ if (insn_flags == 0)
+ {
+ const struct processors * sel;
+ unsigned int sought;
+ enum processor_type cpu;
+
+ cpu = TARGET_CPU_DEFAULT;
+ if (cpu == arm_none)
+ {
+#ifdef SUBTARGET_CPU_DEFAULT
+ /* Use the subtarget default CPU if none was specified by
+ configure. */
+ cpu = SUBTARGET_CPU_DEFAULT;
+#endif
+ /* Default to ARM6. */
+ if (cpu == arm_none)
+ cpu = arm6;
+ }
+ sel = &all_cores[cpu];
+
+ insn_flags = sel->flags;
+
+ /* Now check to see if the user has specified any command line
+ switches that require certain abilities from the cpu. */
+ sought = 0;
+
+ if (TARGET_INTERWORK || TARGET_THUMB)
+ {
+ sought |= (FL_THUMB | FL_MODE32);
+
+ /* There are no ARM processors that support both APCS-26 and
+ interworking. Therefore we force FL_MODE26 to be removed
+ from insn_flags here (if it was set), so that the search
+ below will always be able to find a compatible processor. */
+ insn_flags &= ~FL_MODE26;
+ }
+
+ if (sought != 0 && ((sought & insn_flags) != sought))
+ {
+ /* Try to locate a CPU type that supports all of the abilities
+ of the default CPU, plus the extra abilities requested by
+ the user. */
+ for (sel = all_cores; sel->name != NULL; sel++)
+ if ((sel->flags & sought) == (sought | insn_flags))
+ break;
+
+ if (sel->name == NULL)
+ {
+ unsigned current_bit_count = 0;
+ const struct processors * best_fit = NULL;
+
+ /* Ideally we would like to issue an error message here
+ saying that it was not possible to find a CPU compatible
+ with the default CPU, but which also supports the command
+ line options specified by the programmer, and so they
+ ought to use the -mcpu=<name> command line option to
+ override the default CPU type.
+
+ If we cannot find a cpu that has both the
+ characteristics of the default cpu and the given
+ command line options we scan the array again looking
+ for a best match. */
+ for (sel = all_cores; sel->name != NULL; sel++)
+ if ((sel->flags & sought) == sought)
+ {
+ unsigned count;
+
+ count = bit_count (sel->flags & insn_flags);
+
+ if (count >= current_bit_count)
+ {
+ best_fit = sel;
+ current_bit_count = count;
+ }
+ }
+
+ gcc_assert (best_fit);
+ sel = best_fit;
+ }
+
+ insn_flags = sel->flags;
+ }
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+ len = snprintf (arm_arch_name, ARM_ARCH_NAME_SIZE,
+ "__ARM_ARCH_%s__", sel->arch);
+ gcc_assert (len < ARM_ARCH_NAME_SIZE);
+
+ arm_default_cpu = (enum processor_type) (sel - all_cores);
+ if (arm_tune == arm_none)
+ arm_tune = arm_default_cpu;
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ }
+
+ /* The processor for which we should tune should now have been
+ chosen. */
+ gcc_assert (arm_tune != arm_none);
+
+ tune_flags = all_cores[(int)arm_tune].flags;
+ if (optimize_size)
+ targetm.rtx_costs = arm_size_rtx_costs;
+ else
+ targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
+
+ /* Make sure that the processor choice does not conflict with any of the
+ other command line choices. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_ARM && !(insn_flags & FL_NOTM))
+ error ("target CPU does not support ARM mode");
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+ if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
+ {
+ /* APPLE LOCAL begin ARM interworking */
+ /* Don't emit warning for MACHO -- see radar://4825264 */
+ if (! TARGET_MACHO)
+ warning (0, "target CPU does not support interworking" );
+
+ interwork_option = 0;
+ /* APPLE LOCAL end ARM interworking */
+ }
+
+ /* APPLE LOCAL begin 6150882 use thumb2 by default for v7 */
+ /* If we're compiling for v7, we should default to using thumb2
+ codegen. */
+ if ((insn_flags & FL_FOR_ARCH7A) == FL_FOR_ARCH7A
+ && thumb_option < 0)
+ thumb_option = 1;
+ /* APPLE LOCAL end 6150882 use thumb2 by default for v7 */
+
+ if (TARGET_THUMB && !(insn_flags & FL_THUMB))
+ {
+ warning (0, "target CPU does not support THUMB instructions");
+ /* APPLE LOCAL 6150882 use thumb2 by default for v7 */
+ thumb_option = 0;
+ }
+
+ if (TARGET_APCS_FRAME && TARGET_THUMB)
+ {
+ /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
+ target_flags &= ~MASK_APCS_FRAME;
+ }
+
+ /* Callee super interworking implies thumb interworking. Adding
+ this to the flags here simplifies the logic elsewhere. */
+ if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
+ /* APPLE LOCAL ARM interworking */
+ interwork_option = 1;
+
+ /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
+ from here where no function is being compiled currently. */
+ if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
+ warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
+
+ if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
+ warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
+
+ if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
+ warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
+
+ if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
+ {
+ warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
+ target_flags |= MASK_APCS_FRAME;
+ }
+
+ if (TARGET_POKE_FUNCTION_NAME)
+ target_flags |= MASK_APCS_FRAME;
+
+ if (TARGET_APCS_REENT && flag_pic)
+ error ("-fpic and -mapcs-reent are incompatible");
+
+ if (TARGET_APCS_REENT)
+ warning (0, "APCS reentrant code not supported. Ignored");
+
+ /* If this target is normally configured to use APCS frames, warn if they
+ are turned off and debugging is turned on. */
+ if (TARGET_ARM
+ && write_symbols != NO_DEBUG
+ && !TARGET_APCS_FRAME
+ && (TARGET_DEFAULT & MASK_APCS_FRAME))
+ warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
+
+ /* APPLE LOCAL begin 6879229 disallow -fasm-blocks */
+ if (flag_iasm_blocks)
+ error ("-fasm-blocks option not supported for ARM");
+ /* APPLE LOCAL end 6879229 disallow -fasm-blocks */
+
+ /* If stack checking is disabled, we can use r10 as the PIC register,
+ which keeps r9 available. */
+ /* APPLE LOCAL ARM pic support */
+ if (flag_pic && TARGET_SINGLE_PIC_BASE && !TARGET_MACHO)
+ arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
+
+ if (TARGET_APCS_FLOAT)
+ warning (0, "passing floating point arguments in fp regs not yet supported");
+
+ /* Initialize boolean versions of the flags, for use in the arm.md file. */
+ arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
+ arm_arch4 = (insn_flags & FL_ARCH4) != 0;
+ arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
+ arm_arch5 = (insn_flags & FL_ARCH5) != 0;
+ arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
+ arm_arch6 = (insn_flags & FL_ARCH6) != 0;
+ arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
+ /* APPLE LOCAL 6093388 -mfpu=neon default for v7a */
+ arm_arch7a = (insn_flags & FL_FOR_ARCH7A) == FL_FOR_ARCH7A;
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ arm_arch_notm = (insn_flags & FL_NOTM) != 0;
+ arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
+ arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
+
+ arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
+ arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
+ thumb_code = (TARGET_ARM == 0);
+ arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
+ arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
+ arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* If we are not using the default (ARM mode) section anchor offset
+ ranges, then set the correct ranges now. */
+ if (TARGET_THUMB1)
+ {
+ /* Thumb-1 LDR instructions cannot have negative offsets.
+ Permissible positive offset ranges are 5-bit (for byte loads),
+ 6-bit (for halfword loads), or 7-bit (for word loads).
+ Empirical results suggest a 7-bit anchor range gives the best
+ overall code size. */
+ targetm.min_anchor_offset = 0;
+ targetm.max_anchor_offset = 127;
+ }
+ else if (TARGET_THUMB2)
+ {
+ /* The minimum is set such that the total size of the block
+ for a particular anchor is 248 + 1 + 4095 bytes, which is
+ divisible by eight, ensuring natural spacing of anchors. */
+ targetm.min_anchor_offset = -248;
+ targetm.max_anchor_offset = 4095;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ /* APPLE LOCAL begin ARM interworking */
+ /* Choose a default interworking setting if not specified on the
+ command line. */
+ if (interwork_option == -1)
+ interwork_option = arm_arch5 ? 1 : 0;
+
+ /* XXX However, we must pass the right pre-processor defines to CPP
+ or GLD can get confused. This is a hack. */
+ if (TARGET_INTERWORK)
+ arm_cpp_interwork = 1;
+ /* APPLE LOCAL end ARM interworking */
+
+ if (target_abi_name)
+ {
+ for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
+ {
+ if (streq (arm_all_abis[i].name, target_abi_name))
+ {
+ arm_abi = arm_all_abis[i].abi_type;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE (arm_all_abis))
+ error ("invalid ABI option: -mabi=%s", target_abi_name);
+ }
+ else
+ arm_abi = ARM_DEFAULT_ABI;
+
+ if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
+ error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
+
+ if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
+ error ("iwmmxt abi requires an iwmmxt capable cpu");
+
+ arm_fp_model = ARM_FP_MODEL_UNKNOWN;
+ if (target_fpu_name == NULL && target_fpe_name != NULL)
+ {
+ if (streq (target_fpe_name, "2"))
+ target_fpu_name = "fpe2";
+ else if (streq (target_fpe_name, "3"))
+ target_fpu_name = "fpe3";
+ else
+ error ("invalid floating point emulation option: -mfpe=%s",
+ target_fpe_name);
+ }
+ if (target_fpu_name != NULL)
+ {
+ /* The user specified a FPU. */
+ for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
+ {
+ if (streq (all_fpus[i].name, target_fpu_name))
+ {
+ arm_fpu_arch = all_fpus[i].fpu;
+ arm_fpu_tune = arm_fpu_arch;
+ arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
+ break;
+ }
+ }
+ if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
+ error ("invalid floating point option: -mfpu=%s", target_fpu_name);
+ }
+ else
+ {
+#ifdef FPUTYPE_DEFAULT
+ /* Use the default if it is specified for this platform. */
+ arm_fpu_arch = FPUTYPE_DEFAULT;
+ arm_fpu_tune = FPUTYPE_DEFAULT;
+#else
+ /* Pick one based on CPU type. */
+ /* ??? Some targets assume FPA is the default.
+ if ((insn_flags & FL_VFP) != 0)
+ arm_fpu_arch = FPUTYPE_VFP;
+ else
+ */
+ if (arm_arch_cirrus)
+ arm_fpu_arch = FPUTYPE_MAVERICK;
+ else
+ arm_fpu_arch = FPUTYPE_FPA_EMU2;
+#endif
+ if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ arm_fpu_tune = FPUTYPE_FPA;
+ else
+ arm_fpu_tune = arm_fpu_arch;
+ arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
+ gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
+ }
+
+ if (target_float_abi_name != NULL)
+ {
+ /* The user specified a FP ABI. */
+ for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
+ {
+ if (streq (all_float_abis[i].name, target_float_abi_name))
+ {
+ arm_float_abi = all_float_abis[i].abi_type;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE (all_float_abis))
+ error ("invalid floating point abi: -mfloat-abi=%s",
+ target_float_abi_name);
+ }
+ else
+ arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
+
+ if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
+ sorry ("-mfloat-abi=hard and VFP");
+
+ /* FPA and iWMMXt are incompatible because the insn encodings overlap.
+ VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
+ will ever exist. GCC makes no attempt to support this combination. */
+ if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
+ sorry ("iWMMXt and hardware floating point");
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* ??? iWMMXt insn patterns need auditing for Thumb-2. */
+ if (TARGET_THUMB2 && TARGET_IWMMXT)
+ sorry ("Thumb-2 iWMMXt");
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* If soft-float is specified then don't use FPU. */
+ if (TARGET_SOFT_FLOAT)
+ arm_fpu_arch = FPUTYPE_NONE;
+
+ /* For arm2/3 there is no need to do any scheduling if there is only
+ a floating point emulator, or we are doing software floating-point. */
+ if ((TARGET_SOFT_FLOAT
+ || arm_fpu_tune == FPUTYPE_FPA_EMU2
+ || arm_fpu_tune == FPUTYPE_FPA_EMU3)
+ && (tune_flags & FL_MODE32) == 0)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+
+ if (target_thread_switch)
+ {
+ if (strcmp (target_thread_switch, "soft") == 0)
+ target_thread_pointer = TP_SOFT;
+ else if (strcmp (target_thread_switch, "auto") == 0)
+ target_thread_pointer = TP_AUTO;
+ else if (strcmp (target_thread_switch, "cp15") == 0)
+ target_thread_pointer = TP_CP15;
+ else
+ error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
+ }
+
+ /* Use the cp15 method if it is available. */
+ if (target_thread_pointer == TP_AUTO)
+ {
+ if (arm_arch6k && !TARGET_THUMB)
+ target_thread_pointer = TP_CP15;
+ else
+ target_thread_pointer = TP_SOFT;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_HARD_TP && TARGET_THUMB1)
+ error ("can not use -mtp=cp15 with 16-bit Thumb");
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ /* Override the default structure alignment for AAPCS ABI. */
+ if (TARGET_AAPCS_BASED)
+ arm_structure_size_boundary = 8;
+
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32
+ || (ARM_DOUBLEWORD_ALIGN && size == 64))
+ arm_structure_size_boundary = size;
+ else
+ warning (0, "structure size boundary can only be set to %s",
+ ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
+ }
+
+ if (arm_pic_register_string != NULL)
+ {
+ int pic_register = decode_reg_name (arm_pic_register_string);
+
+ if (!flag_pic)
+ warning (0, "-mpic-register= is useless without -fpic");
+
+ /* Prevent the user from choosing an obviously stupid PIC register. */
+ else if (pic_register < 0 || call_used_regs[pic_register]
+ || pic_register == HARD_FRAME_POINTER_REGNUM
+ || pic_register == STACK_POINTER_REGNUM
+ || pic_register >= PC_REGNUM)
+ error ("unable to use '%s' for PIC register", arm_pic_register_string);
+ else
+ arm_pic_register = pic_register;
+ }
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? We might want scheduling for thumb2. */
+ if (TARGET_THUMB && flag_schedule_insns)
+ {
+ /* Don't warn since it's on by default in -O2. */
+ flag_schedule_insns = 0;
+ }
+
+ if (optimize_size)
+ {
+ arm_constant_limit = 1;
+
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM). */
+ max_insns_skipped = 6;
+ }
+ else
+ {
+ /* For processors with load scheduling, it never costs more than
+ 2 cycles to load a constant, and the load scheduler may well
+ reduce that to 1. */
+ if (arm_ld_sched)
+ arm_constant_limit = 1;
+
+ /* On XScale the longer latency of a load makes it more difficult
+ to achieve a good schedule, so it's faster to synthesize
+ constants that can be done in two insns. */
+ if (arm_tune_xscale)
+ arm_constant_limit = 2;
+
+ /* StrongARM has early execution of branches, so a sequence
+ that is worth skipping is shorter. */
+ if (arm_tune_strongarm)
+ max_insns_skipped = 3;
+ }
+
+/* APPLE LOCAL begin ARM darwin options */
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
+ SUBSUBTARGET_OVERRIDE_OPTIONS;
+#endif
+/* APPLE LOCAL end ARM darwin options */
+
+ /* Register global variables with the garbage collector. */
+ arm_add_gc_roots ();
+}
+
+static void
+arm_add_gc_roots (void)
+{
+ gcc_obstack_init(&minipool_obstack);
+ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
+}
+
+/* A table of known ARM exception types.
+ For use with the interrupt function attribute. */
+
+typedef struct
+{
+ const char *const arg;
+ const unsigned long return_value;
+}
+isr_attribute_arg;
+
+static const isr_attribute_arg isr_attribute_args [] =
+{
+ { "IRQ", ARM_FT_ISR },
+ { "irq", ARM_FT_ISR },
+ { "FIQ", ARM_FT_FIQ },
+ { "fiq", ARM_FT_FIQ },
+ { "ABORT", ARM_FT_ISR },
+ { "abort", ARM_FT_ISR },
+ { "ABORT", ARM_FT_ISR },
+ { "abort", ARM_FT_ISR },
+ { "UNDEF", ARM_FT_EXCEPTION },
+ { "undef", ARM_FT_EXCEPTION },
+ { "SWI", ARM_FT_EXCEPTION },
+ { "swi", ARM_FT_EXCEPTION },
+ { NULL, ARM_FT_NORMAL }
+};
+
+/* Returns the (interrupt) function type of the current
+ function, or ARM_FT_UNKNOWN if the type cannot be determined. */
+
+static unsigned long
+arm_isr_value (tree argument)
+{
+ const isr_attribute_arg * ptr;
+ const char * arg;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (!arm_arch_notm)
+ return ARM_FT_NORMAL | ARM_FT_STACKALIGN;
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* No argument - default to IRQ. */
+ if (argument == NULL_TREE)
+ return ARM_FT_ISR;
+
+ /* Get the value of the argument. */
+ if (TREE_VALUE (argument) == NULL_TREE
+ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
+ return ARM_FT_UNKNOWN;
+
+ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
+
+ /* Check it against the list of known arguments. */
+ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
+ if (streq (arg, ptr->arg))
+ return ptr->return_value;
+
+ /* An unrecognized interrupt type. */
+ return ARM_FT_UNKNOWN;
+}
+
+/* Computes the type of the current function. */
+
+static unsigned long
+arm_compute_func_type (void)
+{
+ unsigned long type = ARM_FT_UNKNOWN;
+ tree a;
+ tree attr;
+
+ gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
+
+ /* Decide if the current function is volatile. Such functions
+ never return, and many memory cycles can be saved by not storing
+ register values that will never be needed again. This optimization
+ was added to speed up context switching in a kernel application. */
+ if (optimize > 0
+ && (TREE_NOTHROW (current_function_decl)
+ || !(flag_unwind_tables
+ || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
+ && TREE_THIS_VOLATILE (current_function_decl))
+ type |= ARM_FT_VOLATILE;
+
+ if (cfun->static_chain_decl != NULL)
+ type |= ARM_FT_NESTED;
+
+ attr = DECL_ATTRIBUTES (current_function_decl);
+
+ a = lookup_attribute ("naked", attr);
+ if (a != NULL_TREE)
+ type |= ARM_FT_NAKED;
+
+ a = lookup_attribute ("isr", attr);
+ if (a == NULL_TREE)
+ a = lookup_attribute ("interrupt", attr);
+
+ if (a == NULL_TREE)
+ /* APPLE LOCAL ARM interworking */
+ type |= (TARGET_INTERWORK && !arm_arch5) ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
+ else
+ type |= arm_isr_value (TREE_VALUE (a));
+
+ return type;
+}
+
+/* Returns the type of the current function. */
+
+unsigned long
+arm_current_func_type (void)
+{
+ if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
+ cfun->machine->func_type = arm_compute_func_type ();
+
+ return cfun->machine->func_type;
+}
+
+/* APPLE LOCAL begin ARM indirect sibcalls */
+/* Look for an indirect sibling call that uses a callee-saved reg.
+ We'll need to copy this reg to IP and change the call, since
+ the callee-saved reg will be clobbered by the restore of the old
+ value. (This negates the code size advantage of the sibcall, but
+ not the gain in stack size at runtime.) */
+
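+/* Schematically, the two shapes matched below are
+ (parallel [(call (mem (reg r)) ...) ...])
+ (parallel [(set (reg) (call (mem (reg r)) ...)) ...])
+ and in both cases we return the register number r. */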
+static int
+indirect_sibreturn_reg (rtx sibling, bool *is_value)
+{
+ if (GET_CODE (sibling) == CALL_INSN
+ && GET_CODE (PATTERN (sibling)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (sibling), 0, 0)) == CALL
+ && GET_CODE (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)) == REG)
+ {
+ *is_value = 0;
+ return REGNO (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0));
+ }
+ if (GET_CODE (sibling) == CALL_INSN
+ && GET_CODE (PATTERN (sibling)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (sibling), 0, 0)) == SET
+ && GET_CODE (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1)) == CALL
+ && GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)) == REG)
+ {
+ *is_value = 1;
+ return REGNO (XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0));
+ }
+ return -1;
+}
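+
+/* Roughly, the first test above matches sibcalls of the form
+ (parallel [(call (mem (reg rN)) ...) ...])
+ while the second matches value-returning sibcalls of the form
+ (parallel [(set (reg) (call (mem (reg rN)) ...)) ...]). */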
+
+/* Look for an indirect sibling call that uses a memory location, at
+ reg + or - constant; this will be a stack location, but registers
+ other than SP and FP are possible with large stack frames.
+ We'll need to load this location into IP and change the call, since
+ a memory location is not valid in the instruction. (The usual approach
+ of forcing reload to copy the value into a register through predicates
+ and constraints will not work here, as the load would come out after
+ the restore of FP and SP, too late.)
+ Return value: the signed offset from register *REG (usually SP or FP),
+ or NULL_RTX if this case doesn't apply.
+ We do not check for offsets too big to fit in a load, nor offsets in a
+ register; it is believed that these cases cannot occur. */
+
+static rtx
+indirect_sibreturn_mem (rtx sibling, rtx* reg, bool *is_value)
+{
+ rtx mem = NULL_RTX;
+ if (GET_CODE (sibling) == CALL_INSN
+ && GET_CODE (PATTERN (sibling)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (sibling), 0, 0)) == CALL
+ && GET_CODE (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)) == MEM)
+ {
+ *is_value = 0;
+ mem = XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0);
+ }
+ else if (GET_CODE (sibling) == CALL_INSN
+ && GET_CODE (PATTERN (sibling)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (sibling), 0, 0)) == SET
+ && GET_CODE (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1)) == CALL
+ && GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)) == MEM)
+ {
+ *is_value = 1;
+ mem = XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0);
+ }
+ if (mem
+ && GET_CODE (XEXP (mem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (mem, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (mem, 0), 1)) == CONST_INT)
+ {
+ *reg = XEXP (XEXP (mem, 0), 0);
+ return XEXP (XEXP (mem, 0), 1);
+ }
+ else if (mem && GET_CODE (XEXP (mem, 0)) == REG)
+ {
+ *reg = XEXP (mem, 0);
+ return const0_rtx;
+ }
+ return NULL_RTX;
+}
+/* APPLE LOCAL end ARM indirect sibcalls */
+
+/* Return 1 if it is possible to return using a single instruction.
+ If SIBLING is non-null, this is a test for a return before a sibling
+ call. SIBLING is the call insn, so we can examine its register usage. */
+
+int
+use_return_insn (int iscond, rtx sibling)
+{
+ int regno;
+ unsigned int func_type;
+ unsigned long saved_int_regs;
+ unsigned HOST_WIDE_INT stack_adjust;
+ arm_stack_offsets *offsets;
+
+ /* Never use a return instruction before reload has run. */
+ if (!reload_completed)
+ return 0;
+
+ func_type = arm_current_func_type ();
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Naked, volatile and stack alignment functions need special
+ consideration. */
+ if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
+ return 0;
+
+ /* So do interrupt functions that use the frame pointer and Thumb
+ interrupt functions. */
+ if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
+ return 0;
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ offsets = arm_get_frame_offsets ();
+ stack_adjust = offsets->outgoing_args - offsets->saved_regs;
+
+ /* As do variadic functions. */
+ if (current_function_pretend_args_size
+ || cfun->machine->uses_anonymous_args
+ /* Or if the function calls __builtin_eh_return () */
+ || current_function_calls_eh_return
+ /* Or if the function calls alloca */
+ || current_function_calls_alloca
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* Or if there is a stack adjustment. */
+ || stack_adjust != 0)
+ /* APPLE LOCAL end ARM custom frame layout */
+ return 0;
+
+ saved_int_regs = arm_compute_save_reg_mask ();
+
+ /* Unfortunately, the insn
+
+ ldmib sp, {..., sp, ...}
+
+ triggers a bug on most SA-110 based devices, such that the stack
+ pointer won't be correctly restored if the instruction takes a
+ page fault. We work around this problem by popping r3 along with
+ the other registers, since that is never slower than executing
+ another instruction.
+
+ We test for !arm_arch5 here, because code for any architecture
+ less than this could potentially be run on one of the buggy
+ chips. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
+ {
+ /* Validate that r3 is a call-clobbered register (always true in
+ the default abi) ... */
+ if (!call_used_regs[3])
+ return 0;
+
+ /* ... that it isn't being used for a return value ... */
+ if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
+ return 0;
+
+ /* ... or for a tail-call argument ... */
+ if (sibling)
+ {
+ gcc_assert (GET_CODE (sibling) == CALL_INSN);
+
+ if (find_regno_fusage (sibling, USE, 3))
+ return 0;
+
+ /* APPLE LOCAL begin ARM indirect sibcalls */
+ /* ... or to hold the target address for an indirect sibcall. */
+ {
+ bool ignored;
+ int regno = indirect_sibreturn_reg (sibling, &ignored);
+ if (regno == 3)
+ return 0;
+ }
+ /* APPLE LOCAL end ARM indirect sibcalls */
+ }
+
+ /* ... and that there are no call-saved registers in r0-r2
+ (always true in the default ABI). */
+ if (saved_int_regs & 0x7)
+ return 0;
+ }
+
+ /* Can't be done if interworking with Thumb, and any registers have been
+ stacked. */
+ /* APPLE LOCAL ARM interworking */
+ if (TARGET_INTERWORK && !arm_arch5 && saved_int_regs != 0)
+ return 0;
+
+ /* On StrongARM, conditional returns are expensive if they aren't
+ taken and multiple registers have been stacked. */
+ if (iscond && arm_tune_strongarm)
+ {
+ /* Conditional return when just the LR is stored is a simple
+ conditional-load instruction, that's not expensive. */
+ if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
+ return 0;
+
+ if (flag_pic
+ && arm_pic_register != INVALID_REGNUM
+ && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
+ return 0;
+ }
+
+ /* If there are saved registers but the LR isn't saved, then we need
+ two instructions for the return. */
+ if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
+ return 0;
+
+ /* APPLE LOCAL begin ARM indirect sibcalls */
+ /* If we have an indirect sibcall that uses a saved reg, we'll need
+ to copy that value into IP before restoring. */
+ if (sibling)
+ {
+ bool ignored;
+ int regno = indirect_sibreturn_reg (sibling, &ignored);
+ if (regno > 3 && regno != 12)
+ return 0;
+ if (regno == -1)
+ return 0;
+ }
+ /* APPLE LOCAL end ARM indirect sibcalls */
+
+ /* Can't be done if any of the FPA regs are pushed,
+ since this also requires an insn. */
+ if (TARGET_HARD_FLOAT && TARGET_FPA)
+ for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
+ if (regs_ever_live[regno] && !call_used_regs[regno])
+ return 0;
+
+ /* Likewise VFP regs. */
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
+ if (regs_ever_live[regno] && !call_used_regs[regno])
+ return 0;
+
+ if (TARGET_REALLY_IWMMXT)
+ for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs [regno])
+ return 0;
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* If anything above the frame pointer was saved, they were saved
+ below r0, which means we have to pop them in a separate
+ instruction. */
+ if (saved_int_regs & (1 << LR_REGNUM))
+ for (regno = ARM_HARD_FRAME_POINTER_REGNUM + 1; regno <= 11; regno++)
+ if (saved_int_regs & (1 << regno))
+ return 0;
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ return 1;
+}
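+
+/* When this returns 1 the whole epilogue can be a single instruction,
+ e.g. "ldmfd sp!, {r4, r5, pc}" when r4, r5 and lr were saved, or
+ "bx lr" when nothing was stacked. */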
+
+/* Return TRUE if int I is a valid immediate ARM constant. */
+
+int
+const_ok_for_arm (HOST_WIDE_INT i)
+{
+ int lowbit;
+
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ be all zero, or all one. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
+ && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
+ != ((~(unsigned HOST_WIDE_INT) 0)
+ & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
+ return FALSE;
+
+ i &= (unsigned HOST_WIDE_INT) 0xffffffff;
+
+ /* Fast return for 0 and small values. We must do this for zero, since
+ the code below can't handle that one case. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
+ return TRUE;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Get the number of trailing zeros. */
+ lowbit = ffs((int) i) - 1;
+
+ /* Only even shifts are allowed in ARM mode so round down to the
+ nearest even number. */
+ if (TARGET_ARM)
+ lowbit &= ~1;
+
+ if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
+ return TRUE;
+
+ if (TARGET_ARM)
+ {
+ /* Allow rotated constants in ARM mode. */
+ if (lowbit <= 4
+ && ((i & ~0xc000003f) == 0
+ || (i & ~0xf000000f) == 0
+ || (i & ~0xfc000003) == 0))
+ return TRUE;
+ }
+ else
+ {
+ HOST_WIDE_INT v;
+
+ /* Allow repeated pattern. */
+ v = i & 0xff;
+ v |= v << 16;
+ if (i == v || i == (v | (v << 8)))
+ return TRUE;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ return FALSE;
+}
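+
+/* For example, 0xff, 0xff00 and 0x104 (0x41 << 2) are valid ARM
+ immediates, while 0x101 is not: its set bits span nine bit positions
+ and so cannot fit in an 8-bit value rotated by an even amount. The
+ Thumb-2 path additionally accepts replicated byte patterns such as
+ 0x00ff00ff and 0xffffffff. */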
+
+/* Return true if I is a valid constant for the operation CODE. */
+static int
+const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
+{
+ if (const_ok_for_arm (i))
+ return 1;
+
+ switch (code)
+ {
+ case PLUS:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
+
+ case MINUS: /* Should only occur with (MINUS I reg) => rsb */
+ case XOR:
+ case IOR:
+ return 0;
+
+ case AND:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
+
+ default:
+ gcc_unreachable ();
+ }
+}
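+
+/* For example, (PLUS reg -1) is acceptable because 1 is a valid
+ immediate for the complementary SUB instruction, and (AND reg
+ 0xffffff00) is acceptable because 0xff is valid for BIC. */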
+
+/* Emit a sequence of insns to handle a large constant.
+ CODE is the code of the operation required, it can be any of SET, PLUS,
+ IOR, AND, XOR, MINUS;
+ MODE is the mode in which the operation is being performed;
+ VAL is the integer to operate on;
+ SOURCE is the other operand (a register, or a null-pointer for SET);
+ SUBTARGETS means it is safe to create scratch registers if that will
+ either produce a simpler sequence, or we will want to cse the values.
+ Return value is the number of insns emitted. */
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* ??? Tweak this for thumb2. */
+int
+arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
+ HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
+{
+ rtx cond;
+
+ if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
+ cond = COND_EXEC_TEST (PATTERN (insn));
+ else
+ cond = NULL_RTX;
+
+ if (subtargets || code == SET
+ || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ && REGNO (target) != REGNO (source)))
+ {
+ /* After arm_reorg has been called, we can't fix up expensive
+ constants by pushing them into memory so we must synthesize
+ them in-line, regardless of the cost. This is only likely to
+ be more costly on chips that have load delay slots and we are
+ compiling without running the scheduler (so no splitting
+ occurred before the final instruction emission).
+
+ Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
+ */
+ if (!after_arm_reorg
+ && !cond
+ && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
+ 1, 0)
+ > arm_constant_limit + (code != SET)))
+ {
+ if (code == SET)
+ {
+ /* Currently SET is the only monadic value for CODE; all
+ the rest are dyadic. */
+ emit_set_insn (target, GEN_INT (val));
+ return 1;
+ }
+ else
+ {
+ rtx temp = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_set_insn (temp, GEN_INT (val));
+ /* For MINUS, the value is subtracted from, since we never
+ have subtraction of a constant. */
+ if (code == MINUS)
+ emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
+ else
+ emit_set_insn (target,
+ gen_rtx_fmt_ee (code, mode, source, temp));
+ return 2;
+ }
+ }
+ }
+
+ return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
+ 1);
+}
+
+/* APPLE LOCAL begin 6258536 atomic builtins */
+/* A subroutine of the atomic operation splitter. Emit a load exclusive
+ instruction in MODE. */
+static void
+emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
+{
+ rtx (*fn) (rtx, rtx) = NULL;
+ switch (mode) {
+ case QImode:
+ fn = gen_load_locked_qi;
+ break;
+ case HImode:
+ fn = gen_load_locked_hi;
+ break;
+ case SImode:
+ fn = gen_load_locked_si;
+ break;
+ case DImode:
+ fn = gen_load_locked_di;
+ break;
+ default:
+ abort();
+ }
+ emit_insn (fn (reg, mem));
+}
+
+/* A subroutine of the atomic operation splitter. Emit a store-conditional
+ instruction in MODE. */
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+ rtx (*fn) (rtx, rtx, rtx) = NULL;
+ switch (mode) {
+ case QImode:
+ fn = gen_store_conditional_qi;
+ break;
+ case HImode:
+ fn = gen_store_conditional_hi;
+ break;
+ case SImode:
+ fn = gen_store_conditional_si;
+ break;
+ case DImode:
+ fn = gen_store_conditional_di;
+ break;
+ default:
+ abort();
+ }
+ emit_insn (fn (res, mem, val));
+}
+
+
+void
+arm_split_compare_and_swap(rtx dest, rtx mem, rtx oldval, rtx newval,
+ rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CC_REGNUM);
+ rtx dest_cmp, oldval_cmp;
+ rtx block_scratch, block_unspec;
+
+ block_scratch = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ block_unspec = gen_rtx_UNSPEC (BLKmode,
+ gen_rtvec (1, gen_rtx_MEM (BLKmode,
+ block_scratch)),
+ UNSPEC_BARRIER);
+
+ /* Use the insn patterns directly rather than the expander, since we're
+ post-reload here. The v6 pattern needs a scratch register and we
+ have one here already, so just reuse it. */
+ if (arm_arch7a)
+ emit_insn (gen_arm_memory_barrier_v7 (block_scratch, block_unspec));
+ else
+ emit_insn (gen_arm_memory_barrier_v6_explicit(block_scratch,
+ block_unspec, scratch));
+
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (mode, dest, mem);
+ /* If this is for a mode smaller than SI, zext to SI for the comparison. */
+ dest_cmp = dest;
+ oldval_cmp = oldval;
+ switch (mode)
+ {
+ case QImode: case HImode:
+ {
+ /* Zero-extend with the extension pattern matching the operand
+ mode; the QImode pattern is not valid for HImode operands. */
+ rtx (*ext) (rtx, rtx)
+ = (mode == QImode ? gen_zero_extendqisi2 : gen_zero_extendhisi2);
+ dest_cmp = gen_rtx_REG (SImode, REGNO (dest));
+ oldval_cmp = gen_rtx_REG (SImode, REGNO (oldval));
+ emit_insn (ext (dest_cmp, dest));
+ emit_insn (ext (oldval_cmp, oldval));
+ }
+ /* fall through */
+ case SImode:
+ x = gen_rtx_COMPARE (CCmode, dest_cmp, oldval_cmp);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label2, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ break;
+ case DImode:
+ {
+ rtx sub1, sub2;
+ /* compare the high word */
+ sub1 = gen_highpart (SImode, dest);
+ sub2 = gen_highpart (SImode, oldval);
+ x = gen_rtx_COMPARE (CCmode, sub1, sub2);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label2, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ /* compare the low word */
+ sub1 = gen_lowpart (SImode, dest);
+ sub2 = gen_lowpart (SImode, oldval);
+ x = gen_rtx_COMPARE (CCmode, sub1, sub2);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label2, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ break;
+ }
+ default:
+ /* Nothing else should get here. */
+ abort();
+ }
+
+ emit_store_conditional (mode, scratch, mem, newval);
+ x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label1, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+
+ if (arm_arch7a)
+ emit_insn (gen_arm_memory_barrier_v7 (block_scratch, block_unspec));
+ else
+ emit_insn (gen_arm_memory_barrier_v6_explicit(block_scratch,
+ block_unspec, scratch));
+ emit_label (XEXP (label2, 0));
+}
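+
+/* For SImode the splitter above emits, approximately:
+
+ dmb (or the v6 CP15 barrier)
+ 1: ldrex rD, [rM]
+ cmp rD, rOld
+ bne 2f
+ strex rS, rNew, [rM]
+ cmp rS, #0
+ bne 1b
+ dmb
+ 2:
+
+ where rD is DEST, rM addresses MEM, rOld is OLDVAL, rNew is NEWVAL
+ and rS is SCRATCH. */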
+/* APPLE LOCAL end 6258536 atomic builtins */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Return the number of ARM instructions required to synthesize the given
+ constant. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+static int
+count_insns_for_constant (HOST_WIDE_INT remainder, int i)
+{
+ HOST_WIDE_INT temp1;
+ int num_insns = 0;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+ num_insns++;
+ i -= 6;
+ }
+ i -= 2;
+ } while (remainder);
+ return num_insns;
+}
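+
+/* For example, starting from bit 0 the constant 0x00ff00ff is counted
+ as two instructions, one per set byte. */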
+
+/* Emit an instruction with the indicated PATTERN. If COND is
+ non-NULL, conditionalize the execution of the instruction on COND
+ being true. */
+
+static void
+emit_constant_insn (rtx cond, rtx pattern)
+{
+ if (cond)
+ pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
+ emit_insn (pattern);
+}
+
+/* As above, but extra parameter GENERATE which, if clear, suppresses
+ RTL generation. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* ??? This needs more work for thumb2. */
+
+static int
+arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
+ HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
+ int generate)
+{
+ int can_invert = 0;
+ int can_negate = 0;
+ int can_negate_initial = 0;
+ int can_shift = 0;
+ int i;
+ int num_bits_set = 0;
+ int set_sign_bit_copies = 0;
+ int clear_sign_bit_copies = 0;
+ int clear_zero_bit_copies = 0;
+ int set_zero_bit_copies = 0;
+ int insns = 0;
+ unsigned HOST_WIDE_INT temp1, temp2;
+ unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
+
+ /* Find out which operations are safe for a given CODE. Also do a quick
+ check for degenerate cases; these can occur when DImode operations
+ are split. */
+ switch (code)
+ {
+ case SET:
+ can_invert = 1;
+ can_shift = 1;
+ can_negate = 1;
+ break;
+
+ case PLUS:
+ can_negate = 1;
+ can_negate_initial = 1;
+ break;
+
+ case IOR:
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ GEN_INT (ARM_SIGN_EXTEND (val))));
+ return 1;
+ }
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target, source));
+ return 1;
+ }
+ break;
+
+ case AND:
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target, const0_rtx));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target, source));
+ return 1;
+ }
+ can_invert = 1;
+ break;
+
+ case XOR:
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target, source));
+ return 1;
+ }
+
+ /* We don't know how to handle other cases yet. */
+ gcc_assert (remainder == 0xffffffff);
+
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NOT (mode, source)));
+ return 1;
+
+ case MINUS:
+ /* We treat MINUS as (val - source), since (source - val) is always
+ passed as (source + (-val)). */
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NEG (mode, source)));
+ return 1;
+ }
+ if (const_ok_for_arm (val))
+ {
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_MINUS (mode, GEN_INT (val),
+ source)));
+ return 1;
+ }
+ can_negate = 1;
+
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If we can do it in one insn get out quickly. */
+ if (const_ok_for_arm (val)
+ || (can_negate_initial && const_ok_for_arm (-val))
+ || (can_invert && const_ok_for_arm (~val)))
+ {
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ (source
+ ? gen_rtx_fmt_ee (code, mode, source,
+ GEN_INT (val))
+ : GEN_INT (val))));
+ return 1;
+ }
+
+ /* Calculate a few attributes that may be useful for specific
+ optimizations. */
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_zero_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_zero_bit_copies++;
+ else
+ break;
+ }
+
+ switch (code)
+ {
+ case SET:
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* See if we can use movw. */
+ if (arm_arch_thumb2 && (remainder & 0xffff0000) == 0)
+ {
+ if (generate)
+ emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
+ GEN_INT (val)));
+ return 1;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* See if we can do this by sign_extending a constant that is known
+ to be negative. This is a good way of doing it, since the shift
+ may well merge into a subsequent insn. */
+ if (set_sign_bit_copies > 1)
+ {
+ if (const_ok_for_arm
+ (temp1 = ARM_SIGN_EXTEND (remainder
+ << (set_sign_bit_copies - 1))))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_constant_insn (cond,
+ gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ /* For an inverted constant, we will need to set the low bits,
+ these will be shifted out of harm's way. */
+ temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
+ if (const_ok_for_arm (~temp1))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_constant_insn (cond,
+ gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ }
+
+ /* See if we can calculate the value as the difference between two
+ valid immediates. */
+ if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
+ {
+ int topshift = clear_sign_bit_copies & ~1;
+
+ temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
+ & (0xff000000 >> topshift));
+
+ /* If temp1 is zero, then that means the 9 most significant
+ bits of remainder were 1 and we've caused it to overflow.
+ When topshift is 0 we don't need to do anything since we
+ can borrow from 'bit 32'. */
+ if (temp1 == 0 && topshift != 0)
+ temp1 = 0x80000000 >> (topshift - 1);
+
+ temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
+
+ if (const_ok_for_arm (temp2))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_constant_insn (cond,
+ gen_addsi3 (target, new_src,
+ GEN_INT (-temp2)));
+ }
+
+ return 2;
+ }
+ }
+
+ /* See if we can generate this by setting the bottom (or the top)
+ 16 bits, and then shifting these into the other half of the
+ word. We only look for the simplest cases; to do more would cost
+ too much. Be careful, however, not to generate this when the
+ alternative would take fewer insns. */
+ if (val & 0xffff0000)
+ {
+ temp1 = remainder & 0xffff0000;
+ temp2 = remainder & 0x0000ffff;
+
+ /* Overlaps outside this range are best done using other methods. */
+ for (i = 9; i < 24; i++)
+ {
+ if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
+ && !const_ok_for_arm (temp2))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, cond, temp2, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET
+ (VOIDmode, target,
+ gen_rtx_IOR (mode,
+ gen_rtx_ASHIFT (mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+
+ /* Don't duplicate cases already considered. */
+ for (i = 17; i < 24; i++)
+ {
+ if (((temp1 | (temp1 >> i)) == remainder)
+ && !const_ok_for_arm (temp1))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, cond, temp1, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_IOR
+ (mode,
+ gen_rtx_LSHIFTRT (mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+ }
+ break;
+
+ case IOR:
+ case XOR:
+ /* If we have IOR or XOR, and the constant can be loaded in a
+ single instruction, and we can find a temporary to put it in,
+ then this can be done in two instructions instead of 3-4. */
+ if (subtargets
+ /* TARGET can't be NULL if SUBTARGETS is 0 */
+ || (reload_completed && !reg_mentioned_p (target, source)))
+ {
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, sub,
+ GEN_INT (val)));
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_fmt_ee (code, mode,
+ source, sub)));
+ }
+ return 2;
+ }
+ }
+
+ if (code == XOR)
+ break;
+
+ if (set_sign_bit_copies > 8
+ && (val & (-1 << (32 - set_sign_bit_copies))) == val)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_sign_bit_copies);
+
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_NOT (mode,
+ gen_rtx_ASHIFT (mode,
+ source,
+ shift))));
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NOT (mode,
+ gen_rtx_LSHIFTRT (mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (set_zero_bit_copies > 8
+ && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_zero_bit_copies);
+
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_NOT (mode,
+ gen_rtx_LSHIFTRT (mode,
+ source,
+ shift))));
+ emit_constant_insn
+ (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NOT (mode,
+ gen_rtx_ASHIFT (mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_NOT (mode, source)));
+ source = sub;
+ if (subtargets)
+ sub = gen_reg_rtx (mode);
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_AND (mode, source,
+ GEN_INT (temp1))));
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NOT (mode, sub)));
+ }
+ return 3;
+ }
+ break;
+
+ case AND:
+ /* See if two shifts will do 2 or more insn's worth of work. */
+ if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = ((0xffffffff
+ << (32 - clear_sign_bit_copies))
+ & 0xffffffff);
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ insns = arm_gen_constant (AND, mode, cond,
+ remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+ insns = arm_gen_constant (AND, mode, cond,
+ remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_sign_bit_copies);
+
+ emit_insn (gen_ashlsi3 (new_src, source, shift));
+ emit_insn (gen_lshrsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+
+ insns = arm_gen_constant (AND, mode, cond,
+ remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+
+ insns = arm_gen_constant (AND, mode, cond,
+ remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_zero_bit_copies);
+
+ emit_insn (gen_lshrsi3 (new_src, source, shift));
+ emit_insn (gen_ashlsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (remainder & (1 << i))
+ num_bits_set++;
+
+ if (code == AND || (can_invert && num_bits_set > 16))
+ remainder = (~remainder) & 0xffffffff;
+ else if (code == PLUS && num_bits_set > 16)
+ remainder = (-remainder) & 0xffffffff;
+ else
+ {
+ can_invert = 0;
+ can_negate = 0;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Now try and find a way of doing the job in either two or three
+ instructions.
+ We start by looking for the largest block of zeros that are aligned on
+ a 2-bit boundary, we then fill up the temps, wrapping around to the
+ top of the word when we drop off the bottom.
+ In the worst case this code should produce no more than four insns.
+ Thumb-2 constants are shifted, not rotated, so the MSB is always the
+ best place to start. */
+
+ /* ??? Use thumb2 replicated constants when the high and low halfwords are
+ the same. */
+ {
+ int best_start = 0;
+ if (!TARGET_THUMB2)
+ {
+ int best_consecutive_zeros = 0;
+
+ for (i = 0; i < 32; i += 2)
+ {
+ int consecutive_zeros = 0;
+
+ if (!(remainder & (3 << i)))
+ {
+ while ((i < 32) && !(remainder & (3 << i)))
+ {
+ consecutive_zeros += 2;
+ i += 2;
+ }
+ if (consecutive_zeros > best_consecutive_zeros)
+ {
+ best_consecutive_zeros = consecutive_zeros;
+ best_start = i - consecutive_zeros;
+ }
+ i -= 2;
+ }
+ }
+
+ /* So long as it won't require any more insns to do so, it's
+ desirable to emit a small constant (in bits 0...9) in the last
+ insn. This way there is more chance that it can be combined with
+ a later addressing insn to form a pre-indexed load or store
+ operation. Consider:
+
+ *((volatile int *)0xe0000100) = 1;
+ *((volatile int *)0xe0000110) = 2;
+
+ We want this to wind up as:
+
+ mov rA, #0xe0000000
+ mov rB, #1
+ str rB, [rA, #0x100]
+ mov rB, #2
+ str rB, [rA, #0x110]
+
+ rather than having to synthesize both large constants from scratch.
+
+ Therefore, we calculate how many insns would be required to emit
+ the constant starting from `best_start', and also starting from
+ zero (i.e. with bit 31 first to be output). If `best_start' doesn't
+ yield a shorter sequence, we may as well use zero. */
+ if (best_start != 0
+ && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
+ && (count_insns_for_constant (remainder, 0) <=
+ count_insns_for_constant (remainder, best_start)))
+ best_start = 0;
+ }
+
+ /* Now start emitting the insns. */
+ i = best_start;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+
+ if (generate)
+ {
+ rtx new_src, temp1_rtx;
+
+ if (code == SET || code == MINUS)
+ {
+ new_src = (subtargets ? gen_reg_rtx (mode) : target);
+ if (can_invert && code != MINUS)
+ temp1 = ~temp1;
+ }
+ else
+ {
+ if (remainder && subtargets)
+ new_src = gen_reg_rtx (mode);
+ else
+ new_src = target;
+ if (can_invert)
+ temp1 = ~temp1;
+ else if (can_negate)
+ temp1 = -temp1;
+ }
+
+ temp1 = trunc_int_for_mode (temp1, mode);
+ temp1_rtx = GEN_INT (temp1);
+
+ if (code == SET)
+ ;
+ else if (code == MINUS)
+ temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
+ else
+ temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
+
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, new_src,
+ temp1_rtx));
+ source = new_src;
+ }
+
+ if (code == SET)
+ {
+ can_invert = 0;
+ code = PLUS;
+ }
+ else if (code == MINUS)
+ code = PLUS;
+
+ insns++;
+ if (TARGET_ARM)
+ i -= 6;
+ else
+ i -= 7;
+ }
+ /* ARM allows rotates by a multiple of two. Thumb-2 allows arbitrary
+ shifts. */
+ if (TARGET_ARM)
+ i -= 2;
+ else
+ i--;
+ }
+ while (remainder);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ return insns;
+}
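+
+/* As a concrete example, 0xe0000100 is not a valid immediate, but with
+ CODE == SET it splits into two instructions, roughly:
+
+ mov rT, #0xe0000000
+ add rT, rT, #0x100
+
+ (after the first insn CODE is rewritten from SET to PLUS). */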
+
+/* Canonicalize a comparison so that we are more likely to recognize it.
+ This can be done for a few constant compares, where we can make the
+ immediate value easier to load. */
+
+enum rtx_code
+arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
+ rtx * op1)
+{
+ unsigned HOST_WIDE_INT i = INTVAL (*op1);
+ unsigned HOST_WIDE_INT maxval;
+ maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
+
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ case LE:
+ if (i != maxval
+ && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GT ? GE : LT;
+ }
+ break;
+
+ case GE:
+ case LT:
+ if (i != ~maxval
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GE ? GT : LE;
+ }
+ break;
+
+ case GTU:
+ case LEU:
+ if (i != ~((unsigned HOST_WIDE_INT) 0)
+ && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GTU ? GEU : LTU;
+ }
+ break;
+
+ case GEU:
+ case LTU:
+ if (i != 0
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GEU ? GTU : LEU;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return code;
+}
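+
+/* For example, 0xfff is not a valid immediate but 0x1000 is, so
+ (GT x 0xfff) is rewritten as (GE x 0x1000), which needs only
+ "cmp x, #0x1000". */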
+
+
+/* Define how to find the value returned by a function. */
+
+rtx
+arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+ int unsignedp ATTRIBUTE_UNUSED;
+ rtx r ATTRIBUTE_UNUSED;
+
+ mode = TYPE_MODE (type);
+ /* Promote integer types. */
+ if (INTEGRAL_TYPE_P (type))
+ PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
+
+ /* Promote small structs returned in a register to full-word size
+ for big-endian AAPCS. */
+ if (arm_return_in_msb (type))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ if (size % UNITS_PER_WORD != 0)
+ {
+ size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ return LIBCALL_VALUE(mode);
+}
+
+/* Determine the amount of memory needed to store the possible return
+ registers of an untyped call. */
+int
+arm_apply_result_size (void)
+{
+ int size = 16;
+
+ if (TARGET_ARM)
+ {
+ if (TARGET_HARD_FLOAT_ABI)
+ {
+ if (TARGET_FPA)
+ size += 12;
+ if (TARGET_MAVERICK)
+ size += 8;
+ }
+ if (TARGET_IWMMXT_ABI)
+ size += 8;
+ }
+
+ return size;
+}
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+int
+arm_return_in_memory (tree type)
+{
+ HOST_WIDE_INT size;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ size = int_size_in_bytes (type);
+
+ /* Vector values should be returned using ARM registers, not memory (unless
+ they're over 16 bytes, which will break since we only have four
+ call-clobbered registers to play with). */
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ return (size < 0 || size > (4 * UNITS_PER_WORD));
+
+ if (!AGGREGATE_TYPE_P (type)
+ && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
+ /* All simple types are returned in registers.
+ For AAPCS, complex types are treated the same as aggregates. */
+ return 0;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if (arm_abi != ARM_ABI_APCS)
+ {
+ /* ATPCS and later return aggregate types in memory only if they are
+ larger than a word (or are variable size). */
+ return (size < 0 || size > UNITS_PER_WORD);
+ }
+
+ /* APPLE LOCAL v7 support. Merge from Codesourcery */
+ /* Removed lines */
+ /* For the arm-wince targets we choose to be compatible with Microsoft's
+ ARM and Thumb compilers, which always return aggregates in memory. */
+#ifndef ARM_WINCE
+ /* All structures/unions bigger than one word are returned in memory.
+ Also catch the case where int_size_in_bytes returns -1. In this case
+ the aggregate is either huge or of variable size, and in either case
+ we will want to return it via memory and not in a register. */
+ if (size < 0 || size > UNITS_PER_WORD)
+ return 1;
+
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we only return in a register
+ if the type is 'integer like' and every addressable element
+ has an offset of zero. For practical purposes this means
+ that the structure can have at most one non bit-field element
+ and that this element must be the first one in the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Check that the first field is valid for returning in a register. */
+
+ /* ... Floats are not allowed */
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ /* ... Aggregates that are not themselves valid for returning in
+ a register are not allowed. */
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+
+ /* Now check the remaining fields, if any. Only bitfields are allowed,
+ since they are not addressable. */
+ for (field = TREE_CHAIN (field);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (!DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+#endif /* not ARM_WINCE */
+
+ /* Return all other types in memory. */
+ return 1;
+}
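+
+/* Under the APCS rules above, for example, struct { int x; } is
+ integer-like and comes back in r0, struct { float f; } goes via
+ memory because of the float member, and struct { int x; int y; }
+ goes via memory because it is wider than a word. */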
+
+/* Indicate whether or not words of a double are in big-endian order. */
+
+int
+arm_float_words_big_endian (void)
+{
+ if (TARGET_MAVERICK)
+ return 0;
+
+ /* For FPA, float words are always big-endian. For VFP, floats words
+ follow the memory system mode. */
+
+ if (TARGET_FPA)
+ {
+ return 1;
+ }
+
+ if (TARGET_VFP)
+ return (TARGET_BIG_END ? 1 : 0);
+
+ return 1;
+}
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is NULL. */
+void
+arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
+ rtx libname ATTRIBUTE_UNUSED,
+/* APPLE LOCAL 6738583 -mlong-calls PIC static functions */
+ tree fndecl)
+{
+ /* On the ARM, the offset starts at 0. */
+ pcum->nregs = 0;
+ pcum->iwmmxt_nregs = 0;
+ pcum->can_split = true;
+
+ pcum->call_cookie = CALL_NORMAL;
+
+ if (TARGET_LONG_CALLS)
+ pcum->call_cookie = CALL_LONG;
+
+ /* Check for long call/short call attributes. The attributes
+ override any command line option. */
+ if (fntype)
+ {
+ if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
+ pcum->call_cookie = CALL_SHORT;
+ else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
+ pcum->call_cookie = CALL_LONG;
+ /* APPLE LOCAL begin 6738583 -mlong-calls PIC static functions */
+ else if (fndecl && ! TREE_PUBLIC (fndecl))
+ pcum->call_cookie = CALL_SHORT;
+ /* APPLE LOCAL end 6738583 -mlong-calls PIC static functions */
+ }
+
+ /* Varargs vectors are treated the same as long long.
+ named_count avoids having to change the way arm handles 'named'. */
+ pcum->named_count = 0;
+ pcum->nargs = 0;
+
+ if (TARGET_REALLY_IWMMXT && fntype)
+ {
+ tree fn_arg;
+
+ for (fn_arg = TYPE_ARG_TYPES (fntype);
+ fn_arg;
+ fn_arg = TREE_CHAIN (fn_arg))
+ pcum->named_count += 1;
+
+ if (! pcum->named_count)
+ pcum->named_count = INT_MAX;
+ }
+}
+
+
+/* Return true if mode/type need doubleword alignment. */
+bool
+arm_needs_doubleword_align (enum machine_mode mode, tree type)
+{
+ return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
+ || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
+}
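+
+/* For instance, a DImode argument such as "long long" exceeds
+ PARM_BOUNDARY alignment under AAPCS, so arm_function_arg below will
+ start it in an even register pair (r0/r1 or r2/r3), skipping an odd
+ register if necessary. */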
+
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+rtx
+arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
+ tree type, int named)
+{
+ int nregs;
+
+ /* Varargs vectors are treated the same as long long.
+ named_count avoids having to change the way arm handles 'named'. */
+ if (TARGET_IWMMXT_ABI
+ && arm_vector_mode_supported_p (mode)
+ && pcum->named_count > pcum->nargs + 1)
+ {
+ if (pcum->iwmmxt_nregs <= 9)
+ return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
+ else
+ {
+ pcum->can_split = false;
+ return NULL_RTX;
+ }
+ }
+
+ /* Put doubleword aligned quantities in even register pairs. */
+ if (pcum->nregs & 1
+ && ARM_DOUBLEWORD_ALIGN
+ && arm_needs_doubleword_align (mode, type))
+ pcum->nregs++;
+
+ if (mode == VOIDmode)
+ /* Compute operand 2 of the call insn. */
+ return GEN_INT (pcum->call_cookie);
+
+ /* Only allow splitting an arg between regs and memory if all preceding
+ args were allocated to regs. For args passed by reference we only count
+ the reference pointer. */
+ if (pcum->can_split)
+ nregs = 1;
+ else
+ nregs = ARM_NUM_REGS2 (mode, type);
+
+ if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
+ return NULL_RTX;
+
+ return gen_rtx_REG (mode, pcum->nregs);
+}
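+
+/* For example, for f (int a, int b, int c, long long d) under AAPCS,
+ a, b and c take r0-r2; d needs an even pair, so r3 is skipped and,
+ with no registers left, d is passed entirely on the stack (NULL_RTX).
+ Without doubleword alignment d would instead split between r3 and the
+ stack, as permitted by can_split. */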
+
+static int
+arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int nregs = pcum->nregs;
+
+ /* APPLE LOCAL v7 support. Merge from Codesourcery */
+ if (TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (mode))
+ return 0;
+
+ if (NUM_ARG_REGS > nregs
+ && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
+ && pcum->can_split)
+ return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
+
+ return 0;
+}
+
+/* Variable sized types are passed by reference. This is a GCC
+ extension to the ARM ABI. */
+
+static bool
+arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
+}
+
+/* Encode the current state of the #pragma [no_]long_calls. */
+typedef enum
+{
+ OFF, /* No #pragma [no_]long_calls is in effect. */
+ LONG, /* #pragma long_calls is in effect. */
+ SHORT /* #pragma no_long_calls is in effect. */
+} arm_pragma_enum;
+
+static arm_pragma_enum arm_pragma_long_calls = OFF;
+
+void
+arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ arm_pragma_long_calls = LONG;
+}
+
+void
+arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ arm_pragma_long_calls = SHORT;
+}
+
+void
+arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
+{
+ arm_pragma_long_calls = OFF;
+}
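+
+/* These handlers implement the source-level pragmas:
+
+ #pragma long_calls
+ void far_func (void); -- gets the long_call attribute
+ #pragma no_long_calls
+ void near_func (void); -- gets the short_call attribute
+ #pragma long_calls_off -- back to the command-line default
+
+ The attributes themselves are attached by
+ arm_set_default_type_attributes below. */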
+
+/* Table of machine attributes. */
+const struct attribute_spec arm_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ /* Function calls made to this symbol must be done indirectly, because
+ it may lie outside of the 26 bit addressing range of a normal function
+ call. */
+ { "long_call", 0, 0, false, true, true, NULL },
+ /* Whereas these functions are always known to reside within the 26 bit
+ addressing range. */
+ { "short_call", 0, 0, false, true, true, NULL },
+ /* Interrupt Service Routines have special prologue and epilogue requirements. */
+ { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
+ { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
+ { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
+#ifdef ARM_PE
+ /* ARM/PE has three new attributes:
+ interfacearm - ?
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times.
+ */
+ { "dllimport", 0, 0, true, false, false, NULL },
+ { "dllexport", 0, 0, true, false, false, NULL },
+ { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
+#elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
+ { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
+ { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
+ { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
+#endif
+/* APPLE LOCAL begin 5946347 ms_struct support */
+ { "ms_struct", 0, 0, false, false, false, arm_handle_ms_struct_attribute },
+ { "gcc_struct", 0, 0, false, false, false, arm_handle_gcc_struct_attribute },
+/* APPLE LOCAL end 5946347 ms_struct support */
+/* APPLE LOCAL begin ARM darwin attributes */
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE,
+#endif
+/* APPLE LOCAL end ARM darwin attributes */
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+/* Handle an attribute requiring a FUNCTION_DECL;
+ arguments as in struct attribute_spec.handler. */
+static tree
+arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle an "interrupt" or "isr" attribute;
+ arguments as in struct attribute_spec.handler. */
+static tree
+arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
+ bool *no_add_attrs)
+{
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ /* FIXME: the argument if any is checked for type attributes;
+ should it be checked for decl ones? */
+ }
+ else
+ {
+ if (TREE_CODE (*node) == FUNCTION_TYPE
+ || TREE_CODE (*node) == METHOD_TYPE)
+ {
+ if (arm_isr_value (args) == ARM_FT_UNKNOWN)
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ }
+ else if (TREE_CODE (*node) == POINTER_TYPE
+ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
+ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
+ && arm_isr_value (args) != ARM_FT_UNKNOWN)
+ {
+ *node = build_variant_type_copy (*node);
+ TREE_TYPE (*node) = build_type_attribute_variant
+ (TREE_TYPE (*node),
+ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
+ *no_add_attrs = true;
+ }
+ else
+ {
+ /* Possibly pass this attribute on from the type to a decl. */
+ if (flags & ((int) ATTR_FLAG_DECL_NEXT
+ | (int) ATTR_FLAG_FUNCTION_NEXT
+ | (int) ATTR_FLAG_ARRAY_NEXT))
+ {
+ *no_add_attrs = true;
+ return tree_cons (name, args, NULL_TREE);
+ }
+ else
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ }
+ }
+ }
+
+ return NULL_TREE;
+}
+
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+/* Handle the "notshared" attribute. This attribute is another way of
+ requesting hidden visibility. ARM's compiler supports
+ "__declspec(notshared)"; we support the same thing via an
+ attribute. */
+
+static tree
+arm_handle_notshared_attribute (tree *node,
+ tree name ATTRIBUTE_UNUSED,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree decl = TYPE_NAME (*node);
+
+ if (decl)
+ {
+ DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
+ DECL_VISIBILITY_SPECIFIED (decl) = 1;
+ *no_add_attrs = false;
+ }
+ return NULL_TREE;
+}
+#endif
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+ are compatible, and 2 if they are nearly compatible (which causes a
+ warning to be generated). */
+static int
+arm_comp_type_attributes (tree type1, tree type2)
+{
+ int l1, l2, s1, s2;
+
+ /* Check for mismatch of non-default calling convention. */
+ if (TREE_CODE (type1) != FUNCTION_TYPE)
+ return 1;
+
+ /* Check for mismatched call attributes. */
+ l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
+ l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
+ s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
+ s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
+
+ /* Only bother to check if an attribute is defined. */
+ if (l1 | l2 | s1 | s2)
+ {
+ /* If one type has an attribute, the other must have the same attribute. */
+ if ((l1 != l2) || (s1 != s2))
+ return 0;
+
+ /* Disallow mixed attributes. */
+ if ((l1 & s2) || (l2 & s1))
+ return 0;
+ }
+
+ /* Check for mismatched ISR attribute. */
+ l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
+ if (! l1)
+ l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
+ l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
+ if (! l2)
+ l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
+ if (l1 != l2)
+ return 0;
+
+ return 1;
+}
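+
+/* For example, a long_call function type is incompatible (0) with a
+ short_call one, so mixing them in an assignment through a function
+ pointer can be diagnosed by the front end. */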
+
+/* APPLE LOCAL begin ARM longcall */
+/* Encode long_call or short_call attribute by prefixing
+ symbol name in DECL with a special character FLAG. */
+void
+arm_encode_call_attribute (tree decl, int flag)
+{
+#if TARGET_MACHO
+ rtx sym_ref = XEXP (DECL_RTL (decl), 0);
+
+ /* Do not allow weak functions with default visibility to be treated
+ as short call. */
+ if (DECL_WEAK (decl)
+ && DECL_VISIBILITY (decl) == VISIBILITY_DEFAULT
+ && flag == SYMBOL_SHORT_CALL)
+ return;
+
+ SYMBOL_REF_FLAGS (sym_ref) |= flag;
+#else
+ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ int len = strlen (str);
+ char * newstr;
+
+ /* Do not allow weak functions to be treated as short call. */
+ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
+ return;
+
+ newstr = alloca (len + 2);
+ newstr[0] = flag;
+ strcpy (newstr + 1, str);
+
+ newstr = (char *) ggc_alloc_string (newstr, len + 1);
+ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
+#endif
+}
+/* APPLE LOCAL end ARM longcall */
+
+/* Assigns default attributes to newly defined type. This is used to
+ set short_call/long_call attributes for function types of
+ functions defined inside corresponding #pragma scopes. */
+static void
+arm_set_default_type_attributes (tree type)
+{
+ /* Add __attribute__ ((long_call)) to all functions, when
+ inside #pragma long_calls or __attribute__ ((short_call)),
+ when inside #pragma no_long_calls. */
+ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
+ {
+ tree type_attr_list, attr_name;
+ type_attr_list = TYPE_ATTRIBUTES (type);
+
+ if (arm_pragma_long_calls == LONG)
+ attr_name = get_identifier ("long_call");
+ else if (arm_pragma_long_calls == SHORT)
+ attr_name = get_identifier ("short_call");
+ else
+ return;
+
+ type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
+ TYPE_ATTRIBUTES (type) = type_attr_list;
+ }
+ /* APPLE LOCAL begin 5946347 ms_struct support */
+ /* If -mms-bitfields is active and this is a structure or union type
+ definition, then add an ms_struct attribute. */
+#if TARGET_MACHO
+ else if ((TARGET_MS_BITFIELD_LAYOUT || darwin_ms_struct)
+ && (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE))
+#else
+ else if (TARGET_MS_BITFIELD_LAYOUT
+ && (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE))
+#endif
+ {
+ TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("ms_struct"),
+ NULL_TREE,
+ TYPE_ATTRIBUTES (type));
+ }
+ /* APPLE LOCAL end 5946347 ms_struct support */
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be
+ defined within the current compilation unit. If this cannot be
+ determined, then 0 is returned. */
+static int
+current_file_function_operand (rtx sym_ref)
+{
+ /* This is a bit of a fib. A function will have a short call flag
+ applied to its name if it has the short call attribute, or it has
+ already been defined within the current compilation unit. */
+/* APPLE LOCAL begin ARM longcall */
+#if TARGET_MACHO
+ if (SYMBOL_SHORT_CALL_ATTR_P (sym_ref))
+#else
+ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
+#endif
+ return 1;
+/* APPLE LOCAL end ARM longcall */
+
+ /* The current function is always defined within the current compilation
+ unit. If it is a weak definition, however, then this may not be the real
+ definition of the function, and so we have to say no. */
+ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
+ && !DECL_WEAK (current_function_decl))
+ return 1;
+
+ /* We cannot make the determination - default to returning 0. */
+ return 0;
+}
+
+/* Return nonzero if a 32 bit "long_call" should be generated for
+ this call. We generate a long_call if the function:
+
+ a. has an __attribute__ ((long_call)),
+ or b. is within the scope of a #pragma long_calls,
+ or c. the -mlong-calls command line switch has been specified
+ and either:
+ 1. -ffunction-sections is in effect,
+ or 2. the current function has __attribute__ ((section)),
+ or 3. the target function has __attribute__ ((section)).
+
+ However, we do not generate a long call if the function:
+
+ d. has an __attribute__ ((short_call))
+ or e. is inside the scope of a #pragma no_long_calls
+ or f. is defined within the current compilation unit.
+
+ This function will be called by C fragments contained in the machine
+ description file. SYM_REF and CALL_COOKIE correspond to the matched
+ rtl operands. CALL_SYMBOL is used to distinguish between
+ two different callers of the function. It is set to 1 in the
+ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
+ and "call_value" patterns. This is because of the difference in the
+ SYM_REFs passed by these patterns. */
+int
+arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
+{
+ if (!call_symbol)
+ {
+ if (GET_CODE (sym_ref) != MEM)
+ return 0;
+
+ sym_ref = XEXP (sym_ref, 0);
+ }
+
+ if (GET_CODE (sym_ref) != SYMBOL_REF)
+ return 0;
+
+ if (call_cookie & CALL_SHORT)
+ return 0;
+
+ if (TARGET_LONG_CALLS)
+ {
+ if (flag_function_sections
+ || DECL_SECTION_NAME (current_function_decl))
+ /* c.3 is handled by the definition of the
+ ARM_DECLARE_FUNCTION_SIZE macro. */
+ return 1;
+ }
+
+ if (current_file_function_operand (sym_ref))
+ return 0;
+
+ /* APPLE LOCAL begin ARM longcall */
+#if TARGET_MACHO
+ return (call_cookie & CALL_LONG)
+ || SYMBOL_LONG_CALL_ATTR_P (sym_ref)
+ || TARGET_LONG_CALLS;
+#else
+ return (call_cookie & CALL_LONG)
+ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
+ || TARGET_LONG_CALLS;
+#endif
+ /* APPLE LOCAL end ARM longcall */
+}
+
+/* Return nonzero if it is ok to make a tail-call to DECL. */
+static bool
+arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ unsigned long func_type;
+
+ if (cfun->machine->sibcall_blocked)
+ return false;
+
+ /* APPLE LOCAL begin ARM indirect sibcalls */
+ /* Never tailcall when compiling in Thumb mode; calls with no decl
+ are handled just below. */
+ if (TARGET_THUMB)
+ return false;
+
+ /* All indirect calls are within range, since we load the address into a
+ register. */
+ if (decl == NULL)
+ return true;
+ /* APPLE LOCAL end ARM indirect sibcalls */
+
+ /* Get the calling method. */
+ if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
+ call_type = CALL_SHORT;
+ else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
+ call_type = CALL_LONG;
+
+ /* Cannot tail-call to long calls, since these are out of range of
+ a branch instruction. However, if not compiling PIC, we know
+ we can reach the symbol if it is in this compilation unit. */
+ if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
+ return false;
+
+ /* If we are interworking and the function is not declared static
+ then we can't tail-call it unless we know that it exists in this
+ compilation unit (since it might be a Thumb routine). */
+ /* APPLE LOCAL begin ARM interworking */
+ if (TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl) && TARGET_INTERWORK)
+ {
+ if (TARGET_MACHO)
+ return false;
+ else if (!arm_arch5)
+ return false;
+ }
+ /* APPLE LOCAL end ARM interworking */
+
+ /* APPLE LOCAL begin ARM 4956366 */
+ /* If it's weak, the function called may end up being from a different
+ compilation unit. */
+ if (arm_cpp_interwork && TREE_PUBLIC (decl) && DECL_WEAK (decl))
+ return false;
+ /* APPLE LOCAL end ARM 4956366 */
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ func_type = arm_current_func_type ();
+ /* Never tailcall from an ISR routine - it needs a special exit sequence. */
+ if (IS_INTERRUPT (func_type))
+ return false;
+
+ /* Never tailcall if function may be called with a misaligned SP. */
+ if (IS_STACKALIGN (func_type))
+ return false;
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* Everything else is ok. */
+ return true;
+}
+
+
+/* Addressing mode support functions. */
+
+/* Return nonzero if X is a legitimate immediate operand when compiling
+ for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
+int
+legitimate_pic_operand_p (rtx x)
+{
+ if (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
+ return 0;
+
+ return 1;
+}
+
+rtx
+legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
+{
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+#ifndef AOF_ASSEMBLER
+ rtx pic_ref, address;
+#endif
+ /* APPLE LOCAL ARM pic support */
+ rtx norig, l1 = NULL_RTX;
+ rtx insn;
+ int subregs = 0;
+ /* APPLE LOCAL ARM pic support */
+ bool avoid_ind = true;
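+ /* avoid_ind tracks whether the Mach-O non-lazy-pointer load can be
+ skipped; it is cleared below for symbols that are not defined in
+ this translation unit. */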
+
+ /* If this function doesn't have a pic register, create one now.
+ A lot of the logic here is made obscure by the fact that this
+ routine gets called as part of the rtx cost estimation
+ process. We don't want those calls to affect any assumptions
+ about the real function; and further, we can't call
+ entry_of_function() until we start the real expansion
+ process. */
+ /* APPLE LOCAL ARM pic support */
+ if (!TARGET_MACHO && !current_function_uses_pic_offset_table)
+ {
+ gcc_assert (!no_new_pseudos);
+ if (arm_pic_register != INVALID_REGNUM)
+ {
+ cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
+
+ /* Play games to avoid marking the function as needing pic
+ if we are being called as part of the cost-estimation
+ process. */
+ if (!ir_type())
+ current_function_uses_pic_offset_table = 1;
+ }
+ else
+ {
+ rtx seq;
+
+ cfun->machine->pic_reg = gen_reg_rtx (Pmode);
+
+ /* Play games to avoid marking the function as needing pic
+ if we are being called as part of the cost-estimation
+ process. */
+ if (!ir_type())
+ {
+ current_function_uses_pic_offset_table = 1;
+ start_sequence ();
+
+ arm_load_pic_register (0UL);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_after (seq, entry_of_function ());
+ }
+ }
+ }
+
+ if (reg == 0)
+ {
+ gcc_assert (!no_new_pseudos);
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+#ifdef AOF_ASSEMBLER
+ /* The AOF assembler can generate relocations for these directly, and
+ understands that the PIC register has to be added into the offset. */
+ insn = emit_insn (gen_pic_load_addr_based (reg, orig));
+#else
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ /* APPLE LOCAL begin ARM pic support */
+ norig = orig;
+#if TARGET_MACHO
+ if (TARGET_MACHO)
+ {
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+ rtx x, ptr_ref = orig;
+
+ l1 = gen_label_rtx ();
+
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ bool defined = machopic_data_defined_p (orig);
+
+ if (defined && MACHO_DYNAMIC_NO_PIC_P)
+ return orig;
+
+ if (! defined)
+ {
+ avoid_ind = false;
+ ptr_ref = gen_rtx_SYMBOL_REF (Pmode,
+ machopic_indirection_name (orig, false));
+ SET_SYMBOL_REF_DECL (ptr_ref, SYMBOL_REF_DECL (orig));
+ SYMBOL_REF_FLAGS (ptr_ref) |= MACHO_SYMBOL_FLAG_DEFINED;
+ }
+ }
+ else
+ {
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ return orig;
+ }
+
+ if (! MACHO_DYNAMIC_NO_PIC_P)
+ {
+ x = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
+ ptr_ref = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, ptr_ref, x));
+ }
+ norig = ptr_ref;
+ }
+ }
+#endif
+ if (TARGET_MACHO && ! MACHO_DYNAMIC_NO_PIC_P)
+ {
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+ if (TARGET_ARM)
+ {
+ emit_insn (gen_pic_load_addr_arm (address, norig, l1));
+ emit_insn (gen_pic_add_dot_plus_eight (address, l1, address));
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (TARGET_THUMB2)
+ {
+ emit_insn (gen_pic_load_addr_thumb2 (address, norig, l1));
+ emit_insn (gen_pic_add_dot_plus_four (address, l1, address));
+ }
+ else /* TARGET_THUMB1 */
+ {
+ emit_insn (gen_pic_load_addr_thumb1 (address, norig, l1));
+ emit_insn (gen_pic_add_dot_plus_four (address, l1, address));
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ else
+ abort ();
+ }
+ else
+ {
+ if (TARGET_ARM)
+ emit_insn (gen_pic_load_addr_arm (address, norig, l1));
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (TARGET_THUMB2)
+ emit_insn (gen_pic_load_addr_thumb2 (address, norig, l1));
+ else /* TARGET_THUMB1 */
+ emit_insn (gen_pic_load_addr_thumb1 (address, norig, l1));
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ /* APPLE LOCAL end ARM pic support */
+
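+ /* Symbols and labels that bind locally only need the PIC base added;
+ anything else is loaded through the GOT (or, on Darwin, through the
+ non-lazy pointer computed above). */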
+ if ((GET_CODE (orig) == LABEL_REF
+ || (GET_CODE (orig) == SYMBOL_REF &&
+ SYMBOL_REF_LOCAL_P (orig)))
+ && NEED_GOT_RELOC)
+ pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
+ else
+ {
+ /* APPLE LOCAL begin ARM pic support */
+ if (! TARGET_MACHO)
+ pic_ref = gen_const_mem (Pmode,
+ gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
+ address));
+ else if (avoid_ind)
+ pic_ref = address;
+ else
+ pic_ref = gen_const_mem (Pmode, address);
+ /* APPLE LOCAL end ARM pic support */
+ }
+
+ insn = emit_move_insn (reg, pic_ref);
+#endif
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
+ return orig;
+
+ if (GET_CODE (XEXP (orig, 0)) == UNSPEC
+ && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
+ return orig;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* Handle the case where we have:
+ const (plus (UNSPEC_TLS) (ADDEND)). The ADDEND must be a
+ CONST_INT. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == UNSPEC
+ && XINT (XEXP (XEXP (orig, 0), 0), 1) == UNSPEC_TLS)
+ {
+ gcc_assert (GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT);
+ return orig;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if (reg == 0)
+ {
+ gcc_assert (!no_new_pseudos);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
+
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+
+ /* APPLE LOCAL begin 6327222 */
+ /* Kept under #if 0 for now, so it is here for reference, since this
+ is a tricky bit. */
+#if 0
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ /* The base register doesn't really matter, we only want to
+ test the index for the appropriate mode. */
+ if (!arm_legitimate_index_p (mode, offset, SET, 0))
+ {
+ gcc_assert (!no_new_pseudos);
+ offset = force_reg (Pmode, offset);
+ }
+
+ if (GET_CODE (offset) == CONST_INT)
+ return plus_constant (base, INTVAL (offset));
+ }
+#endif
+ /* APPLE LOCAL end 6327222 */
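+ /* For modes wider than a word that the FPU does not handle, a
+ reg+reg sum is generally not a legitimate address, so compute the
+ sum into a register instead of returning a PLUS. */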
+ if (GET_MODE_SIZE (mode) > 4
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || TARGET_SOFT_FLOAT))
+ {
+ emit_insn (gen_addsi3 (reg, base, offset));
+ return reg;
+ }
+
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ return orig;
+}
+
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Find a spare register to use during the prolog of a function. */
+
+static int
+thumb_find_work_register (unsigned long pushed_regs_mask)
+{
+ int reg;
+
+ /* Check the argument registers first as these are call-used. The
+ register allocation order means that sometimes r3 might be used
+ but earlier argument registers might not, so check them all. */
+ for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
+ if (!regs_ever_live[reg])
+ return reg;
+
+ /* Before going on to check the call-saved registers we can try a couple
+ more ways of deducing that r3 is available. The first is when we are
+ pushing anonymous arguments onto the stack and we have fewer than 4
+ registers' worth of fixed arguments (*). In this case r3 will be part of
+ the variable argument list and so we can be sure that it will be
+ pushed right at the start of the function. Hence it will be available
+ for the rest of the prologue.
+ (*): i.e. current_function_pretend_args_size is greater than 0. */
+ if (cfun->machine->uses_anonymous_args
+ && current_function_pretend_args_size > 0)
+ return LAST_ARG_REGNUM;
+
+ /* The other case is when we have fixed arguments but fewer than 4
+ registers' worth. In this case r3 might be used in the body of the
+ function, but
+ it is not being used to convey an argument into the function. In theory
+ we could just check current_function_args_size to see how many bytes are
+ being passed in argument registers, but it seems that it is unreliable.
+ Sometimes it will have the value 0 when in fact arguments are being
+ passed. (See testcase execute/20021111-1.c for an example). So we also
+ check the args_info.nregs field as well. The problem with this field is
+ that it makes no allowances for arguments that are passed to the
+ function but which are not used. Hence we could miss an opportunity
+ when a function has an unused argument in r3. But it is better to be
+ safe than to be sorry. */
+ if (! cfun->machine->uses_anonymous_args
+ && current_function_args_size >= 0
+ && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
+ && cfun->args_info.nregs < 4)
+ return LAST_ARG_REGNUM;
+
+ /* Otherwise look for a call-saved register that is going to be pushed. */
+ for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
+ if (pushed_regs_mask & (1 << reg))
+ return reg;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_THUMB2)
+ {
+ /* Thumb-2 can use high regs. */
+ for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
+ if (pushed_regs_mask & (1 << reg))
+ return reg;
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* Something went wrong - thumb_compute_save_reg_mask()
+ should have arranged for a suitable register to be pushed. */
+ gcc_unreachable ();
+}
+
+static GTY(()) int pic_labelno;
+
+ /* Generate code to load the PIC register. SAVED_REGS is the mask of
+ registers pushed in the prologue; in Thumb mode a spare low register
+ is taken from that set when one is needed. */
+
+void
+arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
+{
+#ifndef AOF_ASSEMBLER
+ rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
+ rtx global_offset_table;
+
+ if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
+ return;
+
+ gcc_assert (flag_pic);
+
+ /* We use an UNSPEC rather than a LABEL_REF because this label never appears
+ in the code stream. */
+
+ labelno = GEN_INT (pic_labelno++);
+ l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
+ l1 = gen_rtx_CONST (VOIDmode, l1);
+
+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ /* On the ARM the PC register contains 'dot + 8' at the time of the
+ addition, on the Thumb it is 'dot + 4'. */
+ pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
+ if (GOT_PCREL)
+ pic_tmp2 = gen_rtx_CONST (VOIDmode,
+ gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
+ else
+ pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
+
+ pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
+
+ if (TARGET_ARM)
+ {
+ /* APPLE LOCAL begin ARM pic support */
+ emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx, l1));
+ emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg, l1,
+ cfun->machine->pic_reg));
+ /* APPLE LOCAL end ARM pic support */
+ }
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ else if (TARGET_THUMB2)
+ {
+ /* Thumb-2 only allows very limited access to the PC. Calculate the
+ address in a temporary register. */
+ if (arm_pic_register != INVALID_REGNUM)
+ {
+ pic_tmp = gen_rtx_REG (SImode,
+ thumb_find_work_register (saved_regs));
+ }
+ else
+ {
+ gcc_assert (!no_new_pseudos);
+ pic_tmp = gen_reg_rtx (Pmode);
+ }
+
+ emit_insn (gen_pic_load_addr_thumb2 (cfun->machine->pic_reg,
+ pic_rtx, l1));
+ emit_insn (gen_pic_load_dot_plus_four (pic_tmp, labelno));
+ emit_insn (gen_addsi3 (cfun->machine->pic_reg, cfun->machine->pic_reg,
+ pic_tmp));
+ }
+ else /* TARGET_THUMB1 */
+ {
+ /* APPLE LOCAL begin ARM pic support */
+ if (arm_pic_register != INVALID_REGNUM
+ && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
+ {
+ /* We will have pushed the pic register, so we should always be
+ able to find a work register. */
+ pic_tmp = gen_rtx_REG (SImode,
+ thumb_find_work_register (saved_regs));
+ emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx, l1));
+ emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
+ }
+ else
+ emit_insn (gen_pic_load_addr_thumb1 (cfun->machine->pic_reg, pic_rtx, l1));
+ emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg, l1,
+ cfun->machine->pic_reg));
+ /* APPLE LOCAL end ARM pic support */
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
+#endif /* AOF_ASSEMBLER */
+}
+
+
+/* Return nonzero if X is valid as an ARM state addressing register. */
+static int
+arm_address_register_rtx_p (rtx x, int strict_p)
+{
+ int regno;
+
+ if (GET_CODE (x) != REG)
+ return 0;
+
+ regno = REGNO (x);
+
+ if (strict_p)
+ return ARM_REGNO_OK_FOR_BASE_P (regno);
+
+ return (regno <= LAST_ARM_REGNUM
+ || regno >= FIRST_PSEUDO_REGISTER
+ || regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM);
+}
+
+/* Return TRUE if this rtx is the difference of a symbol and a label,
+ and will reduce to a PC-relative relocation in the object file.
+ Expressions like this can be left alone when generating PIC, rather
+ than forced through the GOT. */
+static int
+pcrel_constant_p (rtx x)
+{
+ if (GET_CODE (x) == MINUS)
+ return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
+
+ return FALSE;
+}
+
+/* Return nonzero if X is a valid ARM state address operand. */
+int
+arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
+ int strict_p)
+{
+ bool use_ldrd;
+ enum rtx_code code = GET_CODE (x);
+
+ if (arm_address_register_rtx_p (x, strict_p))
+ return 1;
+
+ use_ldrd = (TARGET_LDRD
+ && (mode == DImode
+ || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
+
+ /* APPLE LOCAL begin 6293989 */
+ if (TARGET_NEON && VECTOR_MODE_P (mode)
+ && (code == PRE_DEC || code == PRE_INC || code == POST_DEC))
+ return 0;
+ /* APPLE LOCAL end 6293989 */
+
+ if (code == POST_INC || code == PRE_DEC
+ || ((code == PRE_INC || code == POST_DEC)
+ && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
+ return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
+
+ else if ((code == POST_MODIFY || code == PRE_MODIFY)
+ && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
+ && GET_CODE (XEXP (x, 1)) == PLUS
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ {
+ rtx addend = XEXP (XEXP (x, 1), 1);
+
+ /* Don't allow ldrd post increment by register because it's hard
+ to fixup invalid register choices. */
+ if (use_ldrd
+ && GET_CODE (x) == POST_MODIFY
+ && GET_CODE (addend) == REG)
+ return 0;
+
+ return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
+ && arm_legitimate_index_p (mode, addend, outer, strict_p));
+ }
+
+ /* After reload constants split into minipools will have addresses
+ from a LABEL_REF. */
+ else if (reload_completed
+ && (code == LABEL_REF
+ || (code == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ return 1;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
+ return 0;
+
+ else if (code == PLUS)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+
+ return ((arm_address_register_rtx_p (xop0, strict_p)
+ && arm_legitimate_index_p (mode, xop1, outer, strict_p))
+ || (arm_address_register_rtx_p (xop1, strict_p)
+ && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
+ }
+
+#if 0
+ /* Reload currently can't handle MINUS, so disable this for now */
+ else if (GET_CODE (x) == MINUS)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+
+ return (arm_address_register_rtx_p (xop0, strict_p)
+ && arm_legitimate_index_p (mode, xop1, outer, strict_p));
+ }
+#endif
+
+ else if (GET_MODE_CLASS (mode) != MODE_FLOAT
+ && code == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x)
+ && ! (flag_pic
+ && symbol_mentioned_p (get_pool_constant (x))
+ && ! pcrel_constant_p (get_pool_constant (x))))
+ return 1;
+
+ return 0;
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Return nonzero if X is a valid Thumb-2 address operand. */
+int
+thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
+{
+ bool use_ldrd;
+ enum rtx_code code = GET_CODE (x);
+
+ if (arm_address_register_rtx_p (x, strict_p))
+ return 1;
+
+ use_ldrd = (TARGET_LDRD
+ && (mode == DImode
+ || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
+
+ /* APPLE LOCAL begin 6293989 */
+ if (TARGET_NEON && VECTOR_MODE_P (mode)
+ && (code == PRE_DEC || code == PRE_INC || code == POST_DEC))
+ return 0;
+ /* APPLE LOCAL end 6293989 */
+
+ if (code == POST_INC || code == PRE_DEC
+ || ((code == PRE_INC || code == POST_DEC)
+ && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
+ return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
+
+ else if ((code == POST_MODIFY || code == PRE_MODIFY)
+ && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
+ && GET_CODE (XEXP (x, 1)) == PLUS
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ {
+ /* Thumb-2 only has autoincrement by constant. */
+ rtx addend = XEXP (XEXP (x, 1), 1);
+ HOST_WIDE_INT offset;
+
+ if (GET_CODE (addend) != CONST_INT)
+ return 0;
+
+ offset = INTVAL(addend);
+ if (GET_MODE_SIZE (mode) <= 4)
+ return (offset > -256 && offset < 256);
+
+ return (use_ldrd && offset > -1024 && offset < 1024
+ && (offset & 3) == 0);
+ }
+
+ /* After reload constants split into minipools will have addresses
+ from a LABEL_REF. */
+ else if (reload_completed
+ && (code == LABEL_REF
+ || (code == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ return 1;
+
+ /* APPLE LOCAL v7 support. Merge from Codesourcery */
+ else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
+ return 0;
+
+ else if (code == PLUS)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+
+ return ((arm_address_register_rtx_p (xop0, strict_p)
+ && thumb2_legitimate_index_p (mode, xop1, strict_p))
+ || (arm_address_register_rtx_p (xop1, strict_p)
+ && thumb2_legitimate_index_p (mode, xop0, strict_p)));
+ }
+
+ else if (GET_MODE_CLASS (mode) != MODE_FLOAT
+ && code == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x)
+ && ! (flag_pic
+ && symbol_mentioned_p (get_pool_constant (x))
+ && ! pcrel_constant_p (get_pool_constant (x))))
+ return 1;
+
+ return 0;
+}
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Return nonzero if INDEX is valid for an address index operand in
+ ARM state. */
+static int
+arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
+ int strict_p)
+{
+ HOST_WIDE_INT range;
+ enum rtx_code code = GET_CODE (index);
+
+ /* Standard coprocessor addressing modes. */
+ if (TARGET_HARD_FLOAT
+ /* APPLE LOCAL ARM 4480764 */
+ && (TARGET_FPA || TARGET_MAVERICK || TARGET_VFP)
+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || (TARGET_MAVERICK && mode == DImode)))
+ return (code == CONST_INT && INTVAL (index) < 1024
+ && INTVAL (index) > -1024
+ && (INTVAL (index) & 3) == 0);
+
+ if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
+ {
+ /* For DImode assume values will usually live in core regs
+ and only allow LDRD addressing modes. */
+ if (!TARGET_LDRD || mode != DImode)
+ return (code == CONST_INT
+ && INTVAL (index) < 1024
+ && INTVAL (index) > -1024
+ && (INTVAL (index) & 3) == 0);
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (TARGET_NEON
+ /* APPLE LOCAL 6150882 use thumb2 by default for v7 */
+ && VECTOR_MODE_P (mode)
+ && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode)))
+ return (code == CONST_INT
+ && INTVAL (index) < 1016
+ && INTVAL (index) > -1024
+ && (INTVAL (index) & 3) == 0);
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if (arm_address_register_rtx_p (index, strict_p)
+ && (GET_MODE_SIZE (mode) <= 4))
+ return 1;
+
+ if (mode == DImode || mode == DFmode)
+ {
+ if (code == CONST_INT)
+ {
+ HOST_WIDE_INT val = INTVAL (index);
+
+ if (TARGET_LDRD)
+ return val > -256 && val < 256;
+ else
+ return val > -4096 && val < 4092;
+ }
+
+ return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
+ }
+
+ if (GET_MODE_SIZE (mode) <= 4
+ && ! (arm_arch4
+ && (mode == HImode
+ || (mode == QImode && outer == SIGN_EXTEND))))
+ {
+ if (code == MULT)
+ {
+ rtx xiop0 = XEXP (index, 0);
+ rtx xiop1 = XEXP (index, 1);
+
+ return ((arm_address_register_rtx_p (xiop0, strict_p)
+ && power_of_two_operand (xiop1, SImode))
+ || (arm_address_register_rtx_p (xiop1, strict_p)
+ && power_of_two_operand (xiop0, SImode)));
+ }
+ else if (code == LSHIFTRT || code == ASHIFTRT
+ || code == ASHIFT || code == ROTATERT)
+ {
+ rtx op = XEXP (index, 1);
+
+ return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
+ && GET_CODE (op) == CONST_INT
+ && INTVAL (op) > 0
+ && INTVAL (op) <= 31);
+ }
+ }
+
+ /* For ARM v4 we may be doing a sign-extend operation during the
+ load. */
+ if (arm_arch4)
+ {
+ if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
+ range = 256;
+ else
+ range = 4096;
+ }
+ else
+ range = (mode == HImode) ? 4095 : 4096;
+
+ return (code == CONST_INT
+ && INTVAL (index) < range
+ && INTVAL (index) > -range);
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Return true if OP is a valid index scaling factor for a Thumb-2
+ address index operand, i.e. 1, 2, 4 or 8. */
+static bool
+thumb2_index_mul_operand (rtx op)
+{
+ HOST_WIDE_INT val;
+
+ if (GET_CODE(op) != CONST_INT)
+ return false;
+
+ val = INTVAL(op);
+ return (val == 1 || val == 2 || val == 4 || val == 8);
+}
+
+/* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
+static int
+thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
+{
+ enum rtx_code code = GET_CODE (index);
+
+ /* ??? Combine arm and thumb2 coprocessor addressing modes. */
+ /* Standard coprocessor addressing modes. */
+ if (TARGET_HARD_FLOAT
+ /* APPLE LOCAL 7109945 floating point stores should use vstr */
+ && (TARGET_FPA || TARGET_MAVERICK || TARGET_VFP)
+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || (TARGET_MAVERICK && mode == DImode)))
+ return (code == CONST_INT && INTVAL (index) < 1024
+ /* APPLE LOCAL 7198870 STR only allows down to -255 offset */
+ && INTVAL (index) > -256
+ && (INTVAL (index) & 3) == 0);
+
+ if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
+ return (code == CONST_INT
+ && INTVAL (index) < 1024
+ && INTVAL (index) > -1024
+ && (INTVAL (index) & 3) == 0);
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (TARGET_NEON
+ /* APPLE LOCAL 6150882 use thumb2 by default for v7 */
+ && VECTOR_MODE_P (mode)
+ && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode)))
+ return (code == CONST_INT
+ && INTVAL (index) < 1016
+ && INTVAL (index) > -1024
+ && (INTVAL (index) & 3) == 0);
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if (arm_address_register_rtx_p (index, strict_p)
+ && (GET_MODE_SIZE (mode) <= 4))
+ return 1;
+
+ if (mode == DImode || mode == DFmode)
+ {
+ HOST_WIDE_INT val;
+
+ /* ??? Can we assume ldrd for thumb2? */
+ /* Thumb-2 ldrd only has reg+const addressing modes. */
+ if (code != CONST_INT)
+ return 0;
+
+ val = INTVAL (index);
+
+ /* ldrd supports offsets of +-1020.
+ However the ldr fallback does not. */
+ return val > -256 && val < 256 && (val & 3) == 0;
+ }
+
+ if (code == MULT)
+ {
+ rtx xiop0 = XEXP (index, 0);
+ rtx xiop1 = XEXP (index, 1);
+
+ return ((arm_address_register_rtx_p (xiop0, strict_p)
+ && thumb2_index_mul_operand (xiop1))
+ || (arm_address_register_rtx_p (xiop1, strict_p)
+ && thumb2_index_mul_operand (xiop0)));
+ }
+ else if (code == ASHIFT)
+ {
+ rtx op = XEXP (index, 1);
+
+ return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
+ && GET_CODE (op) == CONST_INT
+ && INTVAL (op) > 0
+ && INTVAL (op) <= 3);
+ }
+
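+ /* Thumb-2 ldr/str accept a 12-bit positive immediate offset but only
+ an 8-bit negative one, hence the asymmetric range. */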
+ return (code == CONST_INT
+ && INTVAL (index) < 4096
+ && INTVAL (index) > -256);
+}
+
+/* Return nonzero if X is valid as a 16-bit Thumb state base register. */
+static int
+thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
+{
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ int regno;
+
+ if (GET_CODE (x) != REG)
+ return 0;
+
+ regno = REGNO (x);
+
+ if (strict_p)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
+
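+ /* In the non-strict case any pseudo may still end up in a valid base
+ register, so accept those as well. */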
+ return (regno <= LAST_LO_REGNUM
+ || regno > LAST_VIRTUAL_REGISTER
+ || regno == FRAME_POINTER_REGNUM
+ || (GET_MODE_SIZE (mode) >= 4
+ && (regno == STACK_POINTER_REGNUM
+ || regno >= FIRST_PSEUDO_REGISTER
+ || x == hard_frame_pointer_rtx
+ || x == arg_pointer_rtx)));
+}
+
+/* Return nonzero if x is a legitimate index register. This is the case
+ for any base register that can access a QImode object. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+inline static int
+thumb1_index_register_rtx_p (rtx x, int strict_p)
+{
+ return thumb1_base_register_rtx_p (x, QImode, strict_p);
+}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Return nonzero if x is a legitimate 16-bit Thumb-state address.
+
+ The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64.
+
+ ??? Verify whether the above is the right approach.
+
+ ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also.
+
+ ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems.
+
+ Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the
+ reload pass starts. This is so that eliminating such addresses
+ into stack based ones won't produce impossible code. */
+int
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
+{
+ /* ??? Not clear if this is right. Experiment. */
+ if (GET_MODE_SIZE (mode) < 4
+ && !(reload_in_progress || reload_completed)
+ && (reg_mentioned_p (frame_pointer_rtx, x)
+ || reg_mentioned_p (arg_pointer_rtx, x)
+ || reg_mentioned_p (virtual_incoming_args_rtx, x)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, x)
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
+ || reg_mentioned_p (virtual_stack_vars_rtx, x)))
+ return 0;
+
+ /* Accept any base register. SP only in SImode or larger. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else if (thumb1_base_register_rtx_p (x, mode, strict_p))
+ return 1;
+
+ /* This is PC relative data before arm_reorg runs. */
+ else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
+ && GET_CODE (x) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
+ return 1;
+
+ /* This is PC relative data after arm_reorg runs. */
+ else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
+ && (GET_CODE (x) == LABEL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ return 1;
+
+ /* Post-inc indexing only supported for SImode and larger. */
+ else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
+ return 1;
+
+ else if (GET_CODE (x) == PLUS)
+ {
+ /* REG+REG address can be any two index registers. */
+ /* We disallow FRAME+REG addressing since we know that FRAME
+ will be replaced with STACK, and SP relative addressing only
+ permits SP+OFFSET. */
+ if (GET_MODE_SIZE (mode) <= 4
+ && XEXP (x, 0) != frame_pointer_rtx
+ && XEXP (x, 1) != frame_pointer_rtx
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
+ && thumb1_index_register_rtx_p (XEXP (x, 1), strict_p))
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ return 1;
+
+ /* REG+const has 5-7 bit offset for non-SP registers. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
+ || XEXP (x, 0) == arg_pointer_rtx)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
+ return 1;
+
+ /* REG+const has 10 bit offset for SP, but only SImode and
+ larger is supported. */
+ /* ??? Should probably check for DI/DFmode overflow here
+ just like GO_IF_LEGITIMATE_OFFSET does. */
+ else if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
+ && GET_MODE_SIZE (mode) >= 4
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) >= 0
+ && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
+ && (INTVAL (XEXP (x, 1)) & 3) == 0)
+ return 1;
+
+ else if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
+ && GET_MODE_SIZE (mode) >= 4
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) & 3) == 0)
+ return 1;
+ }
+
+ else if (GET_MODE_CLASS (mode) != MODE_FLOAT
+ && GET_MODE_SIZE (mode) == 4
+ && GET_CODE (x) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x)
+ && ! (flag_pic
+ && symbol_mentioned_p (get_pool_constant (x))
+ && ! pcrel_constant_p (get_pool_constant (x))))
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if VAL can be used as an offset in a Thumb-state address
+ instruction of mode MODE. */
+int
+thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
+{
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 1:
+ return val >= 0 && val < 32;
+
+ case 2:
+ return val >= 0 && val < 64 && (val & 1) == 0;
+
+ default:
+ return (val >= 0
+ && (val + GET_MODE_SIZE (mode)) <= 128
+ && (val & 3) == 0);
+ }
+}
+
+/* Build the SYMBOL_REF for __tls_get_addr. */
+
+static GTY(()) rtx tls_get_addr_libfunc;
+
+static rtx
+get_tls_get_addr (void)
+{
+ if (!tls_get_addr_libfunc)
+ tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
+ return tls_get_addr_libfunc;
+}
+
+static rtx
+arm_load_tp (rtx target)
+{
+ if (!target)
+ target = gen_reg_rtx (SImode);
+
+ if (TARGET_HARD_TP)
+ {
+ /* Can return in any reg. */
+ emit_insn (gen_load_tp_hard (target));
+ }
+ else
+ {
+ /* Always returned in r0. Immediately copy the result into a pseudo,
+ otherwise other uses of r0 (e.g. setting up function arguments) may
+ clobber the value. */
+
+ rtx tmp;
+
+ emit_insn (gen_load_tp_soft ());
+
+ tmp = gen_rtx_REG (SImode, 0);
+ emit_move_insn (target, tmp);
+ }
+ return target;
+}
+
+static rtx
+load_tls_operand (rtx x, rtx reg)
+{
+ rtx tmp;
+
+ if (reg == NULL_RTX)
+ reg = gen_reg_rtx (SImode);
+
+ tmp = gen_rtx_CONST (SImode, x);
+
+ emit_move_insn (reg, tmp);
+
+ return reg;
+}
+
+static rtx
+arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
+{
+ rtx insns, label, labelno, sum;
+
+ start_sequence ();
+
+ labelno = GEN_INT (pic_labelno++);
+ label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
+ label = gen_rtx_CONST (VOIDmode, label);
+
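+ /* Bundle the symbol, the relocation kind, the PIC label and the
+ pc-relative bias (8 in ARM state, 4 in Thumb state) into a single
+ UNSPEC_TLS operand for the output routines. */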
+ sum = gen_rtx_UNSPEC (Pmode,
+ gen_rtvec (4, x, GEN_INT (reloc), label,
+ GEN_INT (TARGET_ARM ? 8 : 4)),
+ UNSPEC_TLS);
+ reg = load_tls_operand (sum, reg);
+
+ if (TARGET_ARM)
+ emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (TARGET_THUMB2)
+ {
+ rtx tmp;
+ /* Thumb-2 only allows very limited access to the PC. Calculate
+ the address in a temporary register. */
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
+ emit_insn (gen_addsi3(reg, reg, tmp));
+ }
+ else /* TARGET_THUMB1 */
+ emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
+ Pmode, 1, reg, Pmode);
+
+ insns = get_insns ();
+ end_sequence ();
+
+ return insns;
+}
+
+rtx
+legitimize_tls_address (rtx x, rtx reg)
+{
+ rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
+ unsigned int model = SYMBOL_REF_TLS_MODEL (x);
+
+ switch (model)
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
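+ /* General dynamic: compute the address with a call to
+ __tls_get_addr. emit_libcall_block attaches an equivalence for X
+ so repeated accesses can be CSEd. */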
+ insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
+ dest = gen_reg_rtx (Pmode);
+ emit_libcall_block (insns, dest, ret, x);
+ return dest;
+
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
+
+ /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
+ share the LDM result with other LD model accesses. */
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
+ UNSPEC_TLS);
+ dest = gen_reg_rtx (Pmode);
+ emit_libcall_block (insns, dest, ret, eqv);
+
+ /* Load the addend. */
+ addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
+ UNSPEC_TLS);
+ addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
+ return gen_rtx_PLUS (Pmode, dest, addend);
+
+ case TLS_MODEL_INITIAL_EXEC:
+ labelno = GEN_INT (pic_labelno++);
+ label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
+ label = gen_rtx_CONST (VOIDmode, label);
+ sum = gen_rtx_UNSPEC (Pmode,
+ gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
+ GEN_INT (TARGET_ARM ? 8 : 4)),
+ UNSPEC_TLS);
+ reg = load_tls_operand (sum, reg);
+
+ if (TARGET_ARM)
+ emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (TARGET_THUMB2)
+ {
+ rtx tmp;
+ /* Thumb-2 only allows very limited access to the PC. Calculate
+ the address in a temporary register. */
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
+ emit_insn (gen_addsi3(reg, reg, tmp));
+ emit_move_insn (reg, gen_const_mem (SImode, reg));
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else
+ {
+ emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
+ emit_move_insn (reg, gen_const_mem (SImode, reg));
+ }
+
+ tp = arm_load_tp (NULL_RTX);
+
+ return gen_rtx_PLUS (Pmode, tp, reg);
+
+ case TLS_MODEL_LOCAL_EXEC:
+ tp = arm_load_tp (NULL_RTX);
+
+ reg = gen_rtx_UNSPEC (Pmode,
+ gen_rtvec (2, x, GEN_INT (TLS_LE32)),
+ UNSPEC_TLS);
+ reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
+
+ return gen_rtx_PLUS (Pmode, tp, reg);
+
+ default:
+ abort ();
+ }
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address. */
+rtx
+arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
+{
+ if (arm_tls_symbol_p (x))
+ return legitimize_tls_address (x, NULL_RTX);
+
+ /* APPLE LOCAL begin ARM addresses involving large constants */
+ if (flag_pic)
+ {
+ /* We need to find and carefully transform any SYMBOL and LABEL
+ references, so go back to the original address expression. */
+ rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
+
+ if (new_x != orig_x)
+ x = new_x;
+ }
+ else if (GET_CODE (x) == PLUS)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+
+ if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
+ xop0 = force_reg (SImode, xop0);
+
+ if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1)
+ && GET_CODE (xop1) != CONST_INT)
+ xop1 = force_reg (SImode, xop1);
+
+ if (GET_CODE (xop1) == CONST_INT)
+ {
+ HOST_WIDE_INT n, low_n;
+ rtx base_reg, val;
+
+ /* Look for
+ (+ (+ (foo, SFP) const)). It is better to rearrange this as
+ (+ (foo (+ (SFP, const))). The eventual SP + const1 + const will
+ get folded. */
+
+ if (GET_CODE (xop0) == PLUS)
+ {
+ rtx xop00 = XEXP (xop0, 0);
+ rtx xop01 = XEXP (xop0, 1);
+
+ if (xop01 == virtual_stack_vars_rtx)
+ {
+ base_reg = gen_reg_rtx (SImode);
+ val = force_operand (gen_rtx_PLUS (SImode, xop01, xop1),
+ NULL_RTX);
+ emit_move_insn (base_reg, val);
+ /* Canonical form requires some non-reg ops to be first. */
+ x = gen_rtx_PLUS (SImode, xop00, base_reg);
+ return x;
+ }
+ }
+
+ n = INTVAL (xop1);
+ /* The size of constant that fits in a load or store instruction
+ is different for different-sized operations. Break N into
+ low_n (the part that will fit in the instruction) and n
+ (the part that won't). */
+ /* VFP addressing modes actually allow greater offsets, but for
+ now we just stick with the lowest common denominator. */
+ if (mode == DImode
+ || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
+ {
+ low_n = n & 0x0f;
+ n &= ~0x0f;
+ if (low_n > 4)
+ {
+ n += 16;
+ low_n -= 16;
+ }
+ }
+ else if ((mode == HImode || mode == QImode) && arm_arch4)
+ {
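+ /* ldrh/ldrsb and friends (addressing mode 3) only have an 8-bit
+ offset field, hence the 0xff mask. */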
+ low_n = n >= 0 ? (n & 0xff) : -((-n) & 0xff);
+ n -= low_n;
+ }
+ else
+ {
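+ /* Plain ldr/ldrb have a 12-bit offset field; TImode gets no
+ immediate offset at all. */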
+ low_n = ((mode) == TImode ? 0
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
+ n -= low_n;
+ }
+
+ if (n != 0)
+ {
+ /* Emit an auxiliary instruction to compute base+high_part
+ into a register base_reg, then return base_reg+low_part. */
+ base_reg = gen_reg_rtx (SImode);
+ val = force_operand (plus_constant (xop0, n), NULL_RTX);
+ emit_move_insn (base_reg, val);
+ x = plus_constant (base_reg, low_n);
+ }
+ else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
+ x = gen_rtx_PLUS (SImode, xop0, xop1);
+ }
+ else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
+ x = gen_rtx_PLUS (SImode, xop0, xop1);
+ }
+
+ /* XXX We don't allow MINUS any more -- see comment in
+ arm_legitimate_address_p (). */
+ else if (GET_CODE (x) == MINUS)
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+
+ if (CONSTANT_P (xop0))
+ xop0 = force_reg (SImode, xop0);
+
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
+ xop1 = force_reg (SImode, xop1);
+
+ if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
+ x = gen_rtx_MINUS (SImode, xop0, xop1);
+ }
+
+ /* Make sure to take full advantage of the pre-indexed addressing mode
+ with absolute addresses, which often allows the base register to be
+ factorized for multiple adjacent memory references; it might even
+ allow the minipool to be avoided entirely. */
+ else if (GET_CODE (x) == CONST_INT && optimize > 0)
+ {
+ unsigned int bits;
+ HOST_WIDE_INT mask, base, index;
+ rtx base_reg;
+
+ /* ldr and ldrb can use a 12-bit index, while ldrsb and the rest can
+ only use an 8-bit index. So let's use a 12-bit index for SImode
+ only, and hope that arm_gen_constant will enable ldrb to use more
+ bits. */
+ bits = (mode == SImode) ? 12 : 8;
+ mask = (1 << bits) - 1;
+ base = INTVAL (x) & ~mask;
+ index = INTVAL (x) & mask;
+ if (bit_count (base & 0xffffffff) > (32 - bits)/2)
+ {
+ /* It'll most probably be more efficient to generate the base
+ with more bits set and use a negative index instead. */
+ base |= mask;
+ index -= mask;
+ }
+ base_reg = force_reg (SImode, GEN_INT (base));
+ x = plus_constant (base_reg, index);
+ }
+ /* APPLE LOCAL end ARM addresses involving large constants */
+
+ return x;
+}
+
+
+/* Try machine-dependent ways of modifying an illegitimate Thumb address
+ to be legitimate. If we find one, return the new, valid address. */
+rtx
+thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
+{
+ if (arm_tls_symbol_p (x))
+ return legitimize_tls_address (x, NULL_RTX);
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
+ || INTVAL (XEXP (x, 1)) < 0))
+ {
+ rtx xop0 = XEXP (x, 0);
+ rtx xop1 = XEXP (x, 1);
+ HOST_WIDE_INT offset = INTVAL (xop1);
+
+ /* Try and fold the offset into a biasing of the base register and
+ then offsetting that. Don't do this when optimizing for space
+ since it can cause too many CSEs. */
+ if (optimize_size && offset >= 0
+ && offset < 256 + 31 * GET_MODE_SIZE (mode))
+ {
+ HOST_WIDE_INT delta;
+
+ if (offset >= 256)
+ delta = offset - (256 - GET_MODE_SIZE (mode));
+ else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
+ delta = 31 * GET_MODE_SIZE (mode);
+ else
+ delta = offset & (~31 * GET_MODE_SIZE (mode));
+
+ xop0 = force_operand (plus_constant (xop0, offset - delta),
+ NULL_RTX);
+ x = plus_constant (xop0, delta);
+ }
+ else if (offset < 0 && offset > -256)
+ /* Small negative offsets are best done with a subtract before the
+ dereference, since forcing these into a register normally takes two
+ instructions. */
+ x = force_operand (x, NULL_RTX);
+ else
+ {
+ /* For the remaining cases, force the constant into a register. */
+ xop1 = force_reg (SImode, xop1);
+ x = gen_rtx_PLUS (SImode, xop0, xop1);
+ }
+ }
+ else if (GET_CODE (x) == PLUS
+ && s_register_operand (XEXP (x, 1), SImode)
+ && !s_register_operand (XEXP (x, 0), SImode))
+ {
+ rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
+
+ x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
+ }
+
+ if (flag_pic)
+ {
+ /* We need to find and carefully transform any SYMBOL and LABEL
+ references, so go back to the original address expression. */
+ rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
+
+ if (new_x != orig_x)
+ x = new_x;
+ }
+
+ return x;
+}
+
+rtx
+thumb_legitimize_reload_address (rtx *x_p,
+ enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED)
+{
+ rtx x = *x_p;
+
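+ /* A narrow access off the stack pointer whose offset is out of range
+ must have the whole address reloaded into a base register. */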
+ if (GET_CODE (x) == PLUS
+ && GET_MODE_SIZE (mode) < 4
+ && REG_P (XEXP (x, 0))
+ && XEXP (x, 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
+ {
+ rtx orig_x = x;
+
+ x = copy_rtx (x);
+ push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
+ Pmode, VOIDmode, 0, 0, opnum, type);
+ return x;
+ }
+
+ /* If both registers are hi-regs, then it's better to reload the
+ entire expression rather than each register individually. That
+ only requires one reload register rather than two. */
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && REG_P (XEXP (x, 1))
+ && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
+ && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
+ {
+ rtx orig_x = x;
+
+ x = copy_rtx (x);
+ push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
+ Pmode, VOIDmode, 0, 0, opnum, type);
+ return x;
+ }
+
+ return NULL;
+}
+
+/* Test for various thread-local symbols. */
+
+/* Return TRUE if X is a thread-local symbol. */
+
+static bool
+arm_tls_symbol_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return false;
+
+ return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Helper for arm_tls_referenced_p. */
+
+static int
+arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (*x) == SYMBOL_REF)
+ return SYMBOL_REF_TLS_MODEL (*x) != 0;
+
+ /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
+ TLS offsets, not real symbol references. */
+ if (GET_CODE (*x) == UNSPEC
+ && XINT (*x, 1) == UNSPEC_TLS)
+ return -1;
+
+ return 0;
+}
+
+/* Return TRUE if X contains any TLS symbol references. */
+
+bool
+arm_tls_referenced_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
+}
+
+/* APPLE LOCAL begin ARM -mdynamic-no-pic support */
+static bool
+arm_cannot_force_const_mem (rtx x)
+{
+ return arm_tls_referenced_p (x)
+ || ! LEGITIMATE_INDIRECT_OPERAND_P (x);
+}
+/* APPLE LOCAL end ARM -mdynamic-no-pic support */
+
+#define REG_OR_SUBREG_REG(X) \
+ (GET_CODE (X) == REG \
+ || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+
+#define REG_OR_SUBREG_RTX(X) \
+ (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+
+#ifndef COSTS_N_INSNS
+#define COSTS_N_INSNS(N) ((N) * 4 - 2)
+#endif
+static inline int
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ switch (code)
+ {
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATERT:
+ case PLUS:
+ case MINUS:
+ case COMPARE:
+ case NEG:
+ case NOT:
+ return COSTS_N_INSNS (1);
+
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int cycles = 0;
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
+
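+ /* The multiplier can terminate early: roughly one cycle per two
+ significant bits of the constant operand. */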
+ while (i)
+ {
+ i >>= 2;
+ cycles++;
+ }
+ return COSTS_N_INSNS (2) + cycles;
+ }
+ return COSTS_N_INSNS (1) + 16;
+
+ case SET:
+ return (COSTS_N_INSNS (1)
+ + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+ + (GET_CODE (SET_DEST (x)) == MEM)));
+
+ case CONST_INT:
+ if (outer == SET)
+ {
+ if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
+ return 0;
+ if (thumb_shiftable_const (INTVAL (x)))
+ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (3);
+ }
+ else if ((outer == PLUS || outer == COMPARE)
+ && INTVAL (x) < 256 && INTVAL (x) > -256)
+ return 0;
+ else if (outer == AND
+ && INTVAL (x) < 256 && INTVAL (x) >= -256)
+ return COSTS_N_INSNS (1);
+ else if (outer == ASHIFT || outer == ASHIFTRT
+ || outer == LSHIFTRT)
+ return 0;
+ return COSTS_N_INSNS (2);
+
+ case CONST:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return COSTS_N_INSNS (3);
+
+ case UDIV:
+ case UMOD:
+ case DIV:
+ case MOD:
+ return 100;
+
+ case TRUNCATE:
+ return 99;
+
+ case AND:
+ case XOR:
+ case IOR:
+ /* XXX guess. */
+ return 8;
+
+ case MEM:
+ /* XXX another guess. */
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ ? 4 : 0));
+
+ case IF_THEN_ELSE:
+ /* XXX a guess. */
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case ZERO_EXTEND:
+ /* XXX still guessing. */
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ return 99;
+ }
+
+ default:
+ return 99;
+ }
+}
+
+/* APPLE LOCAL begin ARM size variant of thumb costs */
+/* This is very much a work in progress; it is just thumb_rtx_costs
+ with modifications for size as discovered. Currently, the costs
+ for MULT, AND, XOR, IOR have been fixed; all of these are single
+ instructions. (Not for DImode, but that's not taken into account
+ anywhere here.) */
+
+static inline int
+thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ switch (code)
+ {
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATERT:
+ case PLUS:
+ case MINUS:
+ case COMPARE:
+ case NEG:
+ case NOT:
+ case AND:
+ case XOR:
+ case IOR:
+ case MULT:
+ return COSTS_N_INSNS (1);
+
+ case SET:
+ return (COSTS_N_INSNS (1)
+ + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+ + (GET_CODE (SET_DEST (x)) == MEM)));
+
+ case CONST_INT:
+ if (outer == SET)
+ {
+ if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
+ return 0;
+ if (thumb_shiftable_const (INTVAL (x)))
+ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (3);
+ }
+ else if ((outer == PLUS || outer == COMPARE)
+ && INTVAL (x) < 256 && INTVAL (x) > -256)
+ return 0;
+ else if (outer == AND
+ && INTVAL (x) < 256 && INTVAL (x) >= -256)
+ return COSTS_N_INSNS (1);
+ else if (outer == ASHIFT || outer == ASHIFTRT
+ || outer == LSHIFTRT)
+ return 0;
+ return COSTS_N_INSNS (2);
+
+ case CONST:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return COSTS_N_INSNS (3);
+
+ case UDIV:
+ case UMOD:
+ case DIV:
+ case MOD:
+ return 100;
+
+ case TRUNCATE:
+ return 99;
+
+ case MEM:
+ /* XXX another guess. */
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ ? 4 : 0));
+
+ case IF_THEN_ELSE:
+ /* XXX a guess. */
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case ZERO_EXTEND:
+ /* XXX still guessing. */
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ return 99;
+ }
+
+ default:
+ return 99;
+ }
+}
+/* APPLE LOCAL end ARM size variant of thumb costs */
+
+/* Worker routine for arm_rtx_costs. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* ??? This needs updating for thumb2. */
+static inline int
+arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code subcode;
+ int extra_cost;
+
+ switch (code)
+ {
+ case MEM:
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + (GET_CODE (x) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
+
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ return optimize_size ? COSTS_N_INSNS (2) : 100;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ return 4;
+ /* Fall through */
+ case ROTATERT:
+ if (mode != SImode)
+ return 8;
+ /* Fall through */
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ if (mode == DImode)
+ return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
+ + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 8));
+ return (1 + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 4)
+ + ((GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT))
+ ? 0 : 4));
+
+ case MINUS:
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (GET_CODE (XEXP (x, 1)) == MULT && mode == SImode && arm_arch_thumb2)
+ {
+ extra_cost = rtx_cost (XEXP (x, 1), code);
+ if (!REG_OR_SUBREG_REG (XEXP (x, 0)))
+ extra_cost += 4 * ARM_NUM_REGS (mode);
+ return extra_cost;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ if (mode == DImode)
+ return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
+ ? 0 : 8));
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && arm_const_double_rtx (XEXP (x, 1))))
+ ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ && arm_const_double_rtx (XEXP (x, 0))))
+ ? 0 : 8));
+
+ if (((GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))
+ && REG_OR_SUBREG_REG (XEXP (x, 1))))
+ || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
+ || subcode == ASHIFTRT || subcode == LSHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
+ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
+ && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
+ || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ && REG_OR_SUBREG_REG (XEXP (x, 0))))
+ return 1;
+ /* Fall through */
+
+ case PLUS:
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ extra_cost = rtx_cost (XEXP (x, 0), code);
+ if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
+ extra_cost += 4 * ARM_NUM_REGS (mode);
+ return extra_cost;
+ }
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && arm_const_double_rtx (XEXP (x, 1))))
+ ? 0 : 8));
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ extra_cost = 0;
+
+ /* Normally the frame registers will be split into reg+const during
+ reload, so it is a bad idea to combine them with other instructions,
+ since then they might not be moved outside of loops. As a compromise
+ we allow integration with ops that have a constant as their second
+ operand. */
+ if ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
+ extra_cost = 4;
+
+ if (mode == DImode)
+ return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
+ ? 0 : 8));
+
+ if (REG_OR_SUBREG_REG (XEXP (x, 0)))
+ return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
+ ? 0 : 4));
+
+ /* APPLE LOCAL begin ARM 4652753 */
+ /* If the previous insn feeds into the shifted operand of this one,
+ there is a 1 cycle delay. We can't tell here whether this will
+ be the case or not. Model it for now, as this seems to lead to
+ better decisions about splitting up multiply-by-constant. */
+ else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
+ return (1 + extra_cost
+ + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
+ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
+ && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
+ ? 1 : 4));
+ /* APPLE LOCAL end ARM 4652753 */
+
+ return 8;
+
+ case MULT:
+ /* This should have been handled by the CPU specific routines. */
+ gcc_unreachable ();
+
+ case TRUNCATE:
+ if (arm_arch3m && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
+ return 8;
+ return 99;
+
+ case NEG:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
+ /* Fall through */
+ case NOT:
+ if (mode == DImode)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case COMPARE:
+ return 1;
+
+ case ABS:
+ return 4 + (mode == DImode ? 4 : 0);
+
+ case SIGN_EXTEND:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? value extensions are cheaper on armv6. */
+ if (GET_MODE (XEXP (x, 0)) == QImode)
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ /* Fall through */
+ case ZERO_EXTEND:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case V8QImode:
+ case V4HImode:
+ case V2SImode:
+ case V4QImode:
+ case V2HImode:
+ return 1;
+
+ default:
+ gcc_unreachable ();
+ }
+ gcc_unreachable ();
+
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
+ return outer == SET ? 2 : -1;
+ else if (outer == AND
+ && const_ok_for_arm (~INTVAL (x)))
+ return -1;
+ else if ((outer == COMPARE
+ || outer == PLUS || outer == MINUS)
+ && const_ok_for_arm (-INTVAL (x)))
+ return -1;
+ else
+ return 5;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return 6;
+
+ case CONST_DOUBLE:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (arm_const_double_rtx (x) || vfp3_const_double_rtx (x))
+ return outer == SET ? 2 : -1;
+ else if ((outer == COMPARE || outer == PLUS)
+ && neg_const_double_rtx_ok_for_fpa (x))
+ return -1;
+ return 7;
+
+ default:
+ return 99;
+ }
+}
+
+/* RTX costs when optimizing for size. */
+static bool
+arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ if (TARGET_THUMB)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ *total = thumb1_size_rtx_costs (x, code, outer_code);
+ return true;
+ }
+
+ switch (code)
+ {
+ case MEM:
+ /* A memory access costs 1 insn if the mode is small or the address is
+ a single register; otherwise it costs one insn per word. */
+ if (REG_P (XEXP (x, 0)))
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+ return true;
+
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ /* Needs a libcall, so it costs about this. */
+ *total = COSTS_N_INSNS (2);
+ return false;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ {
+ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
+ return true;
+ }
+ /* Fall through */
+ case ROTATERT:
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
+ return true;
+ }
+ else if (mode == SImode)
+ {
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
+ /* Slightly disparage register shifts, but not by much. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += 1 + rtx_cost (XEXP (x, 1), code);
+ return true;
+ }
+
+ /* Needs a libcall. */
+ *total = COSTS_N_INSNS (2);
+ return false;
+
+ case MINUS:
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+
+ if (mode == SImode)
+ {
+ enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
+ enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
+
+ if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
+ || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
+ || subcode1 == ROTATE || subcode1 == ROTATERT
+ || subcode1 == ASHIFT || subcode1 == LSHIFTRT
+ || subcode1 == ASHIFTRT)
+ {
+ /* It's just the cost of the two operands. */
+ *total = 0;
+ return false;
+ }
+
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+ return false;
+
+ case PLUS:
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ if (mode == SImode)
+ {
+ enum rtx_code subcode = GET_CODE (XEXP (x, 0));
+
+ if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || (code == AND && subcode == NOT))
+ {
+ /* It's just the cost of the two operands. */
+ *total = 0;
+ return false;
+ }
+ }
+
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+ return false;
+
+ /* APPLE LOCAL begin DImode multiply enhancement */
+ case MULT:
+ if (mode == DImode)
+ {
+ if (((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
+ || (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ && GET_CODE (XEXP (x, 1)) == ZERO_EXTEND))
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+ && GET_MODE (XEXP (XEXP (x, 1), 0)) == SImode)
+ {
+ /* SMULL, etc., perform the sign extension for free. */
+ *total = COSTS_N_INSNS (1)
+ + rtx_cost (XEXP (XEXP (x, 0), 0), MULT)
+ + rtx_cost (XEXP (XEXP (x, 1), 0), MULT);
+ return true;
+ }
+ else
+ {
+ /* Broken into 3 insns later, plus the cost of the operands.
+ Does not allow for Cirrus instructions. */
+ *total = COSTS_N_INSNS (3);
+ return false;
+ }
+ }
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+ return false;
+ /* APPLE LOCAL end DImode multiply enhancement */
+
+ case NEG:
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ *total = COSTS_N_INSNS (1);
+ /* Fall through */
+ case NOT:
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+
+ return false;
+
+ case IF_THEN_ELSE:
+ *total = 0;
+ return false;
+
+ case COMPARE:
+ if (cc_register (XEXP (x, 0), VOIDmode))
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case ABS:
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
+ return false;
+
+ case SIGN_EXTEND:
+ *total = 0;
+ if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
+ {
+ if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
+ *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
+ }
+ if (mode == DImode)
+ *total += COSTS_N_INSNS (1);
+ return false;
+
+ case ZERO_EXTEND:
+ *total = 0;
+ if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
+ {
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ *total += COSTS_N_INSNS (1);
+ break;
+
+ case HImode:
+ *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
+ /* Fall through. */
+
+ case SImode:
+ break;
+
+ default:
+ *total += COSTS_N_INSNS (2);
+ }
+ }
+
+ if (mode == DImode)
+ *total += COSTS_N_INSNS (1);
+
+ return false;
+
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
+ *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
+ else if (const_ok_for_arm (~INTVAL (x)))
+ *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
+ else if (const_ok_for_arm (-INTVAL (x)))
+ {
+ if (outer_code == COMPARE || outer_code == PLUS
+ || outer_code == MINUS)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ else
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case CONST_DOUBLE:
+ *total = COSTS_N_INSNS (4);
+ return true;
+
+ default:
+ if (mode != VOIDmode)
+ *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
+ else
+ *total = COSTS_N_INSNS (4); /* Who knows? */
+ return false;
+ }
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* RTX costs for cores with a slow MUL implementation. Thumb-2 is not
+ supported on any "slowmul" cores, so it can be ignored. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+static bool
+arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ if (TARGET_THUMB)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ *total = thumb1_rtx_costs (x, code, outer_code);
+ return true;
+ }
+
+ switch (code)
+ {
+ case MULT:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ {
+ *total = 30;
+ return true;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int cost, const_ok = const_ok_for_arm (i);
+ int j, booth_unit_size;
+
+ /* Tune as appropriate. */
+ cost = const_ok ? 4 : 8;
+ booth_unit_size = 2;
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ cost += 2;
+ }
+
+ *total = cost;
+ return true;
+ }
+
+ *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
+ return true;
+
+ default:
+ *total = arm_rtx_costs_1 (x, code, outer_code);
+ return true;
+ }
+}
+
+
+/* RTX cost for cores with a fast multiply unit (M variants). */
+
+static bool
+arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ *total = thumb1_rtx_costs (x, code, outer_code);
+ return true;
+ }
+
+ /* ??? Should Thumb-2 use different costs? */
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ switch (code)
+ {
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all. */
+ if (mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ {
+ *total = 8;
+ return true;
+ }
+
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ {
+ *total = 30;
+ return true;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int cost, const_ok = const_ok_for_arm (i);
+ int j, booth_unit_size;
+
+ /* Tune as appropriate. */
+ cost = const_ok ? 4 : 8;
+ booth_unit_size = 8;
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ cost += 2;
+ }
+
+ *total = cost;
+ return true;
+ }
+
+ *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
+ return true;
+
+ default:
+ *total = arm_rtx_costs_1 (x, code, outer_code);
+ return true;
+ }
+}
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* RTX cost for XScale CPUs. Thumb-2 is not supported on any xscale cores,
+ so it can be ignored. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+static bool
+arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ if (TARGET_THUMB)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ *total = thumb1_rtx_costs (x, code, outer_code);
+ return true;
+ }
+
+ switch (code)
+ {
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all. */
+ if (mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ {
+ *total = 8;
+ return true;
+ }
+
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ {
+ *total = 30;
+ return true;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int cost, const_ok = const_ok_for_arm (i);
+ unsigned HOST_WIDE_INT masked_const;
+
+ /* The cost will be related to two insns.
+ First a load of the constant (MOV or LDR), then a multiply. */
+ cost = 2;
+ if (! const_ok)
+ cost += 1; /* LDR is probably more expensive because
+ of longer result latency. */
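+ /* For example, i = 0x12345678 is not const_ok_for_arm and has
+ significant bits in both of the ranges masked below, so the total
+ here works out as 2 + 1 + 2 = 5. */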
+ masked_const = i & 0xffff8000;
+ if (masked_const != 0 && masked_const != 0xffff8000)
+ {
+ masked_const = i & 0xf8000000;
+ if (masked_const == 0 || masked_const == 0xf8000000)
+ cost += 1;
+ else
+ cost += 2;
+ }
+ *total = cost;
+ return true;
+ }
+
+ *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
+ return true;
+
+ case COMPARE:
+ /* A COMPARE of a MULT is slow on XScale; the muls instruction
+ will stall until the multiplication is complete. */
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 4 + rtx_cost (XEXP (x, 0), code);
+ else
+ *total = arm_rtx_costs_1 (x, code, outer_code);
+ return true;
+
+ default:
+ *total = arm_rtx_costs_1 (x, code, outer_code);
+ return true;
+ }
+}
+
+
+/* RTX costs for 9e (and later) cores. */
+
+static bool
+arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+ int nonreg_cost;
+ int cost;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ switch (code)
+ {
+ case MULT:
+ *total = COSTS_N_INSNS (3);
+ return true;
+
+ default:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ *total = thumb1_rtx_costs (x, code, outer_code);
+ return true;
+ }
+ }
+
+ switch (code)
+ {
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all. */
+ if (mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ {
+ *total = 3;
+ return true;
+ }
+
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ *total = 30;
+ return true;
+ }
+ if (mode == DImode)
+ {
+ cost = 7;
+ nonreg_cost = 8;
+ }
+ else
+ {
+ cost = 2;
+ nonreg_cost = 4;
+ }
+
+
+ *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
+ return true;
+
+ default:
+ *total = arm_rtx_costs_1 (x, code, outer_code);
+ return true;
+ }
+}
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+static inline int
+arm_arm_address_cost (rtx x)
+{
+ enum rtx_code c = GET_CODE (x);
+
+ if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
+ return 0;
+ if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
+ return 10;
+
+ if (c == PLUS || c == MINUS)
+ {
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ return 2;
+
+ if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
+ return 3;
+
+ return 4;
+ }
+
+ return 6;
+}
+
+static inline int
+arm_thumb_address_cost (rtx x)
+{
+ enum rtx_code c = GET_CODE (x);
+
+ if (c == REG)
+ return 1;
+ if (c == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return 1;
+
+ return 2;
+}
+
+static int
+arm_address_cost (rtx x)
+{
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
+}
+
+static int
+arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
+{
+ rtx i_pat, d_pat;
+
+ /* Some true dependencies can have a higher cost depending
+ on precisely how certain input operands are used. */
+ if (arm_tune_xscale
+ && REG_NOTE_KIND (link) == 0
+ && recog_memoized (insn) >= 0
+ && recog_memoized (dep) >= 0)
+ {
+ int shift_opnum = get_attr_shift (insn);
+ enum attr_type attr_type = get_attr_type (dep);
+
+ /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
+ operand for INSN. If we have a shifted input operand and the
+ instruction we depend on is another ALU instruction, then we may
+ have to account for an additional stall. */
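+ /* For example (a hypothetical XScale sequence),
+
+ mov r2, r3
+ add r0, r1, r2, lsl #2
+
+ the ADD reads R2 through the shifter, so if the MOV is the insn
+ it depends on, we return a cost of 2 to model the extra stall. */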
+ if (shift_opnum != 0
+ && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
+ {
+ rtx shifted_operand;
+ int opno;
+
+ /* Get the shifted operand. */
+ extract_insn (insn);
+ shifted_operand = recog_data.operand[shift_opnum];
+
+ /* Iterate over all the operands in DEP. If we write an operand
+ that overlaps with SHIFTED_OPERAND, then we have to increase the
+ cost of this dependency. */
+ extract_insn (dep);
+ preprocess_constraints ();
+ for (opno = 0; opno < recog_data.n_operands; opno++)
+ {
+ /* We can ignore strict inputs. */
+ if (recog_data.operand_type[opno] == OP_IN)
+ continue;
+
+ if (reg_overlap_mentioned_p (recog_data.operand[opno],
+ shifted_operand))
+ return 2;
+ }
+ }
+ }
+
+ /* XXX This is not strictly true for the FPA. */
+ if (REG_NOTE_KIND (link) == REG_DEP_ANTI
+ || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
+ return 0;
+
+ /* Call insns don't incur a stall, even if they follow a load. */
+ if (REG_NOTE_KIND (link) == 0
+ && GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if ((i_pat = single_set (insn)) != NULL
+ && GET_CODE (SET_SRC (i_pat)) == MEM
+ && (d_pat = single_set (dep)) != NULL
+ && GET_CODE (SET_DEST (d_pat)) == MEM)
+ {
+ rtx src_mem = XEXP (SET_SRC (i_pat), 0);
+ /* This is a load after a store, there is no conflict if the load reads
+ from a cached area. Assume that loads from the stack, and from the
+ constant pool are cached, and that others will miss. This is a
+ hack. */
+
+ if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
+ || reg_mentioned_p (stack_pointer_rtx, src_mem)
+ || reg_mentioned_p (frame_pointer_rtx, src_mem)
+ || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
+ return 1;
+ }
+
+ return cost;
+}
+
+static int fp_consts_inited = 0;
+
+/* Only zero is a valid immediate for VFP. All eight values are valid for FPA. */
+static const char * const strings_fp[8] =
+{
+ "0", "1", "2", "3",
+ "4", "5", "0.5", "10"
+};
+
+static REAL_VALUE_TYPE values_fp[8];
+
+static void
+init_fp_table (void)
+{
+ int i;
+ REAL_VALUE_TYPE r;
+
+ if (TARGET_VFP)
+ fp_consts_inited = 1;
+ else
+ fp_consts_inited = 8;
+
+ for (i = 0; i < fp_consts_inited; i++)
+ {
+ r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
+ values_fp[i] = r;
+ }
+}
+
+/* Return TRUE if rtx X is a valid immediate FP constant. */
+int
+arm_const_double_rtx (rtx x)
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fp_consts_inited)
+ init_fp_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < fp_consts_inited; i++)
+ if (REAL_VALUES_EQUAL (r, values_fp[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPA constant. */
+int
+neg_const_double_rtx_ok_for_fpa (rtx x)
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fp_consts_inited)
+ init_fp_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fp[i]))
+ return 1;
+
+ return 0;
+}
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* VFPv3 has a fairly wide range of representable immediates, formed from
+ "quarter-precision" floating-point values. These can be evaluated using this
+ formula (with ^ for exponentiation):
+
+ -1^s * n * 2^-r
+
+ Where 's' is a sign bit (0/1), 'n' and 'r' are integers such that
+ 16 <= n <= 31 and 0 <= r <= 7.
+
+ These values are mapped onto an 8-bit integer ABCDEFGH s.t.
+
+ - A (most-significant) is the sign bit.
+ - BCD are the exponent (encoded as r XOR 3).
+ - EFGH are the mantissa (encoded as n - 16).
+*/
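+
+/* For example, 1.0 = -1^0 * 16 * 2^-4, i.e. s = 0, n = 16 and r = 4,
+ which maps to the index (0 << 7) | ((4 ^ 3) << 4) | (16 - 16) = 0x70. */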
+
+/* Return an integer index for a VFPv3 immediate operand X suitable for the
+ fconst[sd] instruction, or -1 if X isn't suitable. */
+static int
+vfp3_const_double_index (rtx x)
+{
+ REAL_VALUE_TYPE r, m;
+ int sign, exponent;
+ unsigned HOST_WIDE_INT mantissa, mant_hi;
+ unsigned HOST_WIDE_INT mask;
+ HOST_WIDE_INT m1, m2;
+ int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
+
+ if (!TARGET_VFP3 || GET_CODE (x) != CONST_DOUBLE)
+ return -1;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+
+ /* We can't represent these things, so detect them first. */
+ if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) || REAL_VALUE_MINUS_ZERO (r))
+ return -1;
+
+ /* Extract sign, exponent and mantissa. */
+ sign = REAL_VALUE_NEGATIVE (r) ? 1 : 0;
+ r = REAL_VALUE_ABS (r);
+ exponent = REAL_EXP (&r);
+ /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
+ highest (sign) bit, with a fixed binary point at bit point_pos.
+ WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
+ bits for the mantissa, this may fail (low bits would be lost). */
+ real_ldexp (&m, &r, point_pos - exponent);
+ REAL_VALUE_TO_INT (&m1, &m2, m);
+ mantissa = m1;
+ mant_hi = m2;
+
+ /* If there are bits set in the low part of the mantissa, we can't
+ represent this value. */
+ if (mantissa != 0)
+ return -1;
+
+ /* Now make it so that mantissa contains the most-significant bits, and move
+ the point_pos to indicate that the least-significant bits have been
+ discarded. */
+ point_pos -= HOST_BITS_PER_WIDE_INT;
+ mantissa = mant_hi;
+
+ /* We can permit four significant bits of mantissa only, plus a high bit
+ which is always 1. */
+ mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
+ if ((mantissa & mask) != 0)
+ return -1;
+
+ /* Now we know the mantissa is in range, chop off the unneeded bits. */
+ mantissa >>= point_pos - 5;
+
+ /* The mantissa may be zero. Disallow that case. (It's possible to load the
+ floating-point immediate zero with Neon using an integer-zero load, but
+ that case is handled elsewhere.) */
+ if (mantissa == 0)
+ return -1;
+
+ gcc_assert (mantissa >= 16 && mantissa <= 31);
+
+ /* The value of 5 here would be 4 if GCC used IEEE754-like encoding (where
+ normalised significands are in the range [1, 2). (Our mantissa is shifted
+ left 4 places at this point relative to normalised IEEE754 values). GCC
+ internally uses [0.5, 1) (see real.c), so the exponent returned from
+ REAL_EXP must be altered. */
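+ /* For example, GCC holds 1.0 as 0.5 * 2^1, so REAL_EXP returns 1 and
+ the encoded r becomes 5 - 1 = 4; indeed 1.0 == 16 * 2^-4. */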
+ exponent = 5 - exponent;
+
+ if (exponent < 0 || exponent > 7)
+ return -1;
+
+ /* Sign, mantissa and exponent are now in the correct form to plug into the
+ formulae described in the comment above. */
+ return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
+}
+
+/* Return TRUE if rtx X is a valid immediate VFPv3 constant. */
+int
+vfp3_const_double_rtx (rtx x)
+{
+ if (!TARGET_VFP3)
+ return 0;
+
+ return vfp3_const_double_index (x) != -1;
+}
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Recognize immediates which can be used in various Neon instructions. Legal
+ immediates are described by the following table (for VMVN variants, the
+ bitwise inverse of the constant shown is recognized. In either case, VMOV
+ is output and the correct instruction to use for a given constant is chosen
+ by the assembler). The constant shown is replicated across all elements of
+ the destination vector.
+
+ insn elems variant constant (binary)
+ ---- ----- ------- -----------------
+ vmov i32 0 00000000 00000000 00000000 abcdefgh
+ vmov i32 1 00000000 00000000 abcdefgh 00000000
+ vmov i32 2 00000000 abcdefgh 00000000 00000000
+ vmov i32 3 abcdefgh 00000000 00000000 00000000
+ vmov i16 4 00000000 abcdefgh
+ vmov i16 5 abcdefgh 00000000
+ vmvn i32 6 00000000 00000000 00000000 abcdefgh
+ vmvn i32 7 00000000 00000000 abcdefgh 00000000
+ vmvn i32 8 00000000 abcdefgh 00000000 00000000
+ vmvn i32 9 abcdefgh 00000000 00000000 00000000
+ vmvn i16 10 00000000 abcdefgh
+ vmvn i16 11 abcdefgh 00000000
+ vmov i32 12 00000000 00000000 abcdefgh 11111111
+ vmvn i32 13 00000000 00000000 abcdefgh 11111111
+ vmov i32 14 00000000 abcdefgh 11111111 11111111
+ vmvn i32 15 00000000 abcdefgh 11111111 11111111
+ vmov i8 16 abcdefgh
+ vmov i64 17 aaaaaaaa bbbbbbbb cccccccc dddddddd
+ eeeeeeee ffffffff gggggggg hhhhhhhh
+ vmov f32 18 aBbbbbbc defgh000 00000000 00000000
+
+ For case 18, B = !b. Representable values are exactly those accepted by
+ vfp3_const_double_index, but are output as floating-point numbers rather
+ than indices.
+
+ Variants 0-5 (inclusive) may also be used as immediates for the second
+ operand of VORR/VBIC instructions.
+
+ The INVERSE argument causes the bitwise inverse of the given operand to be
+ recognized instead (used for recognizing legal immediates for the VAND/VORN
+ pseudo-instructions). If INVERSE is true, the value placed in *MODCONST is
+ *not* inverted (i.e. the pseudo-instruction forms vand/vorn should still be
+ output, rather than the real insns vbic/vorr).
+
+ INVERSE makes no difference to the recognition of float vectors.
+
+ The return value is the variant of immediate as shown in the above table, or
+ -1 if the given value doesn't match any of the listed patterns.
+*/
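+/* For example, a V4SI vector with every element equal to 0x000000ff is
+ matched as variant 0 (a vmov.i32 with abcdefgh = 11111111). */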
+static int
+neon_valid_immediate (rtx op, enum machine_mode mode, int inverse,
+ rtx *modconst, int *elementwidth)
+{
+#define CHECK(STRIDE, ELSIZE, CLASS, TEST) \
+ matches = 1; \
+ for (i = 0; i < idx; i += (STRIDE)) \
+ if (!(TEST)) \
+ matches = 0; \
+ if (matches) \
+ { \
+ immtype = (CLASS); \
+ elsize = (ELSIZE); \
+ break; \
+ }
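+
+/* CHECK scans the byte image of the constant at the given STRIDE; if
+ every strided position satisfies TEST, the candidate CLASS and element
+ size ELSIZE are recorded and we break out of the matching block below. */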
+
+ unsigned int i, elsize, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
+ unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned char bytes[16];
+ int immtype = -1, matches;
+ unsigned int invmask = inverse ? 0xff : 0;
+
+ /* Vectors of float constants. */
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ {
+ rtx el0 = CONST_VECTOR_ELT (op, 0);
+ REAL_VALUE_TYPE r0;
+
+ if (!vfp3_const_double_rtx (el0))
+ return -1;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r0, el0);
+
+ for (i = 1; i < n_elts; i++)
+ {
+ rtx elt = CONST_VECTOR_ELT (op, i);
+ REAL_VALUE_TYPE re;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (re, elt);
+
+ if (!REAL_VALUES_EQUAL (r0, re))
+ return -1;
+ }
+
+ if (modconst)
+ *modconst = CONST_VECTOR_ELT (op, 0);
+
+ if (elementwidth)
+ *elementwidth = 0;
+
+ return 18;
+ }
+
+ /* Splat vector constant out into a byte vector. */
+ for (i = 0; i < n_elts; i++)
+ {
+ rtx el = CONST_VECTOR_ELT (op, i);
+ unsigned HOST_WIDE_INT elpart;
+ unsigned int part, parts;
+
+ if (GET_CODE (el) == CONST_INT)
+ {
+ elpart = INTVAL (el);
+ parts = 1;
+ }
+ else if (GET_CODE (el) == CONST_DOUBLE)
+ {
+ elpart = CONST_DOUBLE_LOW (el);
+ parts = 2;
+ }
+ else
+ gcc_unreachable ();
+
+ for (part = 0; part < parts; part++)
+ {
+ unsigned int byte;
+ for (byte = 0; byte < innersize; byte++)
+ {
+ bytes[idx++] = (elpart & 0xff) ^ invmask;
+ elpart >>= BITS_PER_UNIT;
+ }
+ if (GET_CODE (el) == CONST_DOUBLE)
+ elpart = CONST_DOUBLE_HIGH (el);
+ }
+ }
+
+ /* Sanity check. */
+ gcc_assert (idx == GET_MODE_SIZE (mode));
+
+ do
+ {
+ CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0);
+
+ CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0);
+
+ CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);
+
+ CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3]);
+
+ CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0);
+
+ CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1]);
+
+ CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
+
+ CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
+
+ CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);
+
+ CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3]);
+
+ CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff);
+
+ CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1]);
+
+ CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0);
+
+ CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
+
+ CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);
+
+ CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);
+
+ CHECK (1, 8, 16, bytes[i] == bytes[0]);
+
+ CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
+ && bytes[i] == bytes[(i + 8) % idx]);
+ }
+ while (0);
+
+ if (immtype == -1)
+ return -1;
+
+ if (elementwidth)
+ *elementwidth = elsize;
+
+ if (modconst)
+ {
+ unsigned HOST_WIDE_INT imm = 0;
+
+ /* Un-invert bytes of recognized vector, if necessary. */
+ if (invmask != 0)
+ for (i = 0; i < idx; i++)
+ bytes[i] ^= invmask;
+
+ if (immtype == 17)
+ {
+ /* FIXME: Broken on 32-bit H_W_I hosts. */
+ gcc_assert (sizeof (HOST_WIDE_INT) == 8);
+
+ for (i = 0; i < 8; i++)
+ imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
+ << (i * BITS_PER_UNIT);
+
+ *modconst = GEN_INT (imm);
+ }
+ else
+ {
+ for (i = 0; i < elsize / BITS_PER_UNIT; i++)
+ imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
+
+ *modconst = GEN_INT (imm);
+ }
+ }
+
+ return immtype;
+#undef CHECK
+}
+
+/* Return TRUE if rtx X is legal for use as either a Neon VMOV (or, implicitly,
+ VMVN) immediate. Write back width per element to *ELEMENTWIDTH (or zero for
+ float elements), and a modified constant (whatever should be output for a
+ VMOV) in *MODCONST. */
+
+int
+neon_immediate_valid_for_move (rtx op, enum machine_mode mode,
+ rtx *modconst, int *elementwidth)
+{
+ rtx tmpconst;
+ int tmpwidth;
+ int retval = neon_valid_immediate (op, mode, 0, &tmpconst, &tmpwidth);
+
+ if (retval == -1)
+ return 0;
+
+ if (modconst)
+ *modconst = tmpconst;
+
+ if (elementwidth)
+ *elementwidth = tmpwidth;
+
+ return 1;
+}
+
+/* Return TRUE if rtx X is legal for use in a VORR or VBIC instruction. If
+ the immediate is valid, write a constant suitable for using as an operand
+ to VORR/VBIC/VAND/VORN to *MODCONST and the corresponding element width to
+ *ELEMENTWIDTH. See neon_valid_immediate for description of INVERSE. */
+
+int
+neon_immediate_valid_for_logic (rtx op, enum machine_mode mode, int inverse,
+ rtx *modconst, int *elementwidth)
+{
+ rtx tmpconst;
+ int tmpwidth;
+ int retval = neon_valid_immediate (op, mode, inverse, &tmpconst, &tmpwidth);
+
+ if (retval < 0 || retval > 5)
+ return 0;
+
+ if (modconst)
+ *modconst = tmpconst;
+
+ if (elementwidth)
+ *elementwidth = tmpwidth;
+
+ return 1;
+}
+
+/* Return a string suitable for output of Neon immediate logic operation
+ MNEM. */
+
+char *
+neon_output_logic_immediate (const char *mnem, rtx *op2, enum machine_mode mode,
+ int inverse, int quad)
+{
+ int width, is_valid;
+ static char templ[40];
+
+ is_valid = neon_immediate_valid_for_logic (*op2, mode, inverse, op2, &width);
+
+ gcc_assert (is_valid != 0);
+
+ if (quad)
+ sprintf (templ, "%s.i%d\t%%q0, %%2", mnem, width);
+ else
+ sprintf (templ, "%s.i%d\t%%P0, %%2", mnem, width);
+
+ return templ;
+}
+
+/* Output a sequence of pairwise operations to implement a reduction.
+ NOTE: We do "too much work" here, because pairwise operations work on two
+ registers-worth of operands in one go. Unfortunately we can't exploit those
+ extra calculations to do the full operation in fewer steps, I don't think.
+ Although all vector elements of the result but the first are ignored, we
+ actually calculate the same result in each of the elements. An alternative
+ such as initially loading a vector with zero to use as each of the second
+ operands would use up an additional register and take an extra instruction,
+ for no particular gain. */
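+
+/* For example (a sketch; the actual instruction comes from the REDUC
+ callback), reducing a 4-element vector takes two pairwise steps:
+
+ vpadd.i16 d0, d1, d1 @ d0 = { a+b, c+d, a+b, c+d }
+ vpadd.i16 d0, d0, d0 @ d0 = { a+b+c+d, ... } */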
+
+void
+neon_pairwise_reduce (rtx op0, rtx op1, enum machine_mode mode,
+ rtx (*reduc) (rtx, rtx, rtx))
+{
+ enum machine_mode inner = GET_MODE_INNER (mode);
+ unsigned int i, parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (inner);
+ rtx tmpsum = op1;
+
+ for (i = parts / 2; i >= 1; i /= 2)
+ {
+ rtx dest = (i == 1) ? op0 : gen_reg_rtx (mode);
+ emit_insn (reduc (dest, tmpsum, tmpsum));
+ tmpsum = dest;
+ }
+}
+
+/* Initialise a vector with non-constant elements. FIXME: We can do better
+ than the current implementation (building a vector on the stack and then
+ loading it) in many cases. See rs6000.c. */
+
+void
+neon_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner = GET_MODE_INNER (mode);
+ unsigned int i, n_elts = GET_MODE_NUNITS (mode);
+ rtx mem;
+
+ gcc_assert (VECTOR_MODE_P (mode));
+
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
+ XVECEXP (vals, 0, i));
+
+ emit_move_insn (target, mem);
+}
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* Predicates for `match_operand' and `match_operator'. */
+
+/* Return nonzero if OP is a valid Cirrus memory address pattern. */
+int
+cirrus_memory_offset (rtx op)
+{
+ /* Reject eliminable registers. */
+ if (! (reload_in_progress || reload_completed)
+ && ( reg_mentioned_p (frame_pointer_rtx, op)
+ || reg_mentioned_p (arg_pointer_rtx, op)
+ || reg_mentioned_p (virtual_incoming_args_rtx, op)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, op)
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
+ || reg_mentioned_p (virtual_stack_vars_rtx, op)))
+ return 0;
+
+ if (GET_CODE (op) == MEM)
+ {
+ rtx ind;
+
+ ind = XEXP (op, 0);
+
+ /* Match: (mem (reg)). */
+ if (GET_CODE (ind) == REG)
+ return 1;
+
+ /* Match:
+ (mem (plus (reg)
+ (const))). */
+ if (GET_CODE (ind) == PLUS
+ && GET_CODE (XEXP (ind, 0)) == REG
+ && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
+ && GET_CODE (XEXP (ind, 1)) == CONST_INT)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Return TRUE if OP is a valid coprocessor memory address pattern.
+ WB is true if full writeback address modes are allowed and is false
+ if limited writeback address modes (POST_INC and PRE_DEC) are
+ allowed. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+int
+arm_coproc_mem_operand (rtx op, bool wb)
+{
+ rtx ind;
+
+ /* Reject eliminable registers. */
+ if (! (reload_in_progress || reload_completed)
+ && ( reg_mentioned_p (frame_pointer_rtx, op)
+ || reg_mentioned_p (arg_pointer_rtx, op)
+ || reg_mentioned_p (virtual_incoming_args_rtx, op)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, op)
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
+ || reg_mentioned_p (virtual_stack_vars_rtx, op)))
+ return FALSE;
+
+ /* Constants are converted into offsets from labels. */
+ if (GET_CODE (op) != MEM)
+ return FALSE;
+
+ ind = XEXP (op, 0);
+
+ if (reload_completed
+ && (GET_CODE (ind) == LABEL_REF
+ || (GET_CODE (ind) == CONST
+ && GET_CODE (XEXP (ind, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ return TRUE;
+
+ /* Match: (mem (reg)). */
+ if (GET_CODE (ind) == REG)
+ return arm_address_register_rtx_p (ind, 0);
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Autoincrement addressing modes. POST_INC and PRE_DEC are
+ acceptable in any case (subject to verification by
+ arm_address_register_rtx_p). We need WB to be true to accept
+ PRE_INC and POST_DEC. */
+ if (GET_CODE (ind) == POST_INC
+ || GET_CODE (ind) == PRE_DEC
+ || (wb
+ && (GET_CODE (ind) == PRE_INC
+ || GET_CODE (ind) == POST_DEC)))
+ return arm_address_register_rtx_p (XEXP (ind, 0), 0);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ if (wb
+ && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
+ && arm_address_register_rtx_p (XEXP (ind, 0), 0)
+ && GET_CODE (XEXP (ind, 1)) == PLUS
+ && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
+ ind = XEXP (ind, 1);
+
+ /* Match:
+ (plus (reg)
+ (const)). */
+ if (GET_CODE (ind) == PLUS
+ && GET_CODE (XEXP (ind, 0)) == REG
+ && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
+ && GET_CODE (XEXP (ind, 1)) == CONST_INT
+ && INTVAL (XEXP (ind, 1)) > -1024
+ && INTVAL (XEXP (ind, 1)) < 1024
+ && (INTVAL (XEXP (ind, 1)) & 3) == 0)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Return TRUE if OP is a memory operand which we can load or store a vector
+ to/from. If CORE is true, we're moving from ARM registers not Neon
+ registers. */
+int
+neon_vector_mem_operand (rtx op, bool core)
+{
+ rtx ind;
+
+ /* Reject eliminable registers. */
+ if (! (reload_in_progress || reload_completed)
+ && ( reg_mentioned_p (frame_pointer_rtx, op)
+ || reg_mentioned_p (arg_pointer_rtx, op)
+ || reg_mentioned_p (virtual_incoming_args_rtx, op)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, op)
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
+ || reg_mentioned_p (virtual_stack_vars_rtx, op)))
+ return FALSE;
+
+ /* Constants are converted into offsets from labels. */
+ if (GET_CODE (op) != MEM)
+ return FALSE;
+
+ ind = XEXP (op, 0);
+
+ if (reload_completed
+ && (GET_CODE (ind) == LABEL_REF
+ || (GET_CODE (ind) == CONST
+ && GET_CODE (XEXP (ind, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ return TRUE;
+
+ /* Match: (mem (reg)). */
+ if (GET_CODE (ind) == REG)
+ return arm_address_register_rtx_p (ind, 0);
+
+ /* Allow post-increment with Neon registers. */
+ if (!core && GET_CODE (ind) == POST_INC)
+ return arm_address_register_rtx_p (XEXP (ind, 0), 0);
+
+#if 0
+ /* FIXME: We can support this too if we use VLD1/VST1. */
+ if (!core
+ && GET_CODE (ind) == POST_MODIFY
+ && arm_address_register_rtx_p (XEXP (ind, 0), 0)
+ && GET_CODE (XEXP (ind, 1)) == PLUS
+ && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
+ ind = XEXP (ind, 1);
+#endif
+
+ /* Match:
+ (plus (reg)
+ (const)). */
+ if (!core
+ && GET_CODE (ind) == PLUS
+ && GET_CODE (XEXP (ind, 0)) == REG
+ && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
+ /* APPLE LOCAL begin 6160917 */
+ /* Make call consistent with the ones used in neon_reload_{in,out} */
+ && arm_legitimate_index_p (GET_MODE (op), XEXP (ind, 1), SET, 0))
+ /* APPLE LOCAL end 6160917 */
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if OP is a mem suitable for loading/storing a Neon struct
+ type. */
+int
+neon_struct_mem_operand (rtx op)
+{
+ rtx ind;
+
+ /* Reject eliminable registers. */
+ if (! (reload_in_progress || reload_completed)
+ && ( reg_mentioned_p (frame_pointer_rtx, op)
+ || reg_mentioned_p (arg_pointer_rtx, op)
+ || reg_mentioned_p (virtual_incoming_args_rtx, op)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, op)
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
+ || reg_mentioned_p (virtual_stack_vars_rtx, op)))
+ return FALSE;
+
+ /* Constants are converted into offsets from labels. */
+ if (GET_CODE (op) != MEM)
+ return FALSE;
+
+ ind = XEXP (op, 0);
+
+ if (reload_completed
+ && (GET_CODE (ind) == LABEL_REF
+ || (GET_CODE (ind) == CONST
+ && GET_CODE (XEXP (ind, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ return TRUE;
+
+ /* Match: (mem (reg)). */
+ if (GET_CODE (ind) == REG)
+ return arm_address_register_rtx_p (ind, 0);
+
+ return FALSE;
+}
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* Return true if X is a register that will be eliminated later on. */
+int
+arm_eliminable_register (rtx x)
+{
+ return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (x) <= LAST_VIRTUAL_REGISTER));
+}
+
+/* Return GENERAL_REGS if a scratch register required to reload x to/from
+ coprocessor registers. Otherwise return NO_REGS. */
+
+enum reg_class
+coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
+{
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (TARGET_NEON
+ && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ && neon_vector_mem_operand (x, FALSE))
+ return NO_REGS;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
+ return NO_REGS;
+
+ return GENERAL_REGS;
+}
+
+/* Values which must be returned in the most-significant end of the return
+ register. */
+
+static bool
+arm_return_in_msb (tree valtype)
+{
+ return (TARGET_AAPCS_BASED
+ && BYTES_BIG_ENDIAN
+ && (AGGREGATE_TYPE_P (valtype)
+ || TREE_CODE (valtype) == COMPLEX_TYPE));
+}
+
+/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
+ Used by the Cirrus Maverick code, which has to work around
+ a hardware bug triggered by such instructions. */
+static bool
+arm_memory_load_p (rtx insn)
+{
+ rtx body, lhs, rhs;
+
+ if (insn == NULL_RTX || GET_CODE (insn) != INSN)
+ return false;
+
+ body = PATTERN (insn);
+
+ if (GET_CODE (body) != SET)
+ return false;
+
+ lhs = XEXP (body, 0);
+ rhs = XEXP (body, 1);
+
+ lhs = REG_OR_SUBREG_RTX (lhs);
+
+ /* If the destination is not a general purpose
+ register we do not have to worry. */
+ if (GET_CODE (lhs) != REG
+ || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
+ return false;
+
+ /* As well as loads from memory we also have to react
+ to loads of invalid constants which will be turned
+ into loads from the minipool. */
+ return (GET_CODE (rhs) == MEM
+ || GET_CODE (rhs) == SYMBOL_REF
+ || note_invalid_constants (insn, -1, false));
+}
+
+/* Return TRUE if INSN is a Cirrus instruction. */
+static bool
+arm_cirrus_insn_p (rtx insn)
+{
+ enum attr_cirrus attr;
+
+ /* get_attr cannot accept USE or CLOBBER. */
+ if (!insn
+ || GET_CODE (insn) != INSN
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return 0;
+
+ attr = get_attr_cirrus (insn);
+
+ return attr != CIRRUS_NOT;
+}
+
+/* Cirrus reorg for invalid instruction combinations. */
+static void
+cirrus_reorg (rtx first)
+{
+ enum attr_cirrus attr;
+ rtx body = PATTERN (first);
+ rtx t;
+ int nops;
+
+ /* Any branch must be followed by 2 non-Cirrus instructions. */
+ if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
+ {
+ nops = 0;
+ t = next_nonnote_insn (first);
+
+ if (arm_cirrus_insn_p (t))
+ ++ nops;
+
+ if (arm_cirrus_insn_p (next_nonnote_insn (t)))
+ ++ nops;
+
+ while (nops --)
+ emit_insn_after (gen_nop (), first);
+
+ return;
+ }
+
+ /* (float (blah)) is in parallel with a clobber. */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+ if (GET_CODE (body) == SET)
+ {
+ rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
+
+ /* cfldrd, cfldr64, cfstrd, cfstr64 must
+ be followed by a non-Cirrus insn. */
+ if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
+ {
+ if (arm_cirrus_insn_p (next_nonnote_insn (first)))
+ emit_insn_after (gen_nop (), first);
+
+ return;
+ }
+ else if (arm_memory_load_p (first))
+ {
+ unsigned int arm_regno;
+
+ /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
+ ldr/cfmv64hr combination where the Rd field is the same
+ in both instructions must be split with a non-Cirrus
+ insn. Example:
+
+ ldr r0, blah
+ nop
+ cfmvsr mvf0, r0. */
+
+ /* Get Arm register number for ldr insn. */
+ if (GET_CODE (lhs) == REG)
+ arm_regno = REGNO (lhs);
+ else
+ {
+ gcc_assert (GET_CODE (rhs) == REG);
+ arm_regno = REGNO (rhs);
+ }
+
+ /* Next insn. */
+ first = next_nonnote_insn (first);
+
+ if (! arm_cirrus_insn_p (first))
+ return;
+
+ body = PATTERN (first);
+
+ /* (float (blah)) is in parallel with a clobber. */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
+ body = XVECEXP (body, 0, 0);
+
+ if (GET_CODE (body) == FLOAT)
+ body = XEXP (body, 0);
+
+ if (get_attr_cirrus (first) == CIRRUS_MOVE
+ && GET_CODE (XEXP (body, 1)) == REG
+ && arm_regno == REGNO (XEXP (body, 1)))
+ emit_insn_after (gen_nop (), first);
+
+ return;
+ }
+ }
+
+ /* get_attr cannot accept USE or CLOBBER. */
+ if (!first
+ || GET_CODE (first) != INSN
+ || GET_CODE (PATTERN (first)) == USE
+ || GET_CODE (PATTERN (first)) == CLOBBER)
+ return;
+
+ attr = get_attr_cirrus (first);
+
+ /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
+ must be followed by a non-coprocessor instruction. */
+ if (attr == CIRRUS_COMPARE)
+ {
+ nops = 0;
+
+ t = next_nonnote_insn (first);
+
+ if (arm_cirrus_insn_p (t))
+ ++ nops;
+
+ if (arm_cirrus_insn_p (next_nonnote_insn (t)))
+ ++ nops;
+
+ while (nops --)
+ emit_insn_after (gen_nop (), first);
+
+ return;
+ }
+}
+
+/* APPLE LOCAL begin ARM -mdynamic-no-pic support */
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (rtx x)
+{
+ return symbol_mentioned_with_filter (x, 0);
+}
+
+/* Return TRUE if X references a non-local SYMBOL_REF. */
+int
+non_local_symbol_mentioned_p (rtx x)
+{
+ return symbol_mentioned_with_filter (x, 1);
+}
+
+/* Return TRUE if X references a SYMBOL_REF. If filter_local is set,
+ then references to local symbols (as per machopic_data_defined_p)
+ are ignored. */
+static int
+symbol_mentioned_with_filter (rtx x, int filter_local)
+{
+ const char * fmt;
+ int i;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+#if TARGET_MACHO
+ if (filter_local && machopic_data_defined_p (x))
+ return 0;
+ else
+#endif
+ return 1;
+ }
+
+ /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
+ are constant offsets, not symbols. */
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+ return 0;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (symbol_mentioned_with_filter (XVECEXP (x, i, j),
+ filter_local))
+ return 1;
+ }
+ else if (fmt[i] == 'e'
+ && symbol_mentioned_with_filter (XEXP (x, i),
+ filter_local))
+ return 1;
+ }
+
+ return 0;
+}
+/* APPLE LOCAL end ARM -mdynamic-no-pic support */
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (rtx x)
+{
+ const char * fmt;
+ int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return 1;
+
+ /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
+ instruction, but they are constant offsets, not symbols. */
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+ return 0;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+tls_mentioned_p (rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ return tls_mentioned_p (XEXP (x, 0));
+
+ case UNSPEC:
+ if (XINT (x, 1) == UNSPEC_TLS)
+ return 1;
+ /* Fall through. */
+
+ default:
+ return 0;
+ }
+}
+
+/* Must not copy a SET whose source operand is PC-relative. */
+
+static bool
+arm_cannot_copy_insn_p (rtx insn)
+{
+ rtx pat = PATTERN (insn);
+
+ /* APPLE LOCAL begin ARM pic support */
+ if (GET_CODE (pat) == SET)
+ {
+ rtx rhs = SET_SRC (pat);
+ rtx lhs = SET_DEST (pat);
+
+ if (GET_CODE (rhs) == UNSPEC
+ && XINT (rhs, 1) == UNSPEC_PIC_BASE)
+ return TRUE;
+
+ if (GET_CODE (rhs) == MEM
+ && GET_CODE (XEXP (rhs, 0)) == UNSPEC
+ && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
+ return TRUE;
+
+ if (GET_CODE (lhs) == MEM
+ && GET_CODE (XEXP (lhs, 0)) == UNSPEC
+ && XINT (XEXP (lhs, 0), 1) == UNSPEC_PIC_BASE)
+ return TRUE;
+ }
+ /* APPLE LOCAL end ARM pic support */
+
+ if (GET_CODE (pat) == PARALLEL
+ && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
+ {
+ rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
+
+ if (GET_CODE (rhs) == UNSPEC
+ && XINT (rhs, 1) == UNSPEC_PIC_BASE)
+ return TRUE;
+
+ if (GET_CODE (rhs) == MEM
+ && GET_CODE (XEXP (rhs, 0)) == UNSPEC
+ && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+enum rtx_code
+minmax_code (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case SMAX:
+ return GE;
+ case SMIN:
+ return LE;
+ case UMIN:
+ return LEU;
+ case UMAX:
+ return GEU;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return 1 if memory locations are adjacent. */
+int
+adjacent_mem_locations (rtx a, rtx b)
+{
+ /* We don't guarantee to preserve the order of these memory refs. */
+ if (volatile_refs_p (a) || volatile_refs_p (b))
+ return 0;
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ HOST_WIDE_INT val0 = 0, val1 = 0;
+ rtx reg0, reg1;
+ int val_diff;
+
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = XEXP (XEXP (a, 0), 0);
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = XEXP (a, 0);
+
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = XEXP (XEXP (b, 0), 0);
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = XEXP (b, 0);
+
+ /* Don't accept any offset that will require multiple
+ instructions to handle, since this would cause the
+ arith_adjacentmem pattern to output an overlong sequence. */
+ if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
+ return 0;
+
+ /* Don't allow an eliminable register: register elimination can make
+ the offset too large. */
+ if (arm_eliminable_register (reg0))
+ return 0;
+
+ val_diff = val1 - val0;
+
+ if (arm_ld_sched)
+ {
+ /* If the target has load delay slots, then there's no benefit
+ to using an ldm instruction unless the offset is zero and
+ we are optimizing for size. */
+ return (optimize_size && (REGNO (reg0) == REGNO (reg1))
+ && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
+ && (val_diff == 4 || val_diff == -4));
+ }
+
+ return ((REGNO (reg0) == REGNO (reg1))
+ && (val_diff == 4 || val_diff == -4));
+ }
+
+ return 0;
+}
+
+int
+load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
+ HOST_WIDE_INT *load_offset)
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present,
+ though could be easily extended if required. */
+ gcc_assert (nops >= 2 && nops <= 4);
+
+ /* Loop over the operands and check that the memory references are
+ suitable (i.e. immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
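+ /* For example, the operand pair { ldr r4, [r0] ; ldr r5, [r0, #4] }
+ yields regs {4, 5}, base register r0 and offsets {0, 4}, and is
+ returned as sequence 1 (ldmia). */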
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg (operands + (nops + i));
+
+ gcc_assert (GET_CODE (operands[nops + i]) == MEM);
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO (reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != (int) REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, or if it overwrites the
+ base register but isn't the last insn in the list, then
+ we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
+ || (i != nops - 1 && unsorted_regs[i] == base_reg))
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+ /* Have we found a suitable register? If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
+ return 2; /* ldmib */
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* ldmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* ldmdb */
+
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
+ if the offset isn't small enough. The reason 2 ldrs are faster
+ is because these ARMs are able to do more than one cache access
+ in a single cycle. The ARM9 and StrongARM have Harvard caches,
+ whilst the ARM8 has a double bandwidth cache. This means that
+ these cores can do both an instruction fetch and a data fetch in
+ a single cycle, so the trick of calculating the address into a
+ scratch register (one of the result regs) and then doing a load
+ multiple actually becomes slower (and no smaller in code size).
+ That is the transformation
+
+ ldr rd1, [rbase + offset]
+ ldr rd2, [rbase + offset + 4]
+
+ to
+
+ add rd1, rbase, offset
+ ldmia rd1, {rd1, rd2}
+
+ produces worse code -- '3 cycles + any stalls on rd2' instead of
+ '2 cycles + any stalls on rd2'. On ARMs with only one cache
+ access per cycle, the first sequence could never complete in less
+ than 6 cycles, whereas the ldm sequence would only take 5 and
+ would make better use of sequential accesses if not hitting the
+ cache.
+
+ We cheat here and test 'arm_ld_sched' which we currently know to
+ only be true for the ARM8, ARM9 and StrongARM. If this ever
+ changes, then the test below needs to be reworked. */
+ if (nops == 2 && arm_ld_sched)
+ return 0;
+
+ /* Can't do it without setting up the offset; only do this if it takes
+ no more than one insn. */
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
+ || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
+}
+
+const char *
+emit_ldm_seq (rtx *operands, int nops)
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "ldm%(ia%)\t");
+ break;
+
+ case 2:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "ldm%(ib%)\t");
+ break;
+
+ case 3:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "ldm%(da%)\t");
+ break;
+
+ case 4:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "ldm%(db%)\t");
+ break;
+
+ case 5:
+ if (offset >= 0)
+ sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) offset);
+ else
+ sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) -offset);
+ output_asm_insn (buf, operands);
+ base_reg = regs[0];
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "ldm%(ia%)\t");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole ldm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
+int
+store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
+ HOST_WIDE_INT * load_offset)
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ gcc_assert (nops >= 2 && nops <= 4);
+
+ /* Loop over the operands and check that the memory references are
+ suitable (i.e. immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg (operands + (nops + i));
+
+ gcc_assert (GET_CODE (operands[nops + i]) == MEM);
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO (reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != (int) REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, then we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+ /* Have we found a suitable register? If not, one must be used more
+ than once. */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* stmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* stmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* stmdb */
+
+ return 0;
+}
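+
+/* Worked example, not from the original source: three stores to
+ [r4, #0], [r4, #4] and [r4, #8] give sorted offsets {0, 4, 8}; the
+ lowest offset is 0, so the function returns 1 (stmia). Offsets
+ {4, 8, 12} return 2 (stmib), {-8, -4, 0} return 3 (stmda), and
+ {-12, -8, -4} return 4 (stmdb). */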
+
+const char *
+emit_stm_seq (rtx *operands, int nops)
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "stm%(ia%)\t");
+ break;
+
+ case 2:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "stm%(ib%)\t");
+ break;
+
+ case 3:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "stm%(da%)\t");
+ break;
+
+ case 4:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (buf, "stm%(db%)\t");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole stm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
+/* Routines for use in generating RTL. */
+
+rtx
+arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
+ int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
+{
+ HOST_WIDE_INT offset = *offsetp;
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem, addr;
+
+ /* XScale has load-store double instructions, but they have stricter
+ alignment requirements than load-store multiple, so we cannot
+ use them.
+
+ For XScale ldm requires 2 + NREGS cycles to complete and blocks
+ the pipeline until completion.
+
+ NREGS CYCLES
+ 1 3
+ 2 4
+ 3 5
+ 4 6
+
+ An ldr instruction takes 1-3 cycles, but does not block the
+ pipeline.
+
+ NREGS CYCLES
+ 1 1-3
+ 2 2-6
+ 3 3-9
+ 4 4-12
+
+ Best case ldr will always win. However, the more ldr instructions
+ we issue, the less likely we are to be able to schedule them well.
+ Using ldr instructions also increases code size.
+
+ As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
+ for counts of 3 or 4 regs. */
+ if (arm_tune_xscale && count <= 2 && ! optimize_size)
+ {
+ rtx seq;
+
+ start_sequence ();
+
+ for (i = 0; i < count; i++)
+ {
+ addr = plus_constant (from, i * 4 * sign);
+ mem = adjust_automodify_address (basemem, SImode, addr, offset);
+ emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
+ offset += 4 * sign;
+ }
+
+ if (write_back)
+ {
+ emit_move_insn (from, plus_constant (from, count * 4 * sign));
+ *offsetp = offset;
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ return seq;
+ }
+
+ result = gen_rtx_PARALLEL (VOIDmode,
+ rtvec_alloc (count + (write_back ? 1 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ addr = plus_constant (from, j * 4 * sign);
+ mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
+ XVECEXP (result, 0, i)
+ = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
+ offset += 4 * sign;
+ }
+
+ if (write_back)
+ *offsetp = offset;
+
+ return result;
+}
+
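+/* Illustrative sketch with hypothetical arguments, not from the
+ original source: a call such as
+ arm_gen_load_multiple (4, 2, from, TRUE, FALSE, basemem, &off)
+ builds
+
+ (parallel [(set (reg:SI 4) (mem:SI (reg:SI from)))
+ (set (reg:SI 5) (mem:SI (plus:SI (reg:SI from) (const_int 4))))])
+
+ except that when tuning for XScale with a count this small (and not
+ optimizing for size) it instead returns a sequence of two separate
+ SImode loads, as discussed above. */
+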
+rtx
+arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
+ int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
+{
+ HOST_WIDE_INT offset = *offsetp;
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem, addr;
+
+ /* See arm_gen_load_multiple for discussion of
+ the pros/cons of ldm/stm usage for XScale. */
+ if (arm_tune_xscale && count <= 2 && ! optimize_size)
+ {
+ rtx seq;
+
+ start_sequence ();
+
+ for (i = 0; i < count; i++)
+ {
+ addr = plus_constant (to, i * 4 * sign);
+ mem = adjust_automodify_address (basemem, SImode, addr, offset);
+ emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
+ offset += 4 * sign;
+ }
+
+ if (write_back)
+ {
+ emit_move_insn (to, plus_constant (to, count * 4 * sign));
+ *offsetp = offset;
+ }
+
+ seq = get_insns ();
+ end_sequence ();
+
+ return seq;
+ }
+
+ result = gen_rtx_PARALLEL (VOIDmode,
+ rtvec_alloc (count + (write_back ? 1 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx_SET (VOIDmode, to,
+ plus_constant (to, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ addr = plus_constant (to, j * 4 * sign);
+ mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
+ XVECEXP (result, 0, i)
+ = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
+ offset += 4 * sign;
+ }
+
+ if (write_back)
+ *offsetp = offset;
+
+ return result;
+}
+
+int
+arm_gen_movmemqi (rtx *operands)
+{
+ HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
+ HOST_WIDE_INT srcoffset, dstoffset;
+ int i;
+ rtx src, dst, srcbase, dstbase;
+ rtx part_bytes_reg = NULL;
+ rtx mem;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT
+ || INTVAL (operands[2]) > 64
+ || INTVAL (operands[3]) & 3)
+ return 0;
+
+ /* APPLE LOCAL begin ARM use memcpy more at -Os */
+ /* At -Os we weigh the size of repeated load/store insns against a memcpy
+ call. Both ways require getting source and dest addresses into regs.
+ Beyond that, memcpy is 2 insns; load/store is at least 2, maybe more.
+ But load/store is faster, so we prefer it when it is only 2 insns; that
+ occurs only when the size is 1, 2, 4, 8, 12, or 16. */
+ if (optimize_size
+ && INTVAL (operands[2]) != 1
+ && INTVAL (operands[2]) != 2
+ && INTVAL (operands[2]) != 4
+ && INTVAL (operands[2]) != 8
+ && INTVAL (operands[2]) != 12
+ && INTVAL (operands[2]) != 16)
+ return 0;
+ /* APPLE LOCAL end ARM use memcpy more at -Os */
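+
+ /* Worked example, not from the original source: at -Os a 12-byte copy
+ is one ldmia plus one stmia, i.e. 2 insns, so it is expanded inline;
+ a 20-byte copy would need more insns than the 2 of a memcpy call, so
+ we return 0 and let memcpy handle it. */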
+
+ dstbase = operands[0];
+ srcbase = operands[1];
+
+ dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
+ src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
+
+ in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
+ out_words_to_go = INTVAL (operands[2]) / 4;
+ last_bytes = INTVAL (operands[2]) & 3;
+ dstoffset = srcoffset = 0;
+
+ if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
+ part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
+
+ for (i = 0; in_words_to_go >= 2; i+=4)
+ {
+ if (in_words_to_go > 4)
+ emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
+ srcbase, &srcoffset));
+ else
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ FALSE, srcbase, &srcoffset));
+
+ if (out_words_to_go)
+ {
+ if (out_words_to_go > 4)
+ emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
+ dstbase, &dstoffset));
+ else if (out_words_to_go != 1)
+ emit_insn (arm_gen_store_multiple (0, out_words_to_go,
+ dst, TRUE,
+ (last_bytes == 0
+ ? FALSE : TRUE),
+ dstbase, &dstoffset));
+ else
+ {
+ mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
+ emit_move_insn (mem, gen_rtx_REG (SImode, 0));
+ if (last_bytes != 0)
+ {
+ emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
+ dstoffset += 4;
+ }
+ }
+ }
+
+ in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
+ out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
+ }
+
+ /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
+ if (out_words_to_go)
+ {
+ rtx sreg;
+
+ mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
+ sreg = copy_to_reg (mem);
+
+ mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
+ emit_move_insn (mem, sreg);
+ in_words_to_go--;
+
+ gcc_assert (!in_words_to_go); /* Sanity check */
+ }
+
+ if (in_words_to_go)
+ {
+ gcc_assert (in_words_to_go > 0);
+
+ mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
+ part_bytes_reg = copy_to_mode_reg (SImode, mem);
+ }
+
+ gcc_assert (!last_bytes || part_bytes_reg);
+
+ if (BYTES_BIG_ENDIAN && last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ /* The bytes we want are in the top end of the word. */
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
+ GEN_INT (8 * (4 - last_bytes))));
+ part_bytes_reg = tmp;
+
+ while (last_bytes)
+ {
+ mem = adjust_automodify_address (dstbase, QImode,
+ plus_constant (dst, last_bytes - 1),
+ dstoffset + last_bytes - 1);
+ emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
+
+ if (--last_bytes)
+ {
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+
+ }
+ else
+ {
+ if (last_bytes > 1)
+ {
+ mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
+ emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
+ last_bytes -= 2;
+ if (last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (dst, dst, const2_rtx));
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
+ part_bytes_reg = tmp;
+ dstoffset += 2;
+ }
+ }
+
+ if (last_bytes)
+ {
+ mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
+ emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
+ }
+ }
+
+ return 1;
+}
+
+/* Select a dominance comparison mode if possible for a test of the general
+ form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
+ COND_OR == DOM_CC_X_AND_Y => (X && Y)
+ COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
+ COND_OR == DOM_CC_X_OR_Y => (X || Y)
+ In all cases OP will be either EQ or NE, but we don't need to know which
+ here. If we are unable to support a dominance comparison we return
+ CC mode. This will then fail to match for the RTL expressions that
+ generate this call. */
+enum machine_mode
+arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
+{
+ enum rtx_code cond1, cond2;
+ int swapped = 0;
+
+ /* Currently we will probably get the wrong result if the individual
+ comparisons are not simple. This also ensures that it is safe to
+ reverse a comparison if necessary. */
+ if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+ != CCmode)
+ || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
+ != CCmode))
+ return CCmode;
+
+ /* The if_then_else variant of this tests the second condition if the
+ first passes, but is true if the first fails. Reverse the first
+ condition to get a true "inclusive-or" expression. */
+ if (cond_or == DOM_CC_NX_OR_Y)
+ cond1 = reverse_condition (cond1);
+
+ /* If the comparisons are not equal, and one doesn't dominate the other,
+ then we can't do this. */
+ if (cond1 != cond2
+ && !comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
+ return CCmode;
+
+ if (swapped)
+ {
+ enum rtx_code temp = cond1;
+ cond1 = cond2;
+ cond2 = temp;
+ }
+
+ /* APPLE LOCAL begin 7174451 */
+ /* Punt for the unordered floating point comparisons */
+ if (cond1 == UNGT || cond1 == UNGE || cond1 == UNLT || cond1 == UNLE
+ || cond1 == UNEQ || cond1 == LTGT)
+ return CCmode;
+ /* APPLE LOCAL end 7174451 */
+
+ switch (cond1)
+ {
+ case EQ:
+ if (cond_or == DOM_CC_X_AND_Y)
+ return CC_DEQmode;
+
+ switch (cond2)
+ {
+ case EQ: return CC_DEQmode;
+ case LE: return CC_DLEmode;
+ case LEU: return CC_DLEUmode;
+ case GE: return CC_DGEmode;
+ case GEU: return CC_DGEUmode;
+ default: gcc_unreachable ();
+ }
+
+ case LT:
+ if (cond_or == DOM_CC_X_AND_Y)
+ return CC_DLTmode;
+
+ switch (cond2)
+ {
+ case LT:
+ return CC_DLTmode;
+ case LE:
+ return CC_DLEmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
+
+ case GT:
+ if (cond_or == DOM_CC_X_AND_Y)
+ return CC_DGTmode;
+
+ switch (cond2)
+ {
+ case GT:
+ return CC_DGTmode;
+ case GE:
+ return CC_DGEmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
+
+ case LTU:
+ if (cond_or == DOM_CC_X_AND_Y)
+ return CC_DLTUmode;
+
+ switch (cond2)
+ {
+ case LTU:
+ return CC_DLTUmode;
+ case LEU:
+ return CC_DLEUmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
+
+ case GTU:
+ if (cond_or == DOM_CC_X_AND_Y)
+ return CC_DGTUmode;
+
+ switch (cond2)
+ {
+ case GTU:
+ return CC_DGTUmode;
+ case GEU:
+ return CC_DGEUmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* The remaining cases only occur when both comparisons are the
+ same. */
+ case NE:
+ gcc_assert (cond1 == cond2);
+ return CC_DNEmode;
+
+ case LE:
+ gcc_assert (cond1 == cond2);
+ return CC_DLEmode;
+
+ case GE:
+ gcc_assert (cond1 == cond2);
+ return CC_DGEmode;
+
+ case LEU:
+ gcc_assert (cond1 == cond2);
+ return CC_DLEUmode;
+
+ case GEU:
+ gcc_assert (cond1 == cond2);
+ return CC_DGEUmode;
+
+ default:
+ gcc_unreachable ();
+ }
+}
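+
+/* Illustrative example, not from the original source: for the test
+ (x == a) || (y <= b) with COND_OR == DOM_CC_X_OR_Y, cond1 is EQ and
+ cond2 is LE; EQ dominates LE, so the EQ/LE entry above yields
+ CC_DLEmode, and the pair can be evaluated as one compare followed by
+ a conditional compare. */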
+
+enum machine_mode
+arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
+{
+ /* All floating point compares return CCFP if it is an equality
+ comparison, and CCFPE otherwise. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ switch (op)
+ {
+ case EQ:
+ case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return CCFPmode;
+
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
+ return CCFPmode;
+ return CCFPEmode;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* A compare with a shifted operand. Because of canonicalization, the
+ comparison will have to be swapped when we emit the assembler. */
+ if (GET_MODE (y) == SImode && GET_CODE (y) == REG
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
+ || GET_CODE (x) == ROTATERT))
+ return CC_SWPmode;
+
+ /* This operation is performed swapped, but since we only rely on the Z
+ flag we don't need an additional mode. */
+ if (GET_MODE (y) == SImode && REG_P (y)
+ && GET_CODE (x) == NEG
+ && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ /* This is a special case that is used by combine to allow a
+ comparison of a shifted byte load to be split into a zero-extend
+ followed by a comparison of the shifted integer (only valid for
+ equalities and unsigned inequalities). */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
+ && (op == EQ || op == NE
+ || op == GEU || op == GTU || op == LTU || op == LEU)
+ && GET_CODE (y) == CONST_INT)
+ return CC_Zmode;
+
+ /* A construct for a conditional compare, if the false arm contains
+ 0, then both conditions must be true, otherwise either condition
+ must be true. Not all conditions are possible, so CCmode is
+ returned if it can't be done. */
+ if (GET_CODE (x) == IF_THEN_ELSE
+ && (XEXP (x, 2) == const0_rtx
+ || XEXP (x, 2) == const1_rtx)
+ && COMPARISON_P (XEXP (x, 0))
+ && COMPARISON_P (XEXP (x, 1)))
+ return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
+ INTVAL (XEXP (x, 2)));
+
+ /* Alternate canonicalizations of the above. These are somewhat cleaner. */
+ if (GET_CODE (x) == AND
+ && COMPARISON_P (XEXP (x, 0))
+ && COMPARISON_P (XEXP (x, 1)))
+ return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
+ DOM_CC_X_AND_Y);
+
+ if (GET_CODE (x) == IOR
+ && COMPARISON_P (XEXP (x, 0))
+ && COMPARISON_P (XEXP (x, 1)))
+ return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
+ DOM_CC_X_OR_Y);
+
+ /* An operation (on Thumb) where we want to test for a single bit.
+ This is done by shifting that bit up into the top bit of a
+ scratch register; we can then branch on the sign bit. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1
+ && GET_MODE (x) == SImode
+ && (op == EQ || op == NE)
+ && GET_CODE (x) == ZERO_EXTRACT
+ && XEXP (x, 1) == const1_rtx)
+ return CC_Nmode;
+
+ /* An operation that sets the condition codes as a side-effect, the
+ V flag is not set correctly, so we can only use comparisons where
+ this doesn't matter. (For LT and GE we can use "mi" and "pl"
+ instead.) */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? Does the ZERO_EXTRACT case really apply to thumb2? */
+ if (GET_MODE (x) == SImode
+ && y == const0_rtx
+ && (op == EQ || op == NE || op == LT || op == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == AND || GET_CODE (x) == IOR
+ || GET_CODE (x) == XOR || GET_CODE (x) == MULT
+ || GET_CODE (x) == NOT || GET_CODE (x) == NEG
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == ROTATERT
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
+ return CC_NOOVmode;
+
+ if (GET_MODE (x) == QImode && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
+ && GET_CODE (x) == PLUS
+ && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
+ return CC_Cmode;
+
+ return CCmode;
+}
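+
+/* Illustrative example, not from the original source: an unsigned
+ overflow check of the form (x + y) < x matches the final PLUS test
+ above and selects CC_Cmode, since only the carry flag is needed. */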
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. FP means this is a
+ floating point compare: I don't think that it is needed on the arm. */
+rtx
+arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+
+ emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
+
+ return cc_reg;
+}
+
+/* Generate a sequence of insns that will generate the correct return
+ address mask depending on the physical architecture that the program
+ is running on. */
+rtx
+arm_gen_return_addr_mask (void)
+{
+ rtx reg = gen_reg_rtx (Pmode);
+
+ emit_insn (gen_return_addr_mask (reg));
+ return reg;
+}
+
+void
+arm_reload_in_hi (rtx *operands)
+{
+ rtx ref = operands[1];
+ rtx base, scratch;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (ref) == SUBREG)
+ {
+ offset = SUBREG_BYTE (ref);
+ ref = SUBREG_REG (ref);
+ }
+
+ if (GET_CODE (ref) == REG)
+ {
+ /* We have a pseudo which has been spilt onto the stack; there
+ are two cases here: the first where there is a simple
+ stack-slot replacement and a second where the stack-slot is
+ out of range, or is used as a subreg. */
+ if (reg_equiv_mem[REGNO (ref)])
+ {
+ ref = reg_equiv_mem[REGNO (ref)];
+ base = find_replacement (&XEXP (ref, 0));
+ }
+ else
+ /* The slot is out of range, or was dressed up in a SUBREG. */
+ base = reg_equiv_address[REGNO (ref)];
+ }
+ else
+ base = find_replacement (&XEXP (ref, 0));
+
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+
+ emit_set_insn (base_plus, base);
+ base = base_plus;
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ /* The addend must be CONST_INT, or we would have dealt with it above. */
+ HOST_WIDE_INT hi, lo;
+
+ offset += INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+
+ /* Rework the address into a legal sequence of insns. */
+ /* Valid range for lo is -4095 -> 4095 */
+ lo = (offset >= 0
+ ? (offset & 0xfff)
+ : -((-offset) & 0xfff));
+
+ /* Corner case: if lo is the max offset then we would be out of range
+ once we have added the additional 1 below, so bump the msb into the
+ pre-loading insn(s). */
+ if (lo == 4095)
+ lo &= 0x7ff;
+
+ hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
+ ^ (HOST_WIDE_INT) 0x80000000)
+ - (HOST_WIDE_INT) 0x80000000);
+
+ gcc_assert (hi + lo == offset);
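+
+ /* Worked example, not from the original source: offset 0x1005 splits
+ into lo == 5, hi == 0x1000; offset -0x1005 into lo == -5,
+ hi == -0x1000; and the corner case offset 4095 into lo == 2047,
+ hi == 2048, keeping lo + 1 addressable. */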
+
+ if (hi != 0)
+ {
+ rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+
+ /* Get the base address; addsi3 knows how to handle constants
+ that require more than one insn. */
+ emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
+ base = base_plus;
+ offset = lo;
+ }
+ }
+
+ /* Operands[2] may overlap operands[0] (though it won't overlap
+ operands[1]), that's why we asked for a DImode reg -- so we can
+ use the bit that does not overlap. */
+ if (REGNO (operands[2]) == REGNO (operands[0]))
+ scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+ else
+ scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
+
+ emit_insn (gen_zero_extendqisi2 (scratch,
+ gen_rtx_MEM (QImode,
+ plus_constant (base,
+ offset))));
+ emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
+ gen_rtx_MEM (QImode,
+ plus_constant (base,
+ offset + 1))));
+ if (!BYTES_BIG_ENDIAN)
+ emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
+ gen_rtx_IOR (SImode,
+ gen_rtx_ASHIFT
+ (SImode,
+ gen_rtx_SUBREG (SImode, operands[0], 0),
+ GEN_INT (8)),
+ scratch));
+ else
+ emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
+ gen_rtx_IOR (SImode,
+ gen_rtx_ASHIFT (SImode, scratch,
+ GEN_INT (8)),
+ gen_rtx_SUBREG (SImode, operands[0], 0)));
+}
+
+/* Handle storing a half-word to memory during reload by synthesizing as two
+ byte stores. Take care not to clobber the input values until after we
+ have moved them somewhere safe. This code assumes that if the DImode
+ scratch in operands[2] overlaps either the input value or output address
+ in some way, then that value must die in this insn (we absolutely need
+ two scratch registers for some corner cases). */
+void
+arm_reload_out_hi (rtx *operands)
+{
+ rtx ref = operands[0];
+ rtx outval = operands[1];
+ rtx base, scratch;
+ HOST_WIDE_INT offset = 0;
+
+ if (GET_CODE (ref) == SUBREG)
+ {
+ offset = SUBREG_BYTE (ref);
+ ref = SUBREG_REG (ref);
+ }
+
+ if (GET_CODE (ref) == REG)
+ {
+ /* We have a pseudo which has been spilt onto the stack; there
+ are two cases here: the first where there is a simple
+ stack-slot replacement and a second where the stack-slot is
+ out of range, or is used as a subreg. */
+ if (reg_equiv_mem[REGNO (ref)])
+ {
+ ref = reg_equiv_mem[REGNO (ref)];
+ base = find_replacement (&XEXP (ref, 0));
+ }
+ else
+ /* The slot is out of range, or was dressed up in a SUBREG. */
+ base = reg_equiv_address[REGNO (ref)];
+ }
+ else
+ base = find_replacement (&XEXP (ref, 0));
+
+ scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
+
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+
+ /* Be careful not to destroy OUTVAL. */
+ if (reg_overlap_mentioned_p (base_plus, outval))
+ {
+ /* Updating base_plus might destroy outval, see if we can
+ swap the scratch and base_plus. */
+ if (!reg_overlap_mentioned_p (scratch, outval))
+ {
+ rtx tmp = scratch;
+ scratch = base_plus;
+ base_plus = tmp;
+ }
+ else
+ {
+ rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
+
+ /* Be conservative and copy OUTVAL into the scratch now,
+ this should only be necessary if outval is a subreg
+ of something larger than a word. */
+ /* XXX Might this clobber base? I can't see how it can,
+ since scratch is known to overlap with OUTVAL, and
+ must be wider than a word. */
+ emit_insn (gen_movhi (scratch_hi, outval));
+ outval = scratch_hi;
+ }
+ }
+
+ emit_set_insn (base_plus, base);
+ base = base_plus;
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ /* The addend must be CONST_INT, or we would have dealt with it above. */
+ HOST_WIDE_INT hi, lo;
+
+ offset += INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+
+ /* Rework the address into a legal sequence of insns. */
+ /* Valid range for lo is -4095 -> 4095 */
+ lo = (offset >= 0
+ ? (offset & 0xfff)
+ : -((-offset) & 0xfff));
+
+ /* Corner case: if lo is the max offset then we would be out of range
+ once we have added the additional 1 below, so bump the msb into the
+ pre-loading insn(s). */
+ if (lo == 4095)
+ lo &= 0x7ff;
+
+ hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
+ ^ (HOST_WIDE_INT) 0x80000000)
+ - (HOST_WIDE_INT) 0x80000000);
+
+ gcc_assert (hi + lo == offset);
+
+ if (hi != 0)
+ {
+ rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+
+ /* Be careful not to destroy OUTVAL. */
+ if (reg_overlap_mentioned_p (base_plus, outval))
+ {
+ /* Updating base_plus might destroy outval, see if we
+ can swap the scratch and base_plus. */
+ if (!reg_overlap_mentioned_p (scratch, outval))
+ {
+ rtx tmp = scratch;
+ scratch = base_plus;
+ base_plus = tmp;
+ }
+ else
+ {
+ rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
+
+ /* Be conservative and copy outval into scratch now,
+ this should only be necessary if outval is a
+ subreg of something larger than a word. */
+ /* XXX Might this clobber base? I can't see how it
+ can, since scratch is known to overlap with
+ outval. */
+ emit_insn (gen_movhi (scratch_hi, outval));
+ outval = scratch_hi;
+ }
+ }
+
+ /* Get the base address; addsi3 knows how to handle constants
+ that require more than one insn. */
+ emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
+ base = base_plus;
+ offset = lo;
+ }
+ }
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode,
+ plus_constant (base, offset + 1)),
+ gen_lowpart (QImode, outval)));
+ emit_insn (gen_lshrsi3 (scratch,
+ gen_rtx_SUBREG (SImode, outval, 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
+ gen_lowpart (QImode, scratch)));
+ }
+ else
+ {
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
+ gen_lowpart (QImode, outval)));
+ emit_insn (gen_lshrsi3 (scratch,
+ gen_rtx_SUBREG (SImode, outval, 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode,
+ plus_constant (base, offset + 1)),
+ gen_lowpart (QImode, scratch)));
+ }
+}
+
+/* Return true if a type must be passed in memory. For AAPCS, small aggregates
+ (padded to the size of a word) should be passed in a register. */
+
+static bool
+arm_must_pass_in_stack (enum machine_mode mode, tree type)
+{
+ if (TARGET_AAPCS_BASED)
+ return must_pass_in_stack_var_size (mode, type);
+ else
+ return must_pass_in_stack_var_size_or_pad (mode, type);
+}
+
+
+/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
+ Return true if an argument passed on the stack should be padded upwards,
+ i.e. if the least-significant byte has useful data.
+ For legacy APCS ABIs we use the default. For AAPCS based ABIs small
+ aggregate types are placed in the lowest memory address. */
+
+bool
+arm_pad_arg_upward (enum machine_mode mode, tree type)
+{
+ if (!TARGET_AAPCS_BASED)
+ return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
+
+ if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
+ return false;
+
+ return true;
+}
+
+
+/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
+ For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
+ byte of the register has useful data, and return the opposite if the
+ most significant byte does.
+ For AAPCS, small aggregates and small complex types are always padded
+ upwards. */
+
+bool
+arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type, int first ATTRIBUTE_UNUSED)
+{
+ if (TARGET_AAPCS_BASED
+ && BYTES_BIG_ENDIAN
+ && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
+ && int_size_in_bytes (type) <= 4)
+ return true;
+
+ /* Otherwise, use default padding. */
+ return !BYTES_BIG_ENDIAN;
+}
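+
+/* Illustrative example, not from the original source: on a big-endian
+ AAPCS target a 3-byte aggregate passed in a register is padded upward
+ by arm_pad_reg_upward, while a short argument on the stack is padded
+ downward by arm_pad_arg_upward because it is an integral type. */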
+
+
+/* Print a symbolic form of X to the debug file, F. */
+static void
+arm_print_value (FILE *f, rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
+ return;
+
+ case CONST_DOUBLE:
+ fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
+ return;
+
+ case CONST_VECTOR:
+ {
+ int i;
+
+ fprintf (f, "<");
+ for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
+ {
+ fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
+ if (i < (CONST_VECTOR_NUNITS (x) - 1))
+ fputc (',', f);
+ }
+ fprintf (f, ">");
+ }
+ return;
+
+ case CONST_STRING:
+ fprintf (f, "\"%s\"", XSTR (x, 0));
+ return;
+
+ case SYMBOL_REF:
+ fprintf (f, "`%s'", XSTR (x, 0));
+ return;
+
+ case LABEL_REF:
+ fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
+ return;
+
+ case CONST:
+ arm_print_value (f, XEXP (x, 0));
+ return;
+
+ case PLUS:
+ arm_print_value (f, XEXP (x, 0));
+ fprintf (f, "+");
+ arm_print_value (f, XEXP (x, 1));
+ return;
+
+ case PC:
+ fprintf (f, "pc");
+ return;
+
+ default:
+ fprintf (f, "????");
+ return;
+ }
+}
+
+/* Routines for manipulation of the constant pool. */
+
+/* Arm instructions cannot load a large constant directly into a
+ register; they have to come from a pc relative load. The constant
+ must therefore be placed in the addressable range of the pc
+ relative load. Depending on the precise pc relative load
+ instruction the range is somewhere between 256 bytes and 4k. This
+ means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow
+ things down and make the code larger.
+
+ Normally we can hide the table after an existing unconditional
+ branch so that there is no interruption of the flow, but in the
+ worst case the code looks like this:
+
+ ldr rn, L1
+ ...
+ b L2
+ align
+ L1: .long value
+ L2:
+ ...
+
+ ldr rn, L3
+ ...
+ b L4
+ align
+ L3: .long value
+ L4:
+ ...
+
+ We fix this by performing a scan after scheduling, which notices
+ which instructions need to have their operands fetched from the
+ constant table and builds the table.
+
+ The algorithm starts by building a table of all the constants that
+ need fixing up and all the natural barriers in the function (places
+ where a constant table can be dropped without breaking the flow).
+ For each fixup we note how far the pc-relative replacement will be
+ able to reach and the offset of the instruction into the function.
+
+ Having built the table we then group the fixes together to form
+ tables that are as large as possible (subject to addressing
+ constraints) and emit each table of constants after the last
+ barrier that is within range of all the instructions in the group.
+ If a group does not contain a barrier, then we forcibly create one
+ by inserting a jump instruction into the flow. Once the table has
+ been inserted, the insns are then modified to reference the
+ relevant entry in the pool.
+
+ Possible enhancements to the algorithm (not implemented) are:
+
+ 1) For some processors and object formats, there may be benefit in
+ aligning the pools to the start of cache lines; this alignment
+ would need to be taken into account when calculating addressability
+ of a pool. */
+
+/* These typedefs are located at the start of this file, so that
+ they can be used in the prototypes there. This comment is to
+ remind readers of that fact so that the following structures
+ can be understood more easily.
+
+ typedef struct minipool_node Mnode;
+ typedef struct minipool_fixup Mfix; */
+
+struct minipool_node
+{
+ /* Doubly linked chain of entries. */
+ Mnode * next;
+ Mnode * prev;
+ /* The maximum offset into the code at which this entry can be placed. While
+ pushing fixes for forward references, all entries are sorted in order
+ of increasing max_address. */
+ HOST_WIDE_INT max_address;
+ /* Similarly for an entry inserted for a backwards ref. */
+ HOST_WIDE_INT min_address;
+ /* The number of fixes referencing this entry. This can become zero
+ if we "unpush" an entry. In this case we ignore the entry when we
+ come to emit the code. */
+ int refcount;
+ /* The offset from the start of the minipool. */
+ HOST_WIDE_INT offset;
+ /* The value in the table. */
+ rtx value;
+ /* The mode of the value. */
+ enum machine_mode mode;
+ /* The size of the value. With iWMMXt enabled
+ sizes > 4 also imply an alignment of 8-bytes. */
+ int fix_size;
+};
+
+struct minipool_fixup
+{
+ Mfix * next;
+ rtx insn;
+ HOST_WIDE_INT address;
+ rtx * loc;
+ enum machine_mode mode;
+ int fix_size;
+ rtx value;
+ Mnode * minipool;
+ HOST_WIDE_INT forwards;
+ HOST_WIDE_INT backwards;
+};
+
+/* Fixes less than a word need padding out to a word boundary. */
+#define MINIPOOL_FIX_SIZE(mode) \
+ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
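+
+/* For example, a QImode or HImode entry still occupies 4 bytes in the
+ pool, while a DFmode entry keeps its natural 8. */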
+
+/* APPLE LOCAL begin ARM 4790140 compact switch tables */
+/* The miniLisp in attributes doesn't seem to be up to extracting
+ a numeric datum from the argument; do it in code. */
+void
+arm_adjust_insn_length (rtx insn, int *length)
+{
+ rtx body = PATTERN (insn);
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+
+ /* Add two bytes to the length of conditionally executed Thumb-2
+ instructions for the IT instruction. */
+ if (TARGET_THUMB2 && GET_CODE (PATTERN (insn)) == COND_EXEC)
+ {
+ *length += 2;
+ return;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ if (GET_CODE (body) == UNSPEC_VOLATILE
+ /* APPLE LOCAL 7083296 Build without warnings. */
+ && XINT (body, 1) == VUNSPEC_POOL_STRING)
+ {
+ int len = TREE_STRING_LENGTH (SYMBOL_REF_DECL
+ (XVECEXP (body, 0, 0)));
+ len = (len + 3) & ~3;
+ *length = len;
+ }
+ if (!TARGET_THUMB2 && GET_CODE (body) == ADDR_DIFF_VEC)
+ {
+ /* The obvious sizeof(elt)*nelts, plus sizeof(elt) for the
+ count. */
+ int len = (XVECLEN (body, 1) + 1) * GET_MODE_SIZE (GET_MODE (body));
+ int insn_size = (TARGET_THUMB) ? 2 : 4;
+
+ /* 32-bit thumb tables can have one halfword of padding.
+ If we knew the alignment + offset now, we could be correct
+ about this calculation. Instead, we have to be
+ pessimistic. */
+ if (TARGET_THUMB
+ && GET_MODE_SIZE (GET_MODE (body)) == 4)
+ len += 2;
+
+ /* Round up to a multiple of instruction size. */
+ len = ((len + insn_size - 1) / insn_size) * insn_size;
+ *length = len;
+ }
+ if (TARGET_THUMB
+ /* APPLE LOCAL 6279481 */
+ && !TARGET_32BIT
+ && GET_CODE (body) == UNSPEC_VOLATILE
+ /* APPLE LOCAL 7083296 Build without warnings. */
+ && XINT (body, 1) == VUNSPEC_EPILOGUE)
+ {
+ *length = handle_thumb_unexpanded_epilogue (false);
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_zero_extendhisi2
+ || INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_zero_extendhisi2_v6)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ {
+ rtx mem = XEXP (XEXP (body, 1), 0);
+ if (GET_CODE (mem) == REG || GET_CODE (mem) == SUBREG)
+ *length = 2;
+ else
+ {
+ gcc_assert (GET_CODE (mem) == MEM);
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == PLUS
+ && GET_CODE (XEXP (mem, 0)) == REG
+ && REGNO (XEXP (mem, 0)) == SP_REGNUM)
+ *length = 4;
+ else
+ *length = 2;
+ }
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_thumb1_extendhisi2
+ || INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_extendhisi2_insn_v6)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ {
+ rtx mem = XEXP (XEXP (XVECEXP (body, 0, 0), 1), 0);
+ if (GET_CODE (mem) == REG || GET_CODE (mem) == SUBREG)
+ *length = 2;
+ else
+ {
+ gcc_assert (GET_CODE (mem) == MEM);
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+ *length = 4;
+ if (GET_CODE (mem) == LABEL_REF)
+ *length = 2;
+ if (GET_CODE (mem) == PLUS)
+ {
+ if (GET_CODE (XEXP (mem, 0)) == LABEL_REF
+ && GET_CODE (XEXP (mem, 1)) == CONST_INT)
+ *length = 2;
+ if (GET_CODE (XEXP (mem, 1)) == REG)
+ *length = 2;
+ }
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_extendqisi2)
+ {
+ rtx mem = XEXP (XEXP (body, 1), 0);
+ if (GET_CODE (mem) == REG || GET_CODE (mem) == SUBREG)
+ *length = 2;
+ else
+ {
+ gcc_assert (GET_CODE (mem) == MEM);
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == LABEL_REF)
+ *length = 2;
+ else if (GET_CODE (mem) == PLUS
+ && GET_CODE (XEXP (mem, 0)) == LABEL_REF)
+ *length = 2;
+ /* The "operand matches V constraint" case is not handled explicitly;
+ this can only generate valid code if the address is REG + REG,
+ so assume this is the case and let the code below handle it. */
+ else if (GET_CODE (mem) == PLUS)
+ {
+ if (GET_CODE (XEXP (mem, 0)) == REG)
+ {
+ if (GET_CODE (XEXP (mem, 1)) == REG)
+ *length = 2;
+ else if (REGNO (XEXP (mem, 0)) == REGNO (XEXP (body, 0)))
+ *length = 6;
+ else
+ *length = 4;
+ }
+ else
+ {
+ gcc_assert (GET_CODE (XEXP (mem, 1)) == REG);
+ if (REGNO (XEXP (mem, 1)) == REGNO (XEXP (body, 0)))
+ *length = 6;
+ else
+ *length = 4;
+ }
+ }
+ else if (GET_CODE (mem) == REG && REGNO (XEXP (body, 0)) == REGNO (mem))
+ *length = 6;
+ else
+ *length = 4;
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_extendqisi2_v6)
+ {
+ rtx mem = XEXP (XEXP (body, 1), 0);
+ if (GET_CODE (mem) == REG || GET_CODE (mem) == SUBREG)
+ *length = 2;
+ else
+ {
+ gcc_assert (GET_CODE (mem) == MEM);
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+ if (GET_CODE (mem) == LABEL_REF)
+ *length = 2;
+ else if (GET_CODE (mem) == PLUS
+ && GET_CODE (XEXP (mem, 0)) == LABEL_REF)
+ *length = 2;
+ /* The "operand matches V constraint" case is not handled explicitly;
+ this can only generate valid code if the address is REG + REG,
+ so assume this is the case and let the code below handle it. */
+ else if (GET_CODE (mem) == PLUS)
+ {
+ if (GET_CODE (XEXP (mem, 0)) == REG)
+ {
+ if (GET_CODE (XEXP (mem, 1)) == REG)
+ *length = 2;
+ else if (REGNO (XEXP (mem, 0)) == REGNO (XEXP (body, 0)))
+ *length = 4;
+ else
+ *length = 4;
+ }
+ else
+ {
+ gcc_assert (GET_CODE (XEXP (mem, 1)) == REG);
+ if (REGNO (XEXP (mem, 1)) == REGNO (XEXP (body, 0)))
+ *length = 4;
+ else
+ *length = 4;
+ }
+ }
+ else if (GET_CODE (mem) == REG && REGNO (XEXP (body, 0)) == REGNO (mem))
+ *length = 4;
+ else
+ *length = 4;
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_movhi_insn)
+ {
+ rtx mem = XEXP (body, 1);
+ if (GET_CODE (mem) != MEM)
+ *length = 2;
+ else if (GET_CODE (XEXP (mem, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (mem, 0), 0)) == REG
+ && REGNO (XEXP (XEXP (mem, 0), 0)) == SP_REGNUM)
+ *length = 4;
+ else
+ *length = 2;
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (INSN_CODE (insn) == CODE_FOR_adjustable_thumb1_movdi_insn)
+ {
+ rtx op0 = XEXP (body, 0);
+ rtx op1 = XEXP (body, 1);
+
+ /* case 3 */
+ if (GET_CODE (op0) == MEM &&
+ (GET_CODE (XEXP (op0, 0)) == PRE_INC
+ || GET_CODE (XEXP (op0, 0)) == POST_INC))
+ *length = 2;
+ /* case 4 */
+ else if (GET_CODE (op1) == MEM &&
+ (GET_CODE (XEXP (op1, 0)) == PRE_INC
+ || GET_CODE (XEXP (op1, 0)) == POST_INC))
+ *length = 2;
+ /* case 2 */
+ else if (GET_CODE (op1) == CONST_INT
+ && !const_ok_for_arm (INTVAL (op1))
+ && INTVAL (op1) >= -4095
+ && INTVAL (op1) <= 4095
+ && thumb_low_register_operand (op0, GET_MODE (op0)))
+ *length = 6;
+ /* case 0, 1, 6, 7 */
+ else if (GET_CODE (op1) != MEM)
+ *length = 4;
+ /* case 5 */
+ else
+ {
+ rtx addr = XEXP (op1, 0);
+ if (GET_CODE (addr) == REG)
+ *length = 4;
+ else if (GET_CODE (addr) == CONST)
+ *length = 4;
+ else if (GET_CODE (addr) == PLUS)
+ {
+ rtx base = XEXP (addr, 0);
+ rtx offset = XEXP (addr, 1);
+ if (CONSTANT_P (base))
+ {
+ rtx temp = base;
+ base = offset;
+ offset = temp;
+ }
+ if (GET_CODE (offset) == REG)
+ *length = 6;
+ else
+ *length = 4;
+ }
+ else if (GET_CODE (addr) == LABEL_REF)
+ *length = 4;
+ else
+ abort ();
+ }
+ }
+}
+/* APPLE LOCAL end ARM 4790140 compact switch tables */
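+
+/* Illustrative example, not from the original source: a conditionally
+ executed 2-byte Thumb-2 instruction is reported as 4 bytes long,
+ since its length must also cover the IT instruction that guards it. */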
+
+static Mnode * minipool_vector_head;
+static Mnode * minipool_vector_tail;
+static rtx minipool_vector_label;
+static int minipool_pad;
+
+/* The linked list of all minipool fixes required for this function. */
+Mfix * minipool_fix_head;
+Mfix * minipool_fix_tail;
+/* The fix entry for the current minipool, once it has been placed. */
+Mfix * minipool_barrier;
+
+/* Determines if INSN is the start of a jump table. Returns the end
+ of the TABLE or NULL_RTX. */
+static rtx
+is_jump_table (rtx insn)
+{
+ rtx table;
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && JUMP_LABEL (insn) != NULL
+ && ((table = next_real_insn (JUMP_LABEL (insn)))
+ == next_real_insn (insn))
+ && table != NULL
+ && GET_CODE (table) == JUMP_INSN
+ && (GET_CODE (PATTERN (table)) == ADDR_VEC
+ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
+ return table;
+
+ return NULL_RTX;
+}
+
+#ifndef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+#endif
+
+static HOST_WIDE_INT
+get_jump_table_size (rtx insn)
+{
+ /* ADDR_VECs only take room if read-only data goes into the text
+ section. */
+ if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
+ {
+ rtx body = PATTERN (insn);
+ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ HOST_WIDE_INT size;
+ HOST_WIDE_INT modesize;
+
+ modesize = GET_MODE_SIZE (GET_MODE (body));
+ size = modesize * XVECLEN (body, elt);
+ switch (modesize)
+ {
+ case 1:
+ /* Round up size of TBB table to a halfword boundary. */
+ size = (size + 1) & ~(HOST_WIDE_INT)1;
+ break;
+ case 2:
+ /* No padding necessary for TBH. */
+ break;
+ case 4:
+ /* Add two bytes for alignment on Thumb. */
+ if (TARGET_THUMB)
+ size += 2;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return size;
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+
+ return 0;
+}
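+
+/* Worked example, not from the original source: a TBB table with 5
+ byte-sized entries is 5 bytes and is rounded up to 6 to keep the
+ following code halfword aligned; a 5-entry TBH table is already 10
+ bytes and needs no padding. */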
+
+/* Move a minipool fix MP from its current location to before MAX_MP.
+ If MAX_MP is NULL, then MP doesn't need moving, but the addressing
+ constraints may need updating. */
+static Mnode *
+move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
+ HOST_WIDE_INT max_address)
+{
+ /* The code below assumes these are different. */
+ gcc_assert (mp != max_mp);
+
+ if (max_mp == NULL)
+ {
+ if (max_address < mp->max_address)
+ mp->max_address = max_address;
+ }
+ else
+ {
+ if (max_address > max_mp->max_address - mp->fix_size)
+ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
+ mp->max_address = max_address;
+
+ /* Unlink MP from its current position. Since max_mp is non-null,
+ mp->prev must be non-null. */
+ mp->prev->next = mp->next;
+ if (mp->next != NULL)
+ mp->next->prev = mp->prev;
+ else
+ minipool_vector_tail = mp->prev;
+
+ /* Re-insert it before MAX_MP. */
+ mp->next = max_mp;
+ mp->prev = max_mp->prev;
+ max_mp->prev = mp;
+
+ if (mp->prev != NULL)
+ mp->prev->next = mp;
+ else
+ minipool_vector_head = mp;
+ }
+
+ /* Save the new entry. */
+ max_mp = mp;
+
+ /* Scan over the preceding entries and adjust their addresses as
+ required. */
+ while (mp->prev != NULL
+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+ {
+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+ mp = mp->prev;
+ }
+
+ return max_mp;
+}
+
+/* Add a constant to the minipool for a forward reference. Returns the
+ node added or NULL if the constant will not fit in this pool. */
+static Mnode *
+add_minipool_forward_ref (Mfix *fix)
+{
+ /* If set, max_mp is the first pool_entry that has a lower
+ constraint than the one we are trying to add. */
+ Mnode * max_mp = NULL;
+ HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
+ Mnode * mp;
+
+ /* If the minipool starts before the end of FIX->INSN then this FIX
+ can not be placed into the current pool. Furthermore, adding the
+ new constant pool entry may cause the pool to start FIX_SIZE bytes
+ earlier. */
+ if (minipool_vector_head &&
+ (fix->address + get_attr_length (fix->insn)
+ >= minipool_vector_head->max_address - fix->fix_size))
+ return NULL;
+
+ /* Scan the pool to see if a constant with the same value has
+ already been added. While we are doing this, also note the
+ location where we must insert the constant if it doesn't already
+ exist. */
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
+ && fix->mode == mp->mode
+ && (GET_CODE (fix->value) != CODE_LABEL
+ || (CODE_LABEL_NUMBER (fix->value)
+ == CODE_LABEL_NUMBER (mp->value)))
+ && rtx_equal_p (fix->value, mp->value))
+ {
+ /* More than one fix references this entry. */
+ mp->refcount++;
+ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
+ }
+
+ /* Note the insertion point if necessary. */
+ if (max_mp == NULL
+ && mp->max_address > max_address)
+ max_mp = mp;
+
+ /* If we are inserting an 8-byte aligned quantity and
+ we have not already found an insertion point, then
+ make sure that all such 8-byte aligned quantities are
+ placed at the start of the pool. */
+ if (ARM_DOUBLEWORD_ALIGN
+ && max_mp == NULL
+ && fix->fix_size == 8
+ && mp->fix_size != 8)
+ {
+ max_mp = mp;
+ max_address = mp->max_address;
+ }
+ }
+
+ /* The value is not currently in the minipool, so we need to create
+ a new entry for it. If MAX_MP is NULL, the entry will be put on
+ the end of the list since the placement is less constrained than
+ any existing entry. Otherwise, we insert the new fix before
+ MAX_MP and, if necessary, adjust the constraints on the other
+ entries. */
+ mp = XNEW (Mnode);
+ mp->fix_size = fix->fix_size;
+ mp->mode = fix->mode;
+ mp->value = fix->value;
+ mp->refcount = 1;
+ /* Not yet required for a backwards ref. */
+ mp->min_address = -65536;
+
+ if (max_mp == NULL)
+ {
+ mp->max_address = max_address;
+ mp->next = NULL;
+ mp->prev = minipool_vector_tail;
+
+ if (mp->prev == NULL)
+ {
+ minipool_vector_head = mp;
+ minipool_vector_label = gen_label_rtx ();
+ }
+ else
+ mp->prev->next = mp;
+
+ minipool_vector_tail = mp;
+ }
+ else
+ {
+ if (max_address > max_mp->max_address - mp->fix_size)
+ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
+ mp->max_address = max_address;
+
+ mp->next = max_mp;
+ mp->prev = max_mp->prev;
+ max_mp->prev = mp;
+ if (mp->prev != NULL)
+ mp->prev->next = mp;
+ else
+ minipool_vector_head = mp;
+ }
+
+ /* Save the new entry. */
+ max_mp = mp;
+
+ /* Scan over the preceding entries and adjust their addresses as
+ required. */
+ while (mp->prev != NULL
+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+ {
+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+ mp = mp->prev;
+ }
+
+ return max_mp;
+}
+
+static Mnode *
+move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
+ HOST_WIDE_INT min_address)
+{
+ HOST_WIDE_INT offset;
+
+ /* The code below assumes these are different. */
+ gcc_assert (mp != min_mp);
+
+ if (min_mp == NULL)
+ {
+ if (min_address > mp->min_address)
+ mp->min_address = min_address;
+ }
+ else
+ {
+ /* We will adjust this below if it is too loose. */
+ mp->min_address = min_address;
+
+ /* Unlink MP from its current position. Since min_mp is non-null,
+ mp->next must be non-null. */
+ mp->next->prev = mp->prev;
+ if (mp->prev != NULL)
+ mp->prev->next = mp->next;
+ else
+ minipool_vector_head = mp->next;
+
+ /* Reinsert it after MIN_MP. */
+ mp->prev = min_mp;
+ mp->next = min_mp->next;
+ min_mp->next = mp;
+ if (mp->next != NULL)
+ mp->next->prev = mp;
+ else
+ minipool_vector_tail = mp;
+ }
+
+ min_mp = mp;
+
+ offset = 0;
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ {
+ mp->offset = offset;
+ if (mp->refcount > 0)
+ offset += mp->fix_size;
+
+ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
+ mp->next->min_address = mp->min_address + mp->fix_size;
+ }
+
+ return min_mp;
+}
+
+/* Add a constant to the minipool for a backward reference. Returns the
+ node added or NULL if the constant will not fit in this pool.
+
+ Note that the code for insertion for a backwards reference can be
+ somewhat confusing because the calculated offsets for each fix do
+ not take into account the size of the pool (which is still under
+ construction). */
+static Mnode *
+add_minipool_backward_ref (Mfix *fix)
+{
+ /* If set, min_mp is the last pool_entry that has a lower constraint
+ than the one we are trying to add. */
+ Mnode *min_mp = NULL;
+ /* This can be negative, since it is only a constraint. */
+ HOST_WIDE_INT min_address = fix->address - fix->backwards;
+ Mnode *mp;
+
+ /* If we can't reach the current pool from this insn, or if we can't
+ insert this entry at the end of the pool without pushing other
+ fixes out of range, then we don't try. This ensures that we
+ can't fail later on. */
+ if (min_address >= minipool_barrier->address
+ || (minipool_vector_tail->min_address + fix->fix_size
+ >= minipool_barrier->address))
+ return NULL;
+
+ /* Scan the pool to see if a constant with the same value has
+ already been added. While we are doing this, also note the
+ location where we must insert the constant if it doesn't already
+ exist. */
+ for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
+ && fix->mode == mp->mode
+ && (GET_CODE (fix->value) != CODE_LABEL
+ || (CODE_LABEL_NUMBER (fix->value)
+ == CODE_LABEL_NUMBER (mp->value)))
+ && rtx_equal_p (fix->value, mp->value)
+ /* Check that there is enough slack to move this entry to the
+ end of the table (this is conservative). */
+ && (mp->max_address
+ > (minipool_barrier->address
+ + minipool_vector_tail->offset
+ + minipool_vector_tail->fix_size)))
+ {
+ mp->refcount++;
+ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
+ }
+
+ if (min_mp != NULL)
+ mp->min_address += fix->fix_size;
+ else
+ {
+ /* Note the insertion point if necessary. */
+ if (mp->min_address < min_address)
+ {
+ /* For now, we do not allow the insertion of nodes requiring
+ 8-byte alignment anywhere but at the start of the pool. */
+ if (ARM_DOUBLEWORD_ALIGN
+ && fix->fix_size == 8 && mp->fix_size != 8)
+ return NULL;
+ else
+ min_mp = mp;
+ }
+ else if (mp->max_address
+ < minipool_barrier->address + mp->offset + fix->fix_size)
+ {
+ /* Inserting before this entry would push the fix beyond
+ its maximum address (which can happen if we have
+ re-located a forwards fix); force the new fix to come
+ after it. */
+ min_mp = mp;
+ min_address = mp->min_address + fix->fix_size;
+ }
+ /* If we are inserting an 8-byte aligned quantity and
+ we have not already found an insertion point, then
+ make sure that all such 8-byte aligned quantities are
+ placed at the start of the pool. */
+ else if (ARM_DOUBLEWORD_ALIGN
+ && min_mp == NULL
+ && fix->fix_size == 8
+ && mp->fix_size < 8)
+ {
+ min_mp = mp;
+ min_address = mp->min_address + fix->fix_size;
+ }
+ }
+ }
+
+ /* We need to create a new entry. */
+ mp = XNEW (Mnode);
+ mp->fix_size = fix->fix_size;
+ mp->mode = fix->mode;
+ mp->value = fix->value;
+ mp->refcount = 1;
+ mp->max_address = minipool_barrier->address + 65536;
+
+ mp->min_address = min_address;
+
+ if (min_mp == NULL)
+ {
+ mp->prev = NULL;
+ mp->next = minipool_vector_head;
+
+ if (mp->next == NULL)
+ {
+ minipool_vector_tail = mp;
+ minipool_vector_label = gen_label_rtx ();
+ }
+ else
+ mp->next->prev = mp;
+
+ minipool_vector_head = mp;
+ }
+ else
+ {
+ mp->next = min_mp->next;
+ mp->prev = min_mp;
+ min_mp->next = mp;
+
+ if (mp->next != NULL)
+ mp->next->prev = mp;
+ else
+ minipool_vector_tail = mp;
+ }
+
+ /* Save the new entry. */
+ min_mp = mp;
+
+ if (mp->prev)
+ mp = mp->prev;
+ else
+ mp->offset = 0;
+
+ /* Scan over the following entries and adjust their offsets. */
+ while (mp->next != NULL)
+ {
+ if (mp->next->min_address < mp->min_address + mp->fix_size)
+ mp->next->min_address = mp->min_address + mp->fix_size;
+
+ if (mp->refcount)
+ mp->next->offset = mp->offset + mp->fix_size;
+ else
+ mp->next->offset = mp->offset;
+
+ mp = mp->next;
+ }
+
+ return min_mp;
+}
+
+static void
+assign_minipool_offsets (Mfix *barrier)
+{
+ HOST_WIDE_INT offset = 0;
+ Mnode *mp;
+
+ minipool_barrier = barrier;
+
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ {
+ mp->offset = offset;
+
+ if (mp->refcount > 0)
+ offset += mp->fix_size;
+ }
+}
+
+/* Output the literal table. */
+static void
+dump_minipool (rtx scan)
+{
+ Mnode * mp;
+ Mnode * nmp;
+ int align64 = 0;
+
+ if (ARM_DOUBLEWORD_ALIGN)
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ if (mp->refcount > 0 && mp->fix_size == 8)
+ {
+ align64 = 1;
+ break;
+ }
+
+ if (dump_file)
+ fprintf (dump_file,
+ ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
+ INSN_UID (scan), (long) minipool_barrier->address, align64 ? 8 : 4);
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
+ scan = emit_label_after (minipool_vector_label, scan);
+
+ for (mp = minipool_vector_head; mp != NULL; mp = nmp)
+ {
+ if (mp->refcount > 0)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ ";; Offset %u, min %ld, max %ld ",
+ (unsigned) mp->offset, (long) mp->min_address,
+ (long) mp->max_address);
+ arm_print_value (dump_file, mp->value);
+ fputc ('\n', dump_file);
+ }
+
+ switch (mp->fix_size)
+ {
+#ifdef HAVE_consttable_1
+ case 1:
+ scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
+ break;
+
+#endif
+#ifdef HAVE_consttable_2
+ case 2:
+ scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
+ break;
+
+#endif
+#ifdef HAVE_consttable_4
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
+ break;
+
+#endif
+#ifdef HAVE_consttable_8
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
+ break;
+
+#endif
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#ifdef HAVE_consttable_16
+ case 16:
+ scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
+ break;
+
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ nmp = mp->next;
+ free (mp);
+ }
+
+ minipool_vector_head = minipool_vector_tail = NULL;
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+}
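+
+/* Illustrative output sketch, not from the original source: a pool
+ holding one live DFmode and one live SImode entry is emitted as a
+ fresh label, an 8-byte alignment, the pool label, a consttable_8 at
+ offset 0, a consttable_4 at offset 8, then consttable_end and a
+ barrier. */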
+
+/* Return the cost of forcibly inserting a barrier after INSN. */
+static int
+arm_barrier_cost (rtx insn)
+{
+ /* Basing the location of the pool on the loop depth is preferable,
+ but at the moment, the basic block information seems to be
+ corrupt by this stage of the compilation. */
+ int base_cost = 50;
+ rtx next = next_nonnote_insn (insn);
+
+ if (next != NULL && GET_CODE (next) == CODE_LABEL)
+ base_cost -= 20;
+
+ switch (GET_CODE (insn))
+ {
+ case CODE_LABEL:
+ /* It will always be better to place the table before the label, rather
+ than after it. */
+ return 50;
+
+ case INSN:
+ case CALL_INSN:
+ return base_cost;
+
+ case JUMP_INSN:
+ return base_cost - 10;
+
+ default:
+ return base_cost + 10;
+ }
+}
+
+/* Find the best place in the insn stream in the range
+ (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
+ Create the barrier by inserting a jump and add a new fix entry for
+ it. */
+static Mfix *
+create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
+{
+ HOST_WIDE_INT count = 0;
+ rtx barrier;
+ rtx from = fix->insn;
+ /* The instruction after which we will insert the jump. */
+ rtx selected = NULL;
+ int selected_cost;
+ /* The address at which the jump instruction will be placed. */
+ HOST_WIDE_INT selected_address;
+ Mfix * new_fix;
+ HOST_WIDE_INT max_count = max_address - fix->address;
+ rtx label = gen_label_rtx ();
+
+ selected_cost = arm_barrier_cost (from);
+ selected_address = fix->address;
+
+ while (from && count < max_count)
+ {
+ rtx tmp;
+ int new_cost;
+
+ /* This code shouldn't have been called if there was a natural barrier
+ within range. */
+ gcc_assert (GET_CODE (from) != BARRIER);
+
+ /* Count the length of this insn. */
+ count += get_attr_length (from);
+
+ /* APPLE LOCAL begin ARM 6008578 */
+ if (LABEL_P (from))
+ count += get_label_pad (from, fix->address + count);
+ /* APPLE LOCAL end ARM 6008578 */
+
+ /* If there is a jump table, add its length. */
+ tmp = is_jump_table (from);
+ if (tmp != NULL)
+ {
+ count += get_jump_table_size (tmp);
+
+ /* Jump tables aren't in a basic block, so base the cost on
+ the dispatch insn. If we select this location, we will
+ still put the pool after the table. */
+ new_cost = arm_barrier_cost (from);
+
+ if (count < max_count
+ && (!selected || new_cost <= selected_cost))
+ {
+ selected = tmp;
+ selected_cost = new_cost;
+ selected_address = fix->address + count;
+ }
+
+ /* Continue after the dispatch table. */
+ from = NEXT_INSN (tmp);
+ continue;
+ }
+
+ new_cost = arm_barrier_cost (from);
+
+ if (count < max_count
+ && (!selected || new_cost <= selected_cost))
+ {
+ selected = from;
+ selected_cost = new_cost;
+ selected_address = fix->address + count;
+ }
+
+ from = NEXT_INSN (from);
+ }
+
+ /* Make sure that we found a place to insert the jump. */
+ gcc_assert (selected);
+
+ /* Create a new JUMP_INSN that branches around a barrier. */
+ from = emit_jump_insn_after (gen_jump (label), selected);
+ JUMP_LABEL (from) = label;
+ barrier = emit_barrier_after (from);
+ emit_label_after (label, barrier);
+
+ /* Create a minipool barrier entry for the new barrier. */
+ new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
+ new_fix->insn = barrier;
+ new_fix->address = selected_address;
+ new_fix->next = fix->next;
+ fix->next = new_fix;
+
+ return new_fix;
+}
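+
+/* The sequence created above is, schematically:
+
+	b	.Lskip		@ jump around the pool
+	(barrier)		@ dump_minipool will place the pool here
+   .Lskip:
+
+   where .Lskip is an illustrative label name; the new Mfix records
+   the barrier at SELECTED_ADDRESS.  */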
+
+/* Record that there is a natural barrier in the insn stream at
+ ADDRESS. */
+static void
+push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
+{
+ Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
+
+ fix->insn = insn;
+ fix->address = address;
+
+ fix->next = NULL;
+ if (minipool_fix_head != NULL)
+ minipool_fix_tail->next = fix;
+ else
+ minipool_fix_head = fix;
+
+ minipool_fix_tail = fix;
+}
+
+/* Record INSN, which will need fixing up to load a value from the
+ minipool. ADDRESS is the offset of the insn since the start of the
+ function; LOC is a pointer to the part of the insn which requires
+ fixing; VALUE is the constant that must be loaded, which is of type
+ MODE. */
+static void
+push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
+ enum machine_mode mode, rtx value)
+{
+ Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
+
+#ifdef AOF_ASSEMBLER
+ /* PIC symbol references need to be converted into offsets into the
+ based area. */
+ /* XXX This shouldn't be done here. */
+ if (flag_pic && GET_CODE (value) == SYMBOL_REF)
+ value = aof_pic_entry (value);
+#endif /* AOF_ASSEMBLER */
+
+ fix->insn = insn;
+ fix->address = address;
+ fix->loc = loc;
+ fix->mode = mode;
+ fix->fix_size = MINIPOOL_FIX_SIZE (mode);
+ fix->value = value;
+ fix->forwards = get_attr_pool_range (insn);
+ fix->backwards = get_attr_neg_pool_range (insn);
+ fix->minipool = NULL;
+
+ /* If an insn doesn't have a range defined for it, then it isn't
+ expecting to be reworked by this code. Better to stop now than
+ to generate duff assembly code. */
+ gcc_assert (fix->forwards || fix->backwards);
+
+ /* If an entry requires 8-byte alignment then assume all constant pools
+ require 4 bytes of padding. Trying to do this later on a per-pool
+ basis is awkward because existing pool entries have to be modified. */
+ if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
+ minipool_pad = 4;
+
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
+ GET_MODE_NAME (mode),
+ INSN_UID (insn), (unsigned long) address,
+ -1 * (long)fix->backwards, (long)fix->forwards);
+ arm_print_value (dump_file, fix->value);
+ fprintf (dump_file, "\n");
+ }
+
+ /* Add it to the chain of fixes. */
+ fix->next = NULL;
+
+ if (minipool_fix_head != NULL)
+ minipool_fix_tail->next = fix;
+ else
+ minipool_fix_head = fix;
+
+ minipool_fix_tail = fix;
+}
+
+/* Return the cost of synthesizing a 64-bit constant VAL inline.
+ Returns the number of insns needed, or 99 if we don't know how to
+ do it. */
+int
+arm_const_double_inline_cost (rtx val)
+{
+ rtx lowpart, highpart;
+ enum machine_mode mode;
+
+ mode = GET_MODE (val);
+
+ if (mode == VOIDmode)
+ mode = DImode;
+
+ gcc_assert (GET_MODE_SIZE (mode) == 8);
+
+ lowpart = gen_lowpart (SImode, val);
+ highpart = gen_highpart_mode (SImode, mode, val);
+
+ gcc_assert (GET_CODE (lowpart) == CONST_INT);
+ gcc_assert (GET_CODE (highpart) == CONST_INT);
+
+ return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
+ NULL_RTX, NULL_RTX, 0, 0)
+ + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
+ NULL_RTX, NULL_RTX, 0, 0));
+}
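+
+/* For instance, the DImode constant 0x0000000100000001 costs 1 + 1 = 2,
+   since each 32-bit half is a single valid data-processing immediate.  */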
+
+/* APPLE LOCAL begin 5831562 long long constants */
+/* Return true if a 64-bit constant consists of two 32-bit halves,
+ each of which is a valid immediate data-processing operand.
+ (This differs from other 64-bit evaluations in that ~const is
+   not considered.)  */
+
+bool
+const64_ok_for_arm_immediate (rtx val)
+{
+ rtx lowpart, highpart;
+ enum machine_mode mode;
+
+ if (!TARGET_ARM)
+ return false;
+
+ mode = GET_MODE (val);
+
+ if (mode == VOIDmode)
+ mode = DImode;
+
+ gcc_assert (GET_MODE_SIZE (mode) == 8);
+
+ lowpart = gen_lowpart (SImode, val);
+ highpart = gen_highpart_mode (SImode, mode, val);
+
+ gcc_assert (GET_CODE (lowpart) == CONST_INT);
+ gcc_assert (GET_CODE (highpart) == CONST_INT);
+
+ return (const_ok_for_arm (INTVAL (lowpart))
+ && const_ok_for_arm (INTVAL (highpart)));
+}
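+
+/* E.g. 0x000000ff000000ff qualifies (both halves are the immediate
+   0xff), while 0x0000010100000101 does not (0x101 is not a rotated
+   8-bit immediate).  */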
+
+/* As above, but allow for constants whose negative value
+ fits as well. Both halves must match either as themselves
+ or as negated. */
+bool
+const64_ok_for_arm_add (rtx val)
+{
+ rtx lowpart, highpart, lowpart_neg, highpart_neg, val_neg;
+ enum machine_mode mode;
+
+ if (!TARGET_ARM)
+ return false;
+
+ mode = GET_MODE (val);
+
+ if (mode == VOIDmode)
+ mode = DImode;
+
+ gcc_assert (GET_MODE_SIZE (mode) == 8);
+
+ lowpart = gen_lowpart (SImode, val);
+ highpart = gen_highpart_mode (SImode, mode, val);
+
+ val_neg = negate_rtx (mode, val);
+ lowpart_neg = gen_lowpart (SImode, val_neg);
+ highpart_neg = gen_highpart_mode (SImode, mode, val_neg);
+
+ gcc_assert (GET_CODE (lowpart) == CONST_INT);
+ gcc_assert (GET_CODE (highpart) == CONST_INT);
+
+ return ((const_ok_for_arm (INTVAL (lowpart))
+ && const_ok_for_arm (INTVAL (highpart)))
+ || (const_ok_for_arm (INTVAL (lowpart_neg))
+ && const_ok_for_arm (INTVAL (highpart_neg))));
+}
+/* APPLE LOCAL end 5831562 long long constants */
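+
+/* E.g. the DImode constant -256 is accepted here but not above: its own
+   halves (0xffffff00, 0xffffffff) are not valid immediates, but both
+   halves of the negated value +256 are.  */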
+
+/* Return true if it is worthwhile to split a 64-bit constant into two
+ 32-bit operations. This is the case if optimizing for size, or
+ if we have load delay slots, or if one 32-bit part can be done with
+ a single data operation. */
+bool
+arm_const_double_by_parts (rtx val)
+{
+ enum machine_mode mode = GET_MODE (val);
+ rtx part;
+
+ if (optimize_size || arm_ld_sched)
+ return true;
+
+ if (mode == VOIDmode)
+ mode = DImode;
+
+ part = gen_highpart_mode (SImode, mode, val);
+
+ gcc_assert (GET_CODE (part) == CONST_INT);
+
+ if (const_ok_for_arm (INTVAL (part))
+ || const_ok_for_arm (~INTVAL (part)))
+ return true;
+
+ part = gen_lowpart (SImode, val);
+
+ gcc_assert (GET_CODE (part) == CONST_INT);
+
+ if (const_ok_for_arm (INTVAL (part))
+ || const_ok_for_arm (~INTVAL (part)))
+ return true;
+
+ return false;
+}
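+
+/* E.g. for 0x00000001ffffffff the high part (1) is a valid immediate,
+   so splitting is worthwhile even without load delay slots.  */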
+
+/* Scan INSN and note any of its operands that need fixing.
+ If DO_PUSHES is false we do not actually push any of the fixups
+ needed. The function returns TRUE if any fixups were needed/pushed.
+ This is used by arm_memory_load_p() which needs to know about loads
+ of constants that will be converted into minipool loads. */
+static bool
+note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
+{
+ bool result = false;
+ int opno;
+
+ extract_insn (insn);
+
+ if (!constrain_operands (1))
+ fatal_insn_not_found (insn);
+
+ if (recog_data.n_alternatives == 0)
+ return false;
+
+ /* Fill in recog_op_alt with information about the constraints of
+ this insn. */
+ preprocess_constraints ();
+
+ for (opno = 0; opno < recog_data.n_operands; opno++)
+ {
+ /* Things we need to fix can only occur in inputs. */
+ if (recog_data.operand_type[opno] != OP_IN)
+ continue;
+
+ /* If this alternative is a memory reference, then any mention
+ of constants in this alternative is really to fool reload
+ into allowing us to accept one there. We need to fix them up
+ now so that we output the right code. */
+ if (recog_op_alt[opno][which_alternative].memory_ok)
+ {
+ rtx op = recog_data.operand[opno];
+
+ if (CONSTANT_P (op))
+ {
+ if (do_pushes)
+ push_minipool_fix (insn, address, recog_data.operand_loc[opno],
+ recog_data.operand_mode[opno], op);
+ result = true;
+ }
+ else if (GET_CODE (op) == MEM
+ && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
+ {
+ if (do_pushes)
+ {
+ rtx cop = avoid_constant_pool_reference (op);
+
+ /* Casting the address of something to a mode narrower
+ than a word can cause avoid_constant_pool_reference()
+ to return the pool reference itself. That's no good to
+ us here. Let's just hope that we can use the
+ constant pool value directly. */
+ if (op == cop)
+ cop = get_pool_constant (XEXP (op, 0));
+
+ push_minipool_fix (insn, address,
+ recog_data.operand_loc[opno],
+ recog_data.operand_mode[opno], cop);
+ }
+
+ result = true;
+ }
+ }
+ }
+
+ return result;
+}
+
+/* APPLE LOCAL begin ARM 6008578 */
+/* Return the bytes of padding that will be inserted to align
+ the label INSN given the current pc ADDRESS. */
+static HOST_WIDE_INT
+get_label_pad (rtx insn, HOST_WIDE_INT address)
+{
+ int label_align, max_skip;
+ unsigned HOST_WIDE_INT align_mask;
+ int pad_needed;
+
+ gcc_assert (LABEL_P (insn));
+
+ label_align = LABEL_ALIGN_LOG (insn);
+ max_skip = LABEL_MAX_SKIP (insn);
+ align_mask = ((unsigned int) 1 << label_align) - 1;
+
+ /* Already aligned. */
+ if ((address & align_mask) == 0)
+ return 0;
+
+ pad_needed = ((address | align_mask) + 1) - address;
+
+ /* We would have to insert more than max_skip bytes to
+ align this label. */
+ if (max_skip && (pad_needed > max_skip))
+ return 0;
+
+ return pad_needed;
+}
+/* APPLE LOCAL end ARM 6008578 */
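+
+/* Worked example: for a label with label_align == 2 (4-byte alignment)
+   at address 0x1006, align_mask is 3 and pad_needed is 2; if max_skip
+   were 1, the label could not be aligned and 0 would be returned.  */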
+
+/* GCC puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+static void
+arm_reorg (void)
+{
+ rtx insn;
+ HOST_WIDE_INT address = 0;
+ Mfix * fix;
+
+ minipool_fix_head = minipool_fix_tail = NULL;
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* This is actually a lurking bug, I think; alignment matters. */
+ if (TARGET_THUMB)
+ address = count_thumb_unexpanded_prologue ();
+/* APPLE LOCAL end ARM compact switch tables */
+
+ /* The first insn must always be a note, or the code below won't
+ scan it properly. */
+ insn = get_insns ();
+ gcc_assert (GET_CODE (insn) == NOTE);
+ minipool_pad = 0;
+
+ /* Scan all the insns and record the operands that will need fixing. */
+ for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
+ {
+ if (TARGET_CIRRUS_FIX_INVALID_INSNS
+ && (arm_cirrus_insn_p (insn)
+ || GET_CODE (insn) == JUMP_INSN
+ || arm_memory_load_p (insn)))
+ cirrus_reorg (insn);
+
+ if (GET_CODE (insn) == BARRIER)
+ push_minipool_barrier (insn, address);
+ /* APPLE LOCAL begin ARM 6008578 */
+ else if (LABEL_P (insn))
+ address += get_label_pad (insn, address);
+ /* APPLE LOCAL end ARM 6008578 */
+ else if (INSN_P (insn))
+ {
+ rtx table;
+
+ note_invalid_constants (insn, address, true);
+ address += get_attr_length (insn);
+
+ /* If the insn is a vector jump, add the size of the table
+ and skip the table. */
+ if ((table = is_jump_table (insn)) != NULL)
+ {
+ address += get_jump_table_size (table);
+ insn = table;
+ }
+ }
+ }
+
+ fix = minipool_fix_head;
+
+ /* Now scan the fixups and perform the required changes. */
+ while (fix)
+ {
+ Mfix * ftmp;
+ Mfix * fdel;
+ Mfix * last_added_fix;
+ Mfix * last_barrier = NULL;
+ Mfix * this_fix;
+
+ /* Skip any further barriers before the next fix. */
+ while (fix && GET_CODE (fix->insn) == BARRIER)
+ fix = fix->next;
+
+ /* No more fixes. */
+ if (fix == NULL)
+ break;
+
+ last_added_fix = NULL;
+
+ for (ftmp = fix; ftmp; ftmp = ftmp->next)
+ {
+ if (GET_CODE (ftmp->insn) == BARRIER)
+ {
+ if (ftmp->address >= minipool_vector_head->max_address)
+ break;
+
+ last_barrier = ftmp;
+ }
+ else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
+ break;
+
+ last_added_fix = ftmp; /* Keep track of the last fix added. */
+ }
+
+ /* If we found a barrier, drop back to that; any fixes that we
+ could have reached but come after the barrier will now go in
+ the next mini-pool. */
+ if (last_barrier != NULL)
+ {
+ /* Reduce the refcount for those fixes that won't go into this
+ pool after all. */
+ for (fdel = last_barrier->next;
+ fdel && fdel != ftmp;
+ fdel = fdel->next)
+ {
+ fdel->minipool->refcount--;
+ fdel->minipool = NULL;
+ }
+
+ ftmp = last_barrier;
+ }
+ else
+ {
+ /* ftmp is the first fix that we can't fit into this pool and
+ there are no natural barriers that we could use.  Insert a
+ new barrier in the code somewhere between the previous
+ fix and this one, and arrange to jump around it. */
+ HOST_WIDE_INT max_address;
+
+ /* The last item on the list of fixes must be a barrier, so
+ we can never run off the end of the list of fixes without
+ last_barrier being set. */
+ gcc_assert (ftmp);
+
+ max_address = minipool_vector_head->max_address;
+ /* Check that there isn't another fix that is in range that
+ we couldn't fit into this pool because the pool was
+ already too large: we need to put the pool before such an
+ instruction. The pool itself may come just after the
+ fix because create_fix_barrier also allows space for a
+ jump instruction. */
+ if (ftmp->address < max_address)
+ max_address = ftmp->address + 1;
+
+ last_barrier = create_fix_barrier (last_added_fix, max_address);
+ }
+
+ assign_minipool_offsets (last_barrier);
+
+ while (ftmp)
+ {
+ if (GET_CODE (ftmp->insn) != BARRIER
+ && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
+ == NULL))
+ break;
+
+ ftmp = ftmp->next;
+ }
+
+ /* Scan over the fixes we have identified for this pool, fixing them
+ up and adding the constants to the pool itself. */
+ for (this_fix = fix; this_fix && ftmp != this_fix;
+ this_fix = this_fix->next)
+ if (GET_CODE (this_fix->insn) != BARRIER)
+ {
+ rtx addr
+ = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
+ minipool_vector_label),
+ this_fix->minipool->offset);
+ *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
+ }
+
+ dump_minipool (last_barrier->insn);
+ fix = ftmp;
+ }
+
+ /* From now on we must synthesize any constants that we can't handle
+ directly. This can happen if the RTL gets split during final
+ instruction generation. */
+ after_arm_reorg = 1;
+
+ /* Free the minipool memory. */
+ obstack_free (&minipool_obstack, minipool_startobj);
+}
+
+/* Routines to output assembly language. */
+
+/* If the rtx is one of the eight predefined FPA constants, return the
+   string representation of its value.  In this way we can ensure that
+   valid double constants are generated even when cross compiling. */
+const char *
+fp_immediate_constant (rtx x)
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fp_consts_inited)
+ init_fp_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fp[i]))
+ return strings_fp[i];
+
+ gcc_unreachable ();
+}
+
+/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
+static const char *
+fp_const_from_val (REAL_VALUE_TYPE *r)
+{
+ int i;
+
+ if (!fp_consts_inited)
+ init_fp_table ();
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (*r, values_fp[i]))
+ return strings_fp[i];
+
+ gcc_unreachable ();
+}
+
+/* Output the operands of a LDM/STM instruction to STREAM.
+ MASK is the ARM register set mask of which only bits 0-15 are important.
+ REG is the base register, either the frame pointer or the stack pointer.
+ INSTR is the possibly suffixed load or store instruction. */
+
+static void
+print_multi_reg (FILE *stream, const char *instr, unsigned reg,
+ unsigned long mask)
+{
+ unsigned i;
+ bool not_first = FALSE;
+
+ fputc ('\t', stream);
+ asm_fprintf (stream, instr, reg);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ fputc ('{', stream);
+
+ for (i = 0; i <= LAST_ARM_REGNUM; i++)
+ if (mask & (1 << i))
+ {
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (not_first)
+ fprintf (stream, ", ");
+
+ asm_fprintf (stream, "%r", i);
+ not_first = TRUE;
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ }
+
+ fprintf (stream, "}\n");
+}
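+
+/* Illustration (hypothetical arguments): with INSTR "ldmfd\t%r!, ",
+   REG == SP_REGNUM and a MASK covering r4, r5 and lr, this prints
+   "ldmfd	sp!, {r4, r5, lr}".  */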
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output a FLDMD instruction to STREAM.
+ BASE is the register containing the address.
+ REG and COUNT specify the register range.
+ Extra registers may be added to avoid hardware bugs.
+
+ We output FLDMD even for ARMv5 VFP implementations. Although
+ FLDMD is technically not supported until ARMv6, it is believed
+ that all VFP implementations support its use in this context. */
+
+static void
+vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+{
+ int i;
+
+ /* Work around the ARM10 VFPr1 bug. */
+ if (count == 2 && !arm_arch6)
+ {
+ if (reg == 15)
+ reg--;
+ count++;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* FLDMD may not load more than 16 doubleword registers at a time. Split the
+ load into multiple parts if we have to handle more than 16 registers. */
+ if (count > 16)
+ {
+ vfp_output_fldmd (stream, base, reg, 16);
+ vfp_output_fldmd (stream, base, reg + 16, count - 16);
+ return;
+ }
+
+ fputc ('\t', stream);
+ asm_fprintf (stream, "fldmfdd\t%r!, {", base);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ for (i = reg; i < reg + count; i++)
+ {
+ if (i > reg)
+ fputs (", ", stream);
+ asm_fprintf (stream, "d%d", i);
+ }
+ fputs ("}\n", stream);
+}
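+
+/* For instance, vfp_output_fldmd (stream, SP_REGNUM, 8, 3) prints
+   "fldmfdd	sp!, {d8, d9, d10}"; a two-register pop on a pre-v6
+   core is first widened by the erratum workaround above.  */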
+
+
+/* Output the assembly for a store multiple. */
+
+const char *
+/* APPLE LOCAL v7 support. Merge from mainline */
+vfp_output_fstmd (rtx * operands)
+{
+ char pattern[100];
+ int p;
+ int base;
+ int i;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ strcpy (pattern, "fstmfdd\t%m0!, {%P1");
+ p = strlen (pattern);
+
+ gcc_assert (GET_CODE (operands[1]) == REG);
+
+ base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ p += sprintf (&pattern[p], ", d%d", base + i);
+ }
+ strcpy (&pattern[p], "}");
+
+ output_asm_insn (pattern, operands);
+ return "";
+}
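+
+/* E.g. for a push of d8 and d9 the pattern built here expands to
+   "fstmfdd	sp!, {d8, d9}" when operand 0 is a stack-pointer-based
+   memory reference and operand 1 is d8.  */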
+
+
+/* Emit RTL to save a block of VFP register pairs to the stack. Returns the
+ number of bytes pushed. */
+
+static int
+/* APPLE LOCAL v7 support. Merge from mainline */
+vfp_emit_fstmd (int base_reg, int count)
+{
+ rtx par;
+ rtx dwarf;
+ rtx tmp, reg;
+ int i;
+
+ /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
+ register pairs are stored by a store multiple insn. We avoid this
+ by pushing an extra pair. */
+ if (count == 2 && !arm_arch6)
+ {
+ if (base_reg == LAST_VFP_REGNUM - 3)
+ base_reg -= 2;
+ count++;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* FSTMD may not store more than 16 doubleword registers at once. Split
+ larger stores into multiple parts (up to a maximum of two, in
+ practice). */
+ if (count > 16)
+ {
+ int saved;
+ /* NOTE: base_reg is an internal register number, so each D register
+ counts as 2. */
+ saved = vfp_emit_fstmd (base_reg + 32, count - 16);
+ saved += vfp_emit_fstmd (base_reg, 16);
+ return saved;
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
+
+ reg = gen_rtx_REG (DFmode, base_reg);
+ base_reg += 2;
+
+ XVECEXP (par, 0, 0)
+ = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (BLKmode,
+ gen_rtx_PRE_DEC (BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx_UNSPEC (BLKmode,
+ gen_rtvec (1, reg),
+ UNSPEC_PUSH_MULT));
+
+ tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ plus_constant (stack_pointer_rtx, -(count * 8)));
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, 0) = tmp;
+
+ tmp = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (DFmode, stack_pointer_rtx),
+ reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, 1) = tmp;
+
+ for (i = 1; i < count; i++)
+ {
+ reg = gen_rtx_REG (DFmode, base_reg);
+ base_reg += 2;
+ XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
+
+ tmp = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (DFmode,
+ plus_constant (stack_pointer_rtx,
+ i * 8)),
+ reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, i + 1) = tmp;
+ }
+
+ par = emit_insn (par);
+ REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (par));
+ RTX_FRAME_RELATED_P (par) = 1;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ return count * 8;
+}
+
+
+/* Output a 'call' insn. */
+const char *
+output_call (rtx *operands)
+{
+ gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
+
+ /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+ if (REGNO (operands[0]) == LR_REGNUM)
+ {
+ operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
+ output_asm_insn ("mov%?\t%0, %|lr", operands);
+ }
+
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+
+ if (TARGET_INTERWORK || arm_arch4t)
+ output_asm_insn ("bx%?\t%0", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %0", operands);
+
+ return "";
+}
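+
+/* E.g. a call through r4 on a pre-v4t, non-interworking target emits
+   "mov	lr, pc" then "mov	pc, r4"; with interworking or ARMv4T the
+   final instruction is "bx	r4" instead.  */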
+
+/* Output a 'call' insn that is a reference in memory. */
+const char *
+output_call_mem (rtx *operands)
+{
+ if (TARGET_INTERWORK && !arm_arch5)
+ {
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("bx%?\t%|ip", operands);
+ }
+ else if (regno_use_in (LR_REGNUM, operands[0]))
+ {
+ /* LR is used in the memory address. We load the address in the
+ first instruction. It's safe to use IP as the target of the
+ load since the call will kill it anyway. */
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ if (arm_arch5)
+ output_asm_insn ("blx%?\t%|ip", operands);
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ if (arm_arch4t)
+ output_asm_insn ("bx%?\t%|ip", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %|ip", operands);
+ }
+ }
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("ldr%?\t%|pc, %0", operands);
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpa register.
+ OPERANDS[0] is an fpa register.
+ OPERANDS[1] is the first register of an arm register pair. */
+const char *
+output_mov_long_double_fpa_from_arm (rtx *operands)
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[3];
+
+ gcc_assert (arm_reg0 != IP_REGNUM);
+
+ ops[0] = gen_rtx_REG (SImode, arm_reg0);
+ ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
+ output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
+
+ return "";
+}
+
+/* Output a move from an fpa register to arm registers.
+ OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpa register. */
+const char *
+output_mov_long_double_arm_from_fpa (rtx *operands)
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[3];
+
+ gcc_assert (arm_reg0 != IP_REGNUM);
+
+ ops[0] = gen_rtx_REG (SImode, arm_reg0);
+ ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
+ return "";
+}
+
+/* Output a move of a long double from arm registers to arm registers.
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source. */
+const char *
+output_mov_long_double_arm_from_arm (rtx *operands)
+{
+ /* We have to be careful here because the two might overlap. */
+ int dest_start = REGNO (operands[0]);
+ int src_start = REGNO (operands[1]);
+ rtx ops[2];
+ int i;
+
+ if (dest_start < src_start)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ ops[0] = gen_rtx_REG (SImode, dest_start + i);
+ ops[1] = gen_rtx_REG (SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ else
+ {
+ for (i = 2; i >= 0; i--)
+ {
+ ops[0] = gen_rtx_REG (SImode, dest_start + i);
+ ops[1] = gen_rtx_REG (SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpa register.
+ OPERANDS[0] is an fpa register.
+ OPERANDS[1] is the first register of an arm register pair. */
+const char *
+output_mov_double_fpa_from_arm (rtx *operands)
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[2];
+
+ gcc_assert (arm_reg0 != IP_REGNUM);
+
+ ops[0] = gen_rtx_REG (SImode, arm_reg0);
+ ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1}", ops);
+ output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
+ return "";
+}
+
+/* Output a move from an fpa register to arm registers.
+ OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpa register. */
+const char *
+output_mov_double_arm_from_fpa (rtx *operands)
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[2];
+
+ gcc_assert (arm_reg0 != IP_REGNUM);
+
+ ops[0] = gen_rtx_REG (SImode, arm_reg0);
+ ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
+ output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1}", ops);
+ return "";
+}
+
+/* Output a move between double words.
+ It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM,
+ or MEM<-REG, and all MEMs must be offsettable addresses. */
+const char *
+output_move_double (rtx *operands)
+{
+ enum rtx_code code0 = GET_CODE (operands[0]);
+ enum rtx_code code1 = GET_CODE (operands[1]);
+ rtx otherops[3];
+
+ if (code0 == REG)
+ {
+ int reg0 = REGNO (operands[0]);
+
+ otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
+
+ gcc_assert (code1 == MEM); /* Constraints should ensure this. */
+
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ gcc_assert (TARGET_LDRD);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+ break;
+
+ case PRE_DEC:
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
+ else
+ output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ break;
+
+ case POST_INC:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ gcc_assert (TARGET_LDRD);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
+ break;
+
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
+ otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
+ {
+ if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
+ {
+ /* Registers overlap so split out the increment. */
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ }
+ else
+ {
+ /* IWMMXT allows offsets larger than ARM ldrd can handle;
+ fix these up with a pair of ldr instructions. */
+ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT
+ && (INTVAL(otherops[2]) <= -256
+ || INTVAL(otherops[2]) >= 256))
+ {
+ output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
+ otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
+ output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ }
+ else
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ }
+ }
+ else
+ {
+ /* IWMMXT allows offsets larger than ARM ldrd can handle;
+ fix these up with a pair of ldr instructions. */
+ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT
+ && (INTVAL(otherops[2]) <= -256
+ || INTVAL(otherops[2]) >= 256))
+ {
+ otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
+ output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ otherops[0] = operands[0];
+ output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ }
+ else
+ /* We only allow constant increments, so this is safe. */
+ output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
+ break;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? This needs checking for thumb2. */
+ default:
+ if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
+ GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ switch ((int) INTVAL (otherops[2]))
+ {
+ case -8:
+ output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
+ return "";
+ case -4:
+ if (TARGET_THUMB2)
+ break;
+ output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
+ return "";
+ case 4:
+ if (TARGET_THUMB2)
+ break;
+ output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
+ return "";
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ if (TARGET_LDRD
+ && (GET_CODE (otherops[2]) == REG
+ || (GET_CODE (otherops[2]) == CONST_INT
+ && INTVAL (otherops[2]) > -256
+ && INTVAL (otherops[2]) < 256)))
+ {
+ if (reg_overlap_mentioned_p (otherops[0],
+ otherops[2]))
+ {
+ /* Swap base and index registers over to
+ avoid a conflict. */
+ otherops[1] = XEXP (XEXP (operands[1], 0), 1);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 0);
+ }
+ /* If both registers conflict, it will usually
+ have been fixed by a splitter. */
+ if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
+ {
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldr%(d%)\t%0, [%1]",
+ otherops);
+ }
+ else
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
+ return "";
+ }
+
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ return "ldm%(ia%)\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adjust_address (operands[1], SImode, 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Constraints should ensure this. */
+ gcc_assert (code0 == MEM && code1 == REG);
+ gcc_assert (REGNO (operands[1]) != IP_REGNUM);
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ break;
+
+ case PRE_INC:
+ gcc_assert (TARGET_LDRD);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
+ break;
+
+ case PRE_DEC:
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
+ else
+ output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ break;
+
+ case POST_INC:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ break;
+
+ case POST_DEC:
+ gcc_assert (TARGET_LDRD);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
+ break;
+
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ otherops[0] = operands[1];
+ otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
+ otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* IWMMXT allows offsets larger than ARM strd can handle;
+ fix these up with a pair of str instructions. */
+ if (TARGET_ARM && GET_CODE (otherops[2]) == CONST_INT
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ && (INTVAL(otherops[2]) <= -256
+ || INTVAL(otherops[2]) >= 256))
+ {
+ rtx reg1;
+ reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
+ if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
+ {
+ output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
+ otherops[0] = reg1;
+ output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ }
+ else
+ {
+ otherops[0] = reg1;
+ output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ otherops[0] = operands[1];
+ output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ }
+ }
+ else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ else
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ break;
+
+ case PLUS:
+ otherops[2] = XEXP (XEXP (operands[0], 0), 1);
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
+ {
+ case -8:
+ output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
+ return "";
+
+ case -4:
+ if (TARGET_THUMB2)
+ break;
+ output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
+ return "";
+
+ case 4:
+ if (TARGET_THUMB2)
+ break;
+ output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
+ return "";
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ if (TARGET_LDRD
+ && (GET_CODE (otherops[2]) == REG
+ || (GET_CODE (otherops[2]) == CONST_INT
+ && INTVAL (otherops[2]) > -256
+ && INTVAL (otherops[2]) < 256)))
+ {
+ otherops[0] = operands[1];
+ otherops[1] = XEXP (XEXP (operands[0], 0), 0);
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
+ return "";
+ }
+ /* Fall through */
+
+ default:
+ otherops[0] = adjust_address (operands[0], SImode, 4);
+ otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%1, %0", otherops);
+ }
+ }
+
+ return "";
+}
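+
+/* E.g. a DImode load from (mem (reg r2)) into (r0, r1) takes the plain
+   REG case above and emits "ldmia	r2, {r0, r1}".  */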
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Output a move, load or store for quad-word vectors in ARM registers. Only
+ handles MEMs accepted by neon_vector_mem_operand with CORE=true. */
+
+const char *
+output_move_quad (rtx *operands)
+{
+ if (REG_P (operands[0]))
+ {
+ /* Load, or reg->reg move. */
+
+ if (MEM_P (operands[1]))
+ {
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ rtx ops[2];
+ int dest, src, i;
+
+ gcc_assert (REG_P (operands[1]));
+
+ dest = REGNO (operands[0]);
+ src = REGNO (operands[1]);
+
+ /* This seems pretty dumb, but hopefully GCC won't try to do it
+ very often. */
+ if (dest < src)
+ for (i = 0; i < 4; i++)
+ {
+ ops[0] = gen_rtx_REG (SImode, dest + i);
+ ops[1] = gen_rtx_REG (SImode, src + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ else
+ for (i = 3; i >= 0; i--)
+ {
+ ops[0] = gen_rtx_REG (SImode, dest + i);
+ ops[1] = gen_rtx_REG (SImode, src + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ }
+ else
+ {
+ gcc_assert (MEM_P (operands[0]));
+ gcc_assert (REG_P (operands[1]));
+ gcc_assert (!reg_overlap_mentioned_p (operands[1], operands[0]));
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ return "";
+}
+
+/* Output a VFP load or store instruction. */
+
+const char *
+output_move_vfp (rtx *operands)
+{
+ rtx reg, mem, addr, ops[2];
+ int load = REG_P (operands[0]);
+ int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
+ int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
+ const char *template;
+ char buff[50];
+ enum machine_mode mode;
+
+ reg = operands[!load];
+ mem = operands[load];
+
+ mode = GET_MODE (reg);
+
+ gcc_assert (REG_P (reg));
+ gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
+ gcc_assert (mode == SFmode
+ || mode == DFmode
+ || mode == SImode
+ || mode == DImode
+ || (TARGET_NEON && VALID_NEON_DREG_MODE (mode)));
+ gcc_assert (MEM_P (mem));
+
+ addr = XEXP (mem, 0);
+
+ switch (GET_CODE (addr))
+ {
+ case PRE_DEC:
+ template = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
+ ops[0] = XEXP (addr, 0);
+ ops[1] = reg;
+ break;
+
+ case POST_INC:
+ template = "f%smia%c%%?\t%%0!, {%%%s1}%s";
+ ops[0] = XEXP (addr, 0);
+ ops[1] = reg;
+ break;
+
+ default:
+ template = "f%s%c%%?\t%%%s0, %%1%s";
+ ops[0] = reg;
+ ops[1] = mem;
+ break;
+ }
+
+ sprintf (buff, template,
+ load ? "ld" : "st",
+ dp ? 'd' : 's',
+ dp ? "P" : "",
+ integer_p ? "\t%@ int" : "");
+ output_asm_insn (buff, ops);
+
+ return "";
+}
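+
+/* E.g. an SFmode load of s0 from (mem (reg sp)) takes the default case
+   and emits "flds	s0, [sp]"; the DFmode form is "fldd	d0, [sp]",
+   the %P operand printing the double-precision register.  */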
+
+/* Output a Neon quad-word load or store, or a load or store for
+ larger structure modes. We could also support post-modify
+ forms using VLD1/VST1, but we don't do that yet.
+ WARNING, FIXME: The ordering of elements in memory is going to be weird in
+ big-endian mode at present, because we use VSTM instead of VST1, to make
+ it easy to make vector stores via ARM registers write values in the same
+ order as stores direct from Neon registers. For example, the byte ordering
+ of a quadword vector with 16-bit elements like this:
+
+ [e7:e6:e5:e4:e3:e2:e1:e0] (highest-numbered element first)
+
+ will be (with lowest address first, h = most-significant byte,
+ l = least-significant byte of element):
+
+ [e3h, e3l, e2h, e2l, e1h, e1l, e0h, e0l,
+ e7h, e7l, e6h, e6l, e5h, e5l, e4h, e4l]
+
+ When necessary, quadword registers (dN, dN+1) are moved to ARM registers from
+ rN in the order:
+
+ dN -> (rN+1, rN), dN+1 -> (rN+3, rN+2)
+
+ So that STM/LDM can be used on vectors in ARM registers, and the same memory
+ layout will result as if VSTM/VLDM were used.
+
+ This memory format (in BE mode) is very likely to change in the future. */
+
+const char *
+output_move_neon (rtx *operands)
+{
+ rtx reg, mem, addr, ops[2];
+ int regno, load = REG_P (operands[0]);
+ const char *template;
+ char buff[50];
+ enum machine_mode mode;
+
+ reg = operands[!load];
+ mem = operands[load];
+
+ mode = GET_MODE (reg);
+
+ gcc_assert (REG_P (reg));
+ regno = REGNO (reg);
+ gcc_assert (VFP_REGNO_OK_FOR_DOUBLE (regno)
+ || NEON_REGNO_OK_FOR_QUAD (regno));
+ gcc_assert (VALID_NEON_DREG_MODE (mode)
+ || VALID_NEON_QREG_MODE (mode)
+ || VALID_NEON_STRUCT_MODE (mode));
+ gcc_assert (MEM_P (mem));
+
+ addr = XEXP (mem, 0);
+
+ /* Strip off const from addresses like (const (plus (...))). */
+ if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS)
+ addr = XEXP (addr, 0);
+
+ switch (GET_CODE (addr))
+ {
+ case POST_INC:
+ /* FIXME: We should be using vld1/vst1 here in BE mode? */
+ template = "v%smia%%?\t%%0!, %%h1";
+ ops[0] = XEXP (addr, 0);
+ ops[1] = reg;
+ break;
+
+ case POST_MODIFY:
+ /* FIXME: Not currently enabled in neon_vector_mem_operand. */
+ gcc_unreachable ();
+
+ case LABEL_REF:
+ case PLUS:
+ {
+ int nregs = HARD_REGNO_NREGS (REGNO (reg), mode) / 2;
+ int i;
+ int overlap = -1;
+ for (i = 0; i < nregs; i++)
+ {
+ /* We're only using DImode here because it's a convenient size.
+ FIXME: This will need updating if the memory format of vectors
+ changes. */
+ ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
+ ops[1] = adjust_address (mem, SImode, 8 * i);
+ if (reg_overlap_mentioned_p (ops[0], mem))
+ {
+ gcc_assert (overlap == -1);
+ overlap = i;
+ }
+ else
+ {
+ sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
+ output_asm_insn (buff, ops);
+ }
+ }
+ if (overlap != -1)
+ {
+ ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * overlap);
+ ops[1] = adjust_address (mem, SImode, 8 * overlap);
+ sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
+ output_asm_insn (buff, ops);
+ }
+
+ return "";
+ }
+
+ default:
+ /* FIXME: See POST_INC. */
+ template = "v%smia%%?\t%%m0, %%h1";
+ ops[0] = mem;
+ ops[1] = reg;
+ }
+
+ sprintf (buff, template, load ? "ld" : "st");
+ output_asm_insn (buff, ops);
+
+ return "";
+}
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* Output an ADD r, s, #n where n may be too big for one instruction.
+ If adding zero to one register, output nothing. */
+const char *
+output_add_immediate (rtx *operands)
+{
+ HOST_WIDE_INT n = INTVAL (operands[2]);
+
+ if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (n < 0)
+ output_multi_immediate (operands,
+ "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
+ -n);
+ else
+ output_multi_immediate (operands,
+ "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
+ n);
+ }
+
+ return "";
+}
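+
+/* E.g. for n == -4 this emits the single instruction
+   "sub	rD, rS, #4" (register names illustrative).  */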
+
+/* Output a multiple immediate operation.
+ OPERANDS is the vector of operands referred to in the output patterns.
+ INSTR1 is the output pattern to use for the first constant.
+ INSTR2 is the output pattern to use for subsequent constants.
+ IMMED_OP is the index of the constant slot in OPERANDS.
+ N is the constant value. */
+static const char *
+output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
+ int immed_op, HOST_WIDE_INT n)
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ n &= 0xffffffff;
+#endif
+
+ if (n == 0)
+ {
+ /* Quick and easy output. */
+ operands[immed_op] = const0_rtx;
+ output_asm_insn (instr1, operands);
+ }
+ else
+ {
+ int i;
+ const char * instr = instr1;
+
+ /* Note that n is never zero here (which would give no output). */
+ for (i = 0; i < 32; i += 2)
+ {
+ if (n & (3 << i))
+ {
+ operands[immed_op] = GEN_INT (n & (255 << i));
+ output_asm_insn (instr, operands);
+ instr = instr2;
+ i += 6;
+ }
+ }
+ }
+
+ return "";
+}
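+
+/* Worked example: n == 0x101 is scanned on 2-bit boundaries into the
+   chunks 0x001 and 0x100, emitting "add	rD, rS, #1" followed by
+   "add	rD, rD, #256" (register names illustrative).  */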
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Return the name of a shifter operation. */
+static const char *
+arm_shift_nmem (enum rtx_code code)
+{
+ switch (code)
+ {
+ case ASHIFT:
+ return ARM_LSL_NAME;
+
+ case ASHIFTRT:
+ return "asr";
+
+ case LSHIFTRT:
+ return "lsr";
+
+ case ROTATERT:
+ return "ror";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Return the appropriate ARM instruction for the operation code.
+ The returned result should not be overwritten. OP is the rtx of the
+ operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
+ was shifted. */
+const char *
+arithmetic_instr (rtx op, int shift_first_arg)
+{
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ return "add";
+
+ case MINUS:
+ return shift_first_arg ? "rsb" : "sub";
+
+ case IOR:
+ return "orr";
+
+ case XOR:
+ return "eor";
+
+ case AND:
+ return "and";
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATERT:
+ return arm_shift_nmem (GET_CODE (op));
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Ensure valid constant shifts and return the appropriate shift mnemonic
+ for the operation code. The returned result should not be overwritten.
+ OP is the rtx code of the shift.
+ On exit, *AMOUNTP will be -1 if the shift is by a register, or the
+ constant shift amount otherwise. */
+static const char *
+shift_op (rtx op, HOST_WIDE_INT *amountp)
+{
+ const char * mnem;
+ enum rtx_code code = GET_CODE (op);
+
+ switch (GET_CODE (XEXP (op, 1)))
+ {
+ case REG:
+ case SUBREG:
+ *amountp = -1;
+ break;
+
+ case CONST_INT:
+ *amountp = INTVAL (XEXP (op, 1));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ switch (code)
+ {
+ case ROTATE:
+ gcc_assert (*amountp != -1);
+ *amountp = 32 - *amountp;
+ code = ROTATERT;
+
+ /* Fall through. */
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATERT:
+ mnem = arm_shift_nmem (code);
+ break;
+
+ case MULT:
+ /* We never have to worry about the amount being other than a
+ power of 2, since this case can never be reloaded from a reg. */
+ gcc_assert (*amountp != -1);
+ *amountp = int_log2 (*amountp);
+ return ARM_LSL_NAME;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (*amountp != -1)
+ {
+ /* This is not 100% correct, but follows from the desire to merge
+ multiplication by a power of 2 with the recognizer for a
+ shift. >=32 is not a valid shift for "lsl", so we must try and
+ output a shift that produces the correct arithmetical result.
+ Using lsr #32 is identical except for the fact that the carry bit
+ is not set correctly if we set the flags; but we never use the
+ carry bit from such an operation, so we can ignore that. */
+ if (code == ROTATERT)
+ /* Rotate is just modulo 32. */
+ *amountp &= 31;
+ else if (*amountp != (*amountp & 31))
+ {
+ if (code == ASHIFT)
+ mnem = "lsr";
+ *amountp = 32;
+ }
+
+ /* Shifts of 0 are no-ops. */
+ if (*amountp == 0)
+ return NULL;
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ return mnem;
+}
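+
+/* E.g. for (ashift x 35) the amount 35 exceeds 31, so the mnemonic is
+   rewritten to "lsr" with an amount of 32, which gives the same
+   all-zeros result; (mult x 8) comes out as "lsl" with amount 3.  */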
+
+/* Obtain the shift count from POWER, which must be a power of two. */
+
+static HOST_WIDE_INT
+int_log2 (HOST_WIDE_INT power)
+{
+ HOST_WIDE_INT shift = 0;
+
+ while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
+ {
+ gcc_assert (shift <= 31);
+ shift++;
+ }
+
+ return shift;
+}
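+
+/* Strictly, this returns the index of the least significant set bit,
+   e.g. int_log2 (8) == 3; the assertion fires if no set bit is found
+   in the low 32 positions.  */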
+
+/* Output a .ascii pseudo-op, keeping track of lengths. This is
+ because /bin/as is horribly restrictive. The judgement about
+ whether or not each character is 'printable' (and can be output as
+ is) or not (and must be printed with an octal escape) must be made
+ with reference to the *host* character set -- the situation is
+ similar to that discussed in the comments above pp_c_char in
+ c-pretty-print.c. */
+
+#define MAX_ASCII_LEN 51
+
+void
+output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
+{
+ int i;
+ int len_so_far = 0;
+
+ fputs ("\t.ascii\t\"", stream);
+
+ for (i = 0; i < len; i++)
+ {
+ int c = p[i];
+
+ if (len_so_far >= MAX_ASCII_LEN)
+ {
+ fputs ("\"\n\t.ascii\t\"", stream);
+ len_so_far = 0;
+ }
+
+ if (ISPRINT (c))
+ {
+ if (c == '\\' || c == '\"')
+ {
+ putc ('\\', stream);
+ len_so_far++;
+ }
+ putc (c, stream);
+ len_so_far++;
+ }
+ else
+ {
+ fprintf (stream, "\\%03o", c);
+ len_so_far += 4;
+ }
+ }
+
+ fputs ("\"\n", stream);
+}
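+
+/* E.g. the bytes 'a', '"', '\n' come out as
+	.ascii	"a\"\012"
+   with a new .ascii directive started every MAX_ASCII_LEN characters.  */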
+
+/* Compute the register save mask for registers 0 through 12
+ inclusive. This code is used by arm_compute_save_reg_mask. */
+
+static unsigned long
+arm_compute_save_reg0_reg12_mask (void)
+{
+ unsigned long func_type = arm_current_func_type ();
+ unsigned long save_reg_mask = 0;
+ unsigned int reg;
+
+ if (IS_INTERRUPT (func_type))
+ {
+ unsigned int max_reg;
+ /* Interrupt functions must not corrupt any registers,
+ even call clobbered ones. If this is a leaf function
+ we can just examine the registers used by the RTL, but
+ otherwise we have to assume that whatever function is
+ called might clobber anything, and so we have to save
+ all the call-clobbered registers as well. */
+ if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
+ /* FIQ handlers have registers r8 - r12 banked, so
+ we only need to check r0 - r7. Normal ISRs only
+ bank r14 and r15, so we must check up to r12.
+ r13 is the stack pointer which is always preserved,
+ so we do not need to consider it here. */
+ max_reg = 7;
+ else
+ max_reg = 12;
+
+ for (reg = 0; reg <= max_reg; reg++)
+ if (regs_ever_live[reg]
+ || (! current_function_is_leaf && call_used_regs [reg]))
+ save_reg_mask |= (1 << reg);
+
+ /* Also save the pic base register if necessary. */
+ if (flag_pic
+ && !TARGET_SINGLE_PIC_BASE
+ && arm_pic_register != INVALID_REGNUM
+ && current_function_uses_pic_offset_table)
+ save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
+ }
+ else
+ {
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* In the normal case we only need to save those registers
+ which are call saved and which are used by this function. */
+ for (reg = 0; reg <= 11; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs [reg])
+ save_reg_mask |= (1 << reg);
+
+ /* Handle the frame pointer as a special case. */
+ if (frame_pointer_needed)
+ save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ /* If we aren't loading the PIC register,
+ don't stack it even though it may be live. */
+ if (flag_pic
+ && !TARGET_SINGLE_PIC_BASE
+ && arm_pic_register != INVALID_REGNUM
+ && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
+ || current_function_uses_pic_offset_table))
+ save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* The prologue will copy SP into R0, so save it. */
+ if (IS_STACKALIGN (func_type))
+ save_reg_mask |= 1;
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+
+ /* Save registers so the exception handler can modify them. */
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i;
+
+ for (i = 0; ; i++)
+ {
+ reg = EH_RETURN_DATA_REGNO (i);
+ if (reg == INVALID_REGNUM)
+ break;
+ save_reg_mask |= 1 << reg;
+ }
+ }
+
+ return save_reg_mask;
+}
+
+/* Compute a bit mask of which registers need to be
+ saved on the stack for the current function. */
+
+static unsigned long
+arm_compute_save_reg_mask (void)
+{
+ unsigned int save_reg_mask = 0;
+ unsigned long func_type = arm_current_func_type ();
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ unsigned int reg;
+
+ if (IS_NAKED (func_type))
+ /* This should never really happen. */
+ return 0;
+
+ /* APPLE LOCAL begin ARM use custom frame layout */
+ /* Volatile functions do not return, so there
+ is no need to save any other registers. */
+ if (!IS_VOLATILE (func_type))
+ save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
+ /* APPLE LOCAL end ARM use custom frame layout */
+
+ /* Decide if we need to save the link register.
+ Interrupt routines have their own banked link register,
+ so they never need to save it.
+ Otherwise if we do not use the link register we do not need to save
+ it. If we are pushing other registers onto the stack however, we
+ can save an instruction in the epilogue by pushing the link register
+ now and then popping it back into the PC. This incurs extra memory
+ accesses though, so we only do it when optimizing for size, and only
+ if we know that we will not need a fancy return sequence. */
+ if (regs_ever_live [LR_REGNUM]
+ || (save_reg_mask
+ && optimize_size
+ && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
+ && !current_function_calls_eh_return))
+ save_reg_mask |= 1 << LR_REGNUM;
+
+ if (cfun->machine->lr_save_eliminated)
+ save_reg_mask &= ~ (1 << LR_REGNUM);
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ if (frame_pointer_needed)
+ save_reg_mask |= (1 << LR_REGNUM | 1 << HARD_FRAME_POINTER_REGNUM);
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ if (TARGET_REALLY_IWMMXT
+ /* APPLE LOCAL ARM custom frame layout */
+ && (!IS_VOLATILE (func_type))
+ && ((bit_count (save_reg_mask)
+ + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* moved definition of 'reg' to function level scope */
+ /* The total number of registers that are going to be pushed
+ onto the stack is odd. We need to ensure that the stack
+ is 64-bit aligned before we start to save iWMMXt registers,
+ and also before we start to create locals. (A local variable
+ might be a double or long long which we will load/store using
+ an iWMMXt instruction). Therefore we need to push another
+ ARM register, so that the stack will be 64-bit aligned. We
+ try to avoid using the arg registers (r0 - r3) as they might be
+ used to pass values in a tail call. */
+ for (reg = 4; reg <= 12; reg++)
+ if ((save_reg_mask & (1 << reg)) == 0)
+ break;
+
+ if (reg <= 12)
+ save_reg_mask |= (1 << reg);
+ else
+ {
+ cfun->machine->sibcall_blocked = 1;
+ save_reg_mask |= (1 << 3);
+ }
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* We may need to push an additional register for use initializing the
+ PIC base register. */
+ if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
+ && (save_reg_mask & THUMB2_WORK_REGS) == 0)
+ {
+ reg = thumb_find_work_register (1 << 4);
+ if (!call_used_regs[reg])
+ save_reg_mask |= (1 << reg);
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ return save_reg_mask;
+}
+
+
+/* Compute a bit mask of which registers need to be
+ saved on the stack for the current function. */
+static unsigned long
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_compute_save_reg_mask (void)
+{
+ unsigned long mask;
+ unsigned reg;
+
+ mask = 0;
+ for (reg = 0; reg < 12; reg ++)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ mask |= 1 << reg;
+
+ /* APPLE LOCAL begin ARM thumb requires FP */
+ if (frame_pointer_needed)
+ mask |= 1 << THUMB_HARD_FRAME_POINTER_REGNUM;
+ /* APPLE LOCAL end ARM thumb requires FP */
+
+ if (flag_pic
+ && !TARGET_SINGLE_PIC_BASE
+ && arm_pic_register != INVALID_REGNUM
+ && current_function_uses_pic_offset_table)
+ mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
+
+ /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
+ if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
+ mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
+
+ /* LR will also be pushed if any lo regs are pushed. */
+ if (mask & 0xff || thumb_force_lr_save ())
+ mask |= (1 << LR_REGNUM);
+
+ /* Make sure we have a low work register if we need one.
+ We will need one if we are going to push a high register,
+ but we are not currently intending to push a low register. */
+ if ((mask & 0xff) == 0
+ && ((mask & 0x0f00) || TARGET_BACKTRACE))
+ {
+ /* Use thumb_find_work_register to choose which register
+ we will use. If the register is live then we will
+ have to push it. Use LAST_LO_REGNUM as our fallback
+ choice for the register to select. */
+ /* APPLE LOCAL ARM thumb requires FP */
+ reg = thumb_find_work_register (1 << (LAST_LO_REGNUM - 1));
+
+ if (! call_used_regs[reg])
+ mask |= 1 << reg;
+ }
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* Also need a scratch register in the case where the frame size is
+ too big for the subtract instruction.  This is not exactly the right
+ computation for frame size; there's a circular dependency on which
+ registers get saved, but it should catch most of the problem cases,
+ and there is (very inefficient) code to handle the rare case where
+ we didn't allocate a scratch reg and need one. */
+ if (frame_pointer_needed && ((mask & 0x70) == 0)
+ && (ROUND_UP_WORD (get_frame_size ())
+ + current_function_outgoing_args_size) >= 512)
+ mask |= 1 << (LAST_LO_REGNUM - 1);
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ return mask;
+}
+
+
+/* Return the number of bytes required to save VFP registers. */
+static int
+arm_get_vfp_saved_size (void)
+{
+ unsigned int regno;
+ int count;
+ int saved;
+
+ saved = 0;
+ /* Space for saved VFP registers. */
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ {
+ count = 0;
+ for (regno = FIRST_VFP_REGNUM;
+ regno < LAST_VFP_REGNUM;
+ regno += 2)
+ {
+ if ((!regs_ever_live[regno] || call_used_regs[regno])
+ && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
+ {
+ if (count > 0)
+ {
+ /* Workaround ARM10 VFPr1 bug. */
+ if (count == 2 && !arm_arch6)
+ count++;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ saved += count * 8;
+ }
+ count = 0;
+ }
+ else
+ count++;
+ }
+ if (count > 0)
+ {
+ if (count == 2 && !arm_arch6)
+ count++;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ saved += count * 8;
+ }
+ }
+ return saved;
+}
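+
+/* For instance, with d8 - d11 live and call-saved (and arm_arch6 set),
+   the loop finds one contiguous block of four registers and reports
+   32 bytes; on pre-v6 cores a block of exactly two registers is
+   widened to three to avoid the VFPr1 erratum.  */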
+
+
+/* Generate a function exit sequence. If REALLY_RETURN is false, then do
+ everything bar the final return instruction. */
+const char *
+output_return_instruction (rtx operand, int really_return, int reverse)
+{
+ char conditional[10];
+ char instr[100];
+ unsigned reg;
+ unsigned long live_regs_mask;
+ unsigned long func_type;
+ arm_stack_offsets *offsets;
+
+ func_type = arm_current_func_type ();
+
+ if (IS_NAKED (func_type))
+ return "";
+
+ if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
+ {
+ /* If this function was declared non-returning, and we have
+ found a tail call, then we have to trust that the called
+ function won't return. */
+ if (really_return)
+ {
+ rtx ops[2];
+
+ /* Otherwise, trap an attempted return by aborting. */
+ ops[0] = operand;
+ ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
+ : "abort");
+ assemble_external_libcall (ops[1]);
+ output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
+ }
+
+ return "";
+ }
+
+ gcc_assert (!current_function_calls_alloca || really_return);
+
+ sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
+
+ return_used_this_function = 1;
+
+ live_regs_mask = arm_compute_save_reg_mask ();
+
+ if (live_regs_mask)
+ {
+ const char * return_reg;
+
+ /* If we do not have any special requirements for function exit
+ (e.g. interworking, or ISR) then we can load the return address
+ directly into the PC. Otherwise we must load it into LR. */
+ if (really_return
+ /* APPLE LOCAL ARM interworking */
+ && (! TARGET_INTERWORK || arm_arch5))
+ return_reg = reg_names[PC_REGNUM];
+ else
+ return_reg = reg_names[LR_REGNUM];
+
+ if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
+ {
+ /* There are three possible reasons for the IP register
+ being saved. 1) a stack frame was created, in which case
+ IP contains the old stack pointer, or 2) an ISR routine
+ corrupted it, or 3) it was saved to align the stack on
+ iWMMXt. In case 1, restore IP into SP, otherwise just
+ restore IP. */
+ if (frame_pointer_needed)
+ {
+ live_regs_mask &= ~ (1 << IP_REGNUM);
+ live_regs_mask |= (1 << SP_REGNUM);
+ }
+ else
+ gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
+ }
+
+ /* On some ARM architectures it is faster to use LDR rather than
+ LDM to load a single register. On other architectures, the
+ cost is the same. In 26 bit mode, or for exception handlers,
+ we have to use LDM to load the PC so that the CPSR is also
+ restored. */
+ for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
+ if (live_regs_mask == (1U << reg))
+ break;
+
+ if (reg <= LAST_ARM_REGNUM
+ && (reg != LR_REGNUM
+ || ! really_return
+ || ! IS_INTERRUPT (func_type)))
+ {
+ sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
+ (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
+ }
+ else
+ {
+ char *p;
+ int first = 1;
+
+ /* Generate the load multiple instruction to restore the
+ registers. Note we can get here, even if
+ frame_pointer_needed is true, but only if sp already
+ points to the base of the saved core registers. */
+ if (live_regs_mask & (1 << SP_REGNUM))
+ {
+ unsigned HOST_WIDE_INT stack_adjust;
+
+ offsets = arm_get_frame_offsets ();
+ stack_adjust = offsets->outgoing_args - offsets->saved_regs;
+ gcc_assert (stack_adjust == 0 || stack_adjust == 4);
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (stack_adjust && arm_arch5 && TARGET_ARM)
+ sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
+ else
+ {
+ /* If we can't use ldmib (SA110 bug),
+ then try to pop r3 instead. */
+ if (stack_adjust)
+ live_regs_mask |= 1 << 3;
+ sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
+ }
+ }
+ else
+ sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
+
+ p = instr + strlen (instr);
+
+ for (reg = 0; reg <= SP_REGNUM; reg++)
+ if (live_regs_mask & (1 << reg))
+ {
+ int l = strlen (reg_names[reg]);
+
+ if (first)
+ first = 0;
+ else
+ {
+ memcpy (p, ", ", 2);
+ p += 2;
+ }
+
+ memcpy (p, "%|", 2);
+ memcpy (p + 2, reg_names[reg], l);
+ p += l + 2;
+ }
+
+ if (live_regs_mask & (1 << LR_REGNUM))
+ {
+ sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
+ /* If returning from an interrupt, restore the CPSR. */
+ if (IS_INTERRUPT (func_type))
+ strcat (p, "^");
+ }
+ else
+ strcpy (p, "}");
+ }
+
+ output_asm_insn (instr, & operand);
+
+ /* See if we need to generate an extra instruction to
+ perform the actual function return. */
+ if (really_return
+ && func_type != ARM_FT_INTERWORKED
+ && (live_regs_mask & (1 << LR_REGNUM)) != 0)
+ {
+ /* The return has already been handled
+ by loading the LR into the PC. */
+ really_return = 0;
+ }
+ }
+
+ if (really_return)
+ {
+ switch ((int) ARM_FUNC_TYPE (func_type))
+ {
+ case ARM_FT_ISR:
+ case ARM_FT_FIQ:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? This is wrong for unified assembly syntax. */
+ sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
+ break;
+
+ case ARM_FT_INTERWORKED:
+ sprintf (instr, "bx%s\t%%|lr", conditional);
+ break;
+
+ case ARM_FT_EXCEPTION:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? This is wrong for unified assembly syntax. */
+ sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
+ break;
+
+ default:
+ /* Use bx if it's available. */
+ if (arm_arch5 || arm_arch4t)
+ sprintf (instr, "bx%s\t%%|lr", conditional);
+ else
+ sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
+ break;
+ }
+
+ output_asm_insn (instr, & operand);
+ }
+
+ return "";
+}
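+
+/* For illustration: a normal function that saved {r4, lr} and is really
+   returning on a non-interworking target gets a single
+   "ldmfd sp!, {r4, pc}" from the code above; the return address is
+   loaded straight into the PC, so no separate return instruction is
+   emitted.  */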
+
+/* Write the function name into the code section, directly preceding
+ the function prologue.
+
+ Code will be output similar to this:
+ t0
+ .ascii "arm_poke_function_name", 0
+ .align
+ t1
+ .word 0xff000000 + (t1 - t0)
+ arm_poke_function_name
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+
+ When performing a stack backtrace, code can inspect the value
+ of 'pc' stored at 'fp' + 0. If the trace function then looks
+ at location pc - 12 and the top 8 bits are set, then we know
+   that there is a function name embedded immediately preceding this
+   location, whose length is ((pc[-3]) & ~0xff000000).
+
+ We assume that pc is declared as a pointer to an unsigned long.
+
+   It is of no benefit to output the function name if we are assembling
+   a leaf function.  These function types will not contain a stack
+   backtrace structure; therefore it is not possible to determine the
+   function name.  */
+void
+arm_poke_function_name (FILE *stream, const char *name)
+{
+ unsigned long alignlength;
+ unsigned long length;
+ rtx x;
+
+ length = strlen (name) + 1;
+ alignlength = ROUND_UP_WORD (length);
+
+ ASM_OUTPUT_ASCII (stream, name, length);
+ ASM_OUTPUT_ALIGN (stream, 2);
+ x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
+ assemble_aligned_integer (UNITS_PER_WORD, x);
+}
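+
+/* A backtracer might recover an embedded name roughly as follows (an
+   illustrative sketch only, not part of the compiler; 'pc' is assumed
+   to be the unsigned long * saved at fp + 0, as described above):
+
+     if ((pc[-3] & 0xff000000) == 0xff000000)
+       {
+         unsigned long len = pc[-3] & 0x00ffffff;
+         const char *name = (const char *) (pc - 3) - len;
+         ...
+       }
+
+   where LEN includes the NUL terminator and any alignment padding.  */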
+
+/* Place some comments into the assembler stream
+ describing the current function. */
+static void
+arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
+{
+ unsigned long func_type;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ thumb1_output_function_prologue (f, frame_size);
+ return;
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ /* Sanity check. */
+ gcc_assert (!arm_ccfsm_state && !arm_target_insn);
+
+ func_type = arm_current_func_type ();
+
+ switch ((int) ARM_FUNC_TYPE (func_type))
+ {
+ default:
+ case ARM_FT_NORMAL:
+ break;
+ case ARM_FT_INTERWORKED:
+ asm_fprintf (f, "\t%@ Function supports interworking.\n");
+ break;
+ case ARM_FT_ISR:
+ asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
+ break;
+ case ARM_FT_FIQ:
+ asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
+ break;
+ case ARM_FT_EXCEPTION:
+ asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
+ break;
+ }
+
+ if (IS_NAKED (func_type))
+ asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
+
+ if (IS_VOLATILE (func_type))
+ asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
+
+ if (IS_NESTED (func_type))
+ asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (IS_STACKALIGN (func_type))
+ asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
+ current_function_args_size,
+ current_function_pretend_args_size, frame_size);
+
+ asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
+ frame_pointer_needed,
+ cfun->machine->uses_anonymous_args);
+
+ if (cfun->machine->lr_save_eliminated)
+ asm_fprintf (f, "\t%@ link register save eliminated.\n");
+
+ if (current_function_calls_eh_return)
+ asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
+
+#ifdef AOF_ASSEMBLER
+ if (flag_pic)
+ asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
+#endif
+
+ return_used_this_function = 0;
+}
+
+/* APPLE LOCAL begin 6902937 out of order VFP restore */
+static void
+arm_output_epilogue_vfp_restore (void)
+{
+ int reg, start_reg;
+ FILE * f = asm_out_file;
+ start_reg = LAST_VFP_REGNUM - 1;
+ for (reg = LAST_VFP_REGNUM - 1 ; reg >= FIRST_VFP_REGNUM; reg -= 2)
+ {
+ if ((!regs_ever_live[reg] || call_used_regs[reg])
+ && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
+ {
+ if (start_reg != reg)
+ vfp_output_fldmd (f, SP_REGNUM,
+ (reg - FIRST_VFP_REGNUM) / 2 + 1,
+ (start_reg - reg) / 2);
+ start_reg = reg - 2;
+ }
+ }
+ if (start_reg != reg)
+ vfp_output_fldmd (f, SP_REGNUM,
+ (reg - FIRST_VFP_REGNUM + 2) / 2 + 1,
+ (start_reg - reg) / 2);
+}
+/* APPLE LOCAL end 6902937 out of order VFP restore */
+
+const char *
+arm_output_epilogue (rtx sibling)
+{
+ int reg;
+ unsigned long saved_regs_mask;
+ unsigned long func_type;
+ /* Floats_offset is the offset from the "virtual" frame. In an APCS
+ frame that is $fp + 4 for a non-variadic function. */
+ int floats_offset = 0;
+ rtx operands[3];
+ FILE * f = asm_out_file;
+ unsigned int lrm_count = 0;
+ int really_return = (sibling == NULL);
+ int start_reg;
+ arm_stack_offsets *offsets;
+ /* APPLE LOCAL 6196857 use pop for thumb-2 epilogue */
+ const char *pop_insn;
+
+ /* If we have already generated the return instruction
+ then it is futile to generate anything else. */
+ if (use_return_insn (FALSE, sibling) && return_used_this_function)
+ return "";
+
+ func_type = arm_current_func_type ();
+
+ if (IS_NAKED (func_type))
+ /* Naked functions don't have epilogues. */
+ return "";
+
+ if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
+ {
+ rtx op;
+
+ /* A volatile function should never return. Call abort. */
+ op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
+ assemble_external_libcall (op);
+ output_asm_insn ("bl\t%a0", &op);
+
+ return "";
+ }
+
+ /* If we are throwing an exception, then we really must be doing a
+ return, so we can't tail-call. */
+ gcc_assert (!current_function_calls_eh_return || really_return);
+
+ offsets = arm_get_frame_offsets ();
+ saved_regs_mask = arm_compute_save_reg_mask ();
+
+ if (TARGET_IWMMXT)
+ lrm_count = bit_count (saved_regs_mask);
+
+ floats_offset = offsets->saved_args;
+ /* Compute how far away the floats will be. */
+ for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
+ if (saved_regs_mask & (1 << reg))
+ floats_offset += 4;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (frame_pointer_needed && TARGET_32BIT)
+ {
+ /* This variable is for the Virtual Frame Pointer, not VFP regs. */
+ int vfp_offset = offsets->frame;
+ /* APPLE LOCAL begin ARM custom frame layout */
+ unsigned long regs_above_fp =
+ inclusive_bitmask (ARM_HARD_FRAME_POINTER_REGNUM + 1, 11);
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ {
+ for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ floats_offset += 12;
+ asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
+ reg, FP_REGNUM, floats_offset - vfp_offset);
+ }
+ }
+ else
+ {
+ start_reg = LAST_FPA_REGNUM;
+
+ for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
+ {
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ floats_offset += 12;
+
+ /* We can't unstack more than four registers at once. */
+ if (start_reg - reg == 3)
+ {
+ asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
+ reg, FP_REGNUM, floats_offset - vfp_offset);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
+ reg + 1, start_reg - reg,
+ FP_REGNUM, floats_offset - vfp_offset);
+ start_reg = reg - 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
+ reg + 1, start_reg - reg,
+ FP_REGNUM, floats_offset - vfp_offset);
+ }
+
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ {
+ int saved_size;
+
+ /* APPLE LOCAL begin 4809156 */
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* The fldmd insns do not have base+offset addressing modes,
+ so we use SP to hold the address. (IP might have a
+ live value in it for indirect sibcalls, can't use that.) */
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ saved_size = arm_get_vfp_saved_size ();
+
+ if (saved_size > 0)
+ {
+ floats_offset += saved_size;
+ asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM,
+ FP_REGNUM, floats_offset - vfp_offset);
+ }
+ /* APPLE LOCAL 6902937 out of order VFP restore */
+ arm_output_epilogue_vfp_restore ();
+ /* APPLE LOCAL end 4809156 */
+ }
+
+ if (TARGET_IWMMXT)
+ {
+ /* The frame pointer is guaranteed to be non-double-word aligned.
+ This is because it is set to (old_stack_pointer - 4) and the
+ old_stack_pointer was double word aligned. Thus the offset to
+ the iWMMXt registers to be loaded must also be non-double-word
+ sized, so that the resultant address *is* double-word aligned.
+ We can ignore floats_offset since that was already included in
+ the live_regs_mask. */
+ lrm_count += (lrm_count % 2 ? 2 : 1);
+
+ for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
+ reg, FP_REGNUM, lrm_count * 4);
+ lrm_count += 2;
+ }
+ }
+
+ /* APPLE LOCAL ARM custom frame layout */
+ /* Removed lines. */
+
+ /* APPLE LOCAL begin ARM indirect sibcalls */
+ /* If we have an indirect sibcall that uses a reg saved across calls, that reg will
+ be clobbered when we pop the old value off the stack. Copy the value to IP
+ before doing the pop. */
+ if (sibling)
+ {
+ bool is_value;
+ int regno = indirect_sibreturn_reg (sibling, &is_value);
+ if (regno > 3 && regno != 12)
+ {
+ if (is_value)
+ XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ else
+ XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, regno);
+ }
+ if (regno == -1)
+ {
+ rtx stack_reg, offset;
+ offset = indirect_sibreturn_mem (sibling, &stack_reg, &is_value);
+ if (offset)
+ {
+ if (is_value)
+ XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ else
+ XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ asm_fprintf (f, "\tldr\t%r, [%r, #%wd]\n", IP_REGNUM,
+ REGNO (stack_reg), INTVAL (offset));
+ }
+ }
+ }
+ /* APPLE LOCAL end ARM indirect sibcalls */
+
+ /* We must use SP as the base register, because SP is one of the
+ registers being restored. If an interrupt or page fault
+ happens in the ldm instruction, the SP might or might not
+ have been restored. That would be bad, as then SP will no
+ longer indicate the safe area of stack, and we can get stack
+ corruption. Using SP as the base register means that it will
+ be reset correctly to the original value, should an interrupt
+ occur. If the stack pointer already points at the right
+ place, then omit the subtraction. */
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ if ((offsets->outgoing_args - offsets->saved_args
+ != (signed) bit_count (saved_regs_mask) * 4)
+ || ! current_function_sp_is_unchanging)
+ /* FP points 8 bytes into the frame. */
+ asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
+ (bit_count (saved_regs_mask) - 2) * 4);
+
+ /* If we can, restore the LR into the PC. */
+ if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
+ && really_return
+ && current_function_pretend_args_size == 0
+ && saved_regs_mask & (1 << LR_REGNUM)
+ && !current_function_calls_eh_return)
+ {
+ saved_regs_mask &= ~ (1 << LR_REGNUM);
+ saved_regs_mask |= (1 << PC_REGNUM);
+ }
+
+ /* We mustn't be trying to restore SP from the stack. */
+ gcc_assert (! (saved_regs_mask & (1 << SP_REGNUM)));
+
+	  /* APPLE LOCAL begin 6196857 use pop for thumb-2 epilogue */
+ if (TARGET_ARM)
+ pop_insn = "ldmfd\t%r!, ";
+ else /* (TARGET_THUMB2) */
+ pop_insn = "pop\t";
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (saved_regs_mask & regs_above_fp)
+ {
+ print_multi_reg (f, pop_insn, SP_REGNUM,
+ saved_regs_mask & regs_above_fp);
+ print_multi_reg (f, pop_insn, SP_REGNUM,
+ saved_regs_mask & ~regs_above_fp);
+ }
+ else
+ print_multi_reg (f, pop_insn, SP_REGNUM, saved_regs_mask);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+	  /* APPLE LOCAL end 6196857 use pop for thumb-2 epilogue */
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs. */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ }
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ if (IS_INTERRUPT (func_type))
+ /* Interrupt handlers will have pushed the
+ IP onto the stack, so restore it now. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, 1 << IP_REGNUM);
+ }
+ else
+ {
+ /* APPLE LOCAL begin ARM indirect sibcalls */
+ int ip_ok = 1;
+
+ /* If we have an indirect sibcall that uses a reg saved across calls, that reg will
+ be clobbered when we pop the old value off the stack. Copy the value to IP
+ before doing the pop. */
+ if (sibling)
+ {
+ bool is_value;
+ int regno = indirect_sibreturn_reg (sibling, &is_value);
+ if (regno > 3 && regno != 12)
+ {
+ ip_ok = 0;
+ if (is_value)
+ XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ else
+ XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, regno);
+ }
+ if (regno == -1)
+ {
+ rtx stack_reg, offset;
+ offset = indirect_sibreturn_mem (sibling, &stack_reg, &is_value);
+ if (offset)
+ {
+ ip_ok = 0;
+ if (is_value)
+ XEXP (XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 1), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ else
+ XEXP (XEXP (XVECEXP (PATTERN (sibling), 0, 0), 0), 0)
+ = gen_rtx_REG (SImode, IP_REGNUM);
+ asm_fprintf (f, "\tldr\t%r, [%r, #%wd]\n", IP_REGNUM,
+ REGNO (stack_reg), INTVAL (offset));
+ }
+ }
+ }
+
+ /* APPLE LOCAL begin ARM combine stack pop and register pop */
+ /* Code here is probably making overly specific assumptions about modes. */
+ /* Restore stack pointer if necessary. */
+ if (offsets->outgoing_args != offsets->saved_regs)
+ {
+ int delta = offsets->outgoing_args - offsets->saved_regs;
+ int maxpopsize;
+ tree rettype = TREE_TYPE (TREE_TYPE (current_function_decl));
+ /* We can use R0 through R3 for this purpose, but not any regs that
+ contain (part of) the return value. */
+ if (TYPE_MODE (rettype) == VOIDmode)
+ maxpopsize = 20;
+ else if (TYPE_MODE (rettype) == DFmode
+ || TYPE_MODE (rettype) == DImode)
+ maxpopsize = 12;
+ else
+ maxpopsize = 16;
+ /* We can also use R12 provided it was not used for the sibcall hack above,
+ and we are not saving any regs in the range R4...R11. In the latter case
+ they are stored on the stack below the "empty" spot used for R12 and
+ the saved values would get clobbered. */
+ if (saved_regs_mask
+ & ((1<<4) | (1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<9) | (1<<10) | (1<<11)))
+ ip_ok = 0;
+ if (!ip_ok)
+ maxpopsize -= 4;
+ if (optimize_size
+ && delta <= maxpopsize && delta % 4 == 0
+ && !TARGET_IWMMXT
+ && really_return
+ && TARGET_SOFT_FLOAT
+ && arm_fpu_arch == FPUTYPE_NONE
+ && !flag_pic
+ && !frame_pointer_needed)
+ {
+ int reg = ip_ok ? 12 : 3;
+ while (delta)
+ {
+ saved_regs_mask |= (1 << reg);
+ reg = (reg == 12) ? 3 : reg - 1;
+ delta -= 4;
+ }
+ }
+ else
+ {
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
+ output_add_immediate (operands);
+ }
+ }
+ /* APPLE LOCAL end ARM combine stack pop and register pop */
+ /* APPLE LOCAL end ARM indirect sibcalls */
+
+ if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ {
+ for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
+ reg, SP_REGNUM);
+ }
+ else
+ {
+ start_reg = FIRST_FPA_REGNUM;
+
+ for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
+ {
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ if (reg - start_reg == 3)
+ {
+ asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
+ start_reg, SP_REGNUM);
+ start_reg = reg + 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
+ start_reg, reg - start_reg,
+ SP_REGNUM);
+
+ start_reg = reg + 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
+ start_reg, reg - start_reg, SP_REGNUM);
+ }
+
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ {
+ /* APPLE LOCAL 6902937 out of order VFP restore */
+ arm_output_epilogue_vfp_restore ();
+ }
+ if (TARGET_IWMMXT)
+ for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
+
+ /* If we can, restore the LR into the PC. */
+ if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ && !IS_STACKALIGN (func_type)
+ && really_return
+ && current_function_pretend_args_size == 0
+ && saved_regs_mask & (1 << LR_REGNUM)
+ && !current_function_calls_eh_return)
+ {
+ saved_regs_mask &= ~ (1 << LR_REGNUM);
+ saved_regs_mask |= (1 << PC_REGNUM);
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+      /* Load the registers off the stack.  If we only have one register
+	 to load, use the LDR instruction - it is faster.  For Thumb-2,
+	 always use pop and the assembler will pick the best instruction.  */
+ if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM))
+ {
+ asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
+ }
+ else if (saved_regs_mask)
+ {
+ if (saved_regs_mask & (1 << SP_REGNUM))
+ /* Note - write back to the stack register is not enabled
+ (i.e. "ldmfd sp!..."). We know that the stack pointer is
+ in the list of registers and if we add writeback the
+ instruction becomes UNPREDICTABLE. */
+ print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask);
+ else if (TARGET_ARM)
+ print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask);
+ else
+ print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs. */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ }
+ }
+
+ /* We may have already restored PC directly from the stack. */
+ if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
+ return "";
+
+ /* Stack adjustment for exception handler. */
+ if (current_function_calls_eh_return)
+ asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
+ ARM_EH_STACKADJ_REGNUM);
+
+ /* Generate the return instruction. */
+ switch ((int) ARM_FUNC_TYPE (func_type))
+ {
+ case ARM_FT_ISR:
+ case ARM_FT_FIQ:
+ asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
+ break;
+
+ case ARM_FT_EXCEPTION:
+ asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
+ break;
+
+ case ARM_FT_INTERWORKED:
+ asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
+ break;
+
+ default:
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (IS_STACKALIGN (func_type))
+ {
+ /* See comment in arm_expand_prologue. */
+ asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, 0);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ if (arm_arch5 || arm_arch4t)
+ asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
+ else
+ asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
+ break;
+ }
+
+ return "";
+}
+
+static void
+arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
+{
+ arm_stack_offsets *offsets;
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ int regno;
+
+ /* Emit any call-via-reg trampolines that are needed for v4t support
+ of call_reg and call_value_reg type insns. */
+ for (regno = 0; regno < LR_REGNUM; regno++)
+ {
+ rtx label = cfun->machine->call_via[regno];
+
+ if (label != NULL)
+ {
+ switch_to_section (function_section (current_function_decl));
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (label));
+ asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
+ }
+ }
+
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_32BIT */
+ {
+ /* We need to take into account any stack-frame rounding. */
+ offsets = arm_get_frame_offsets ();
+
+ gcc_assert (!use_return_insn (FALSE, NULL)
+ || !return_used_this_function
+ || offsets->saved_regs == offsets->outgoing_args
+ || frame_pointer_needed);
+
+ /* Reset the ARM-specific per-function variables. */
+ after_arm_reorg = 0;
+ }
+
+/* APPLE LOCAL begin ARM label addresses */
+#if TARGET_MACHO
+ /* Mach-O doesn't support labels at the end of objects, so if
+ it looks like we might want one, insert a NOP. */
+ {
+ rtx insn = get_last_insn ();
+ while (insn
+ && NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
+ insn = PREV_INSN (insn);
+ if (insn
+ && (LABEL_P (insn)
+ || (NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
+ fputs ("\tnop\n", file);
+ }
+#endif
+/* APPLE LOCAL end ARM label addresses */
+}
+
+/* Generate and emit an insn that we will recognize as a push_multi.
+ Unfortunately, since this insn does not reflect very well the actual
+ semantics of the operation, we need to annotate the insn for the benefit
+ of DWARF2 frame unwind information. */
+static rtx
+emit_multi_reg_push (unsigned long mask)
+{
+ int num_regs = 0;
+ int num_dwarf_regs;
+ int i, j;
+ rtx par;
+ rtx dwarf;
+ int dwarf_par_index;
+ rtx tmp, reg;
+
+ for (i = 0; i <= LAST_ARM_REGNUM; i++)
+ if (mask & (1 << i))
+ num_regs++;
+
+ gcc_assert (num_regs && num_regs <= 16);
+
+ /* We don't record the PC in the dwarf frame information. */
+ num_dwarf_regs = num_regs;
+ if (mask & (1 << PC_REGNUM))
+ num_dwarf_regs--;
+
+ /* For the body of the insn we are going to generate an UNSPEC in
+ parallel with several USEs. This allows the insn to be recognized
+ by the push_multi pattern in the arm.md file. The insn looks
+ something like this:
+
+ (parallel [
+ (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
+ (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
+ (use (reg:SI 11 fp))
+ (use (reg:SI 12 ip))
+ (use (reg:SI 14 lr))
+ (use (reg:SI 15 pc))
+ ])
+
+ For the frame note however, we try to be more explicit and actually
+ show each register being stored into the stack frame, plus a (single)
+ decrement of the stack pointer. We do it this way in order to be
+ friendly to the stack unwinding code, which only wants to see a single
+ stack decrement per instruction. The RTL we generate for the note looks
+ something like this:
+
+ (sequence [
+ (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
+ (set (mem:SI (reg:SI sp)) (reg:SI r4))
+ (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
+ (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
+ (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
+ ])
+
+ This sequence is used both by the code to support stack unwinding for
+ exceptions handlers and the code to generate dwarf2 frame debugging. */
+
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
+ dwarf_par_index = 1;
+
+ for (i = 0; i <= LAST_ARM_REGNUM; i++)
+ {
+ if (mask & (1 << i))
+ {
+ reg = gen_rtx_REG (SImode, i);
+
+ XVECEXP (par, 0, 0)
+ = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (BLKmode,
+ gen_rtx_PRE_DEC (BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx_UNSPEC (BLKmode,
+ gen_rtvec (1, reg),
+ UNSPEC_PUSH_MULT));
+
+ if (i != PC_REGNUM)
+ {
+ tmp = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (SImode, stack_pointer_rtx),
+ reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
+ dwarf_par_index++;
+ }
+
+ break;
+ }
+ }
+
+ for (j = 1, i++; j < num_regs; i++)
+ {
+ if (mask & (1 << i))
+ {
+ reg = gen_rtx_REG (SImode, i);
+
+ XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
+
+ if (i != PC_REGNUM)
+ {
+ tmp
+ = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (SImode,
+ plus_constant (stack_pointer_rtx,
+ 4 * j)),
+ reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
+ }
+
+ j++;
+ }
+ }
+
+ par = emit_insn (par);
+
+ tmp = gen_rtx_SET (VOIDmode,
+ stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx, -4 * num_regs));
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, 0) = tmp;
+
+ REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (par));
+ return par;
+}
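+
+/* For example, emit_multi_reg_push ((1 << 4) | (1 << LR_REGNUM)) emits
+   a push_multi that assembles as "stmfd sp!, {r4, lr}", with a frame
+   note recording sp = sp - 8, [sp] = r4 and [sp + 4] = lr.  */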
+
+/* Calculate the size of the return value that is passed in registers. */
+static int
+arm_size_return_regs (void)
+{
+ enum machine_mode mode;
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ return GET_MODE_SIZE (mode);
+}
+
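+/* Generate and emit an insn, analogous to emit_multi_reg_push above,
+   that pushes COUNT FPA registers starting at BASE_REG using a single
+   store-multiple (SFM), annotated with DWARF frame notes.  Each FPA
+   register occupies 12 bytes of stack.  */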
+static rtx
+emit_sfm (int base_reg, int count)
+{
+ rtx par;
+ rtx dwarf;
+ rtx tmp, reg;
+ int i;
+
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
+
+ reg = gen_rtx_REG (XFmode, base_reg++);
+
+ XVECEXP (par, 0, 0)
+ = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (BLKmode,
+ gen_rtx_PRE_DEC (BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx_UNSPEC (BLKmode,
+ gen_rtvec (1, reg),
+ UNSPEC_PUSH_MULT));
+ tmp = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (XFmode, stack_pointer_rtx), reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, 1) = tmp;
+
+ for (i = 1; i < count; i++)
+ {
+ reg = gen_rtx_REG (XFmode, base_reg++);
+ XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
+
+ tmp = gen_rtx_SET (VOIDmode,
+ gen_frame_mem (XFmode,
+ plus_constant (stack_pointer_rtx,
+ i * 12)),
+ reg);
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, i + 1) = tmp;
+ }
+
+ tmp = gen_rtx_SET (VOIDmode,
+ stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx, -12 * count));
+
+ RTX_FRAME_RELATED_P (tmp) = 1;
+ XVECEXP (dwarf, 0, 0) = tmp;
+
+ par = emit_insn (par);
+ REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (par));
+ return par;
+}
+
+
+/* Return true if the current function needs to save/restore LR. */
+
+static bool
+thumb_force_lr_save (void)
+{
+ return !cfun->machine->lr_save_eliminated
+ && (!leaf_function_p ()
+ || thumb_far_jump_used_p ()
+ || regs_ever_live [LR_REGNUM]);
+}
+
+
+/* Compute the distance from register FROM to register TO.
+ These can be the arg pointer (26), the soft frame pointer (25),
+ the stack pointer (13) or the hard frame pointer (11).
+ In thumb mode r7 is used as the soft frame pointer, if needed.
+ Typical stack layout looks like this:
+
+ old stack pointer -> | |
+ ----
+ | | \
+ | | saved arguments for
+ | | vararg functions
+ | | /
+ --
+ hard FP & arg pointer -> | | \
+ | | stack
+ | | frame
+ | | /
+ --
+ | | \
+ | | call saved
+ | | registers
+ soft frame pointer -> | | /
+ --
+ | | \
+ | | local
+ | | variables
+ locals base pointer -> | | /
+ --
+ | | \
+ | | outgoing
+ | | arguments
+ current stack pointer -> | | /
+ --
+
+ For a given function some or all of these stack components
+ may not be needed, giving rise to the possibility of
+ eliminating some of the registers.
+
+ The values returned by this function must reflect the behavior
+ of arm_expand_prologue() and arm_compute_save_reg_mask().
+
+ The sign of the number returned reflects the direction of stack
+ growth, so the values are positive for all eliminations except
+ from the soft frame pointer to the hard frame pointer.
+
+ SFP may point just inside the local variables block to ensure correct
+ alignment. */
+
+
+/* Calculate stack offsets. These are used to calculate register elimination
+ offsets and in prologue/epilogue code. */
+
+static arm_stack_offsets *
+arm_get_frame_offsets (void)
+{
+ struct arm_stack_offsets *offsets;
+ unsigned long func_type;
+ int leaf;
+ int saved;
+ HOST_WIDE_INT frame_size;
+
+ offsets = &cfun->machine->stack_offsets;
+
+  /* We need to know if we are a leaf function.  Unfortunately, it
+     is possible to be called after start_sequence has been called,
+     which causes get_insns to return the insns for the sequence,
+     not the function, which will cause leaf_function_p to return
+     the incorrect result.
+
+     To work around this, we cache the computed frame size.  This
+     works because we will only be calling RTL expanders that need
+     to know about leaf functions once reload has completed, and the
+     frame size cannot be changed after that time, so we can safely
+     use the cached value.  */
+
+ if (reload_completed)
+ return offsets;
+
+  /* Initially this is the size of the local variables.  It will be
+     translated into an offset once we have determined the size of
+     preceding data.  */
+ frame_size = ROUND_UP_WORD (get_frame_size ());
+
+ leaf = leaf_function_p ();
+
+ /* Space for variadic functions. */
+ offsets->saved_args = current_function_pretend_args_size;
+
+ /* APPLE LOCAL ARM custom frame layout */
+ offsets->frame = offsets->saved_args + (frame_pointer_needed ? 8 : 0);
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ unsigned int regno;
+
+ saved = bit_count (arm_compute_save_reg_mask ()) * 4;
+
+ /* We know that SP will be doubleword aligned on entry, and we must
+ preserve that condition at any subroutine call. We also require the
+ soft frame pointer to be doubleword aligned. */
+
+ if (TARGET_REALLY_IWMMXT)
+ {
+ /* Check for the call-saved iWMMXt registers. */
+ for (regno = FIRST_IWMMXT_REGNUM;
+ regno <= LAST_IWMMXT_REGNUM;
+ regno++)
+ if (regs_ever_live [regno] && ! call_used_regs [regno])
+ saved += 8;
+ }
+
+ func_type = arm_current_func_type ();
+ if (! IS_VOLATILE (func_type))
+ {
+ /* Space for saved FPA registers. */
+ for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ saved += 12;
+
+ /* Space for saved VFP registers. */
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ saved += arm_get_vfp_saved_size ();
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ saved = bit_count (thumb1_compute_save_reg_mask ()) * 4;
+ if (TARGET_BACKTRACE)
+ saved += 16;
+ /* APPLE LOCAL begin 6465387 exception handling interworking VFP save */
+      /* Saved VFP registers in Thumb mode aren't accounted for by
+	 thumb1_compute_save_reg_mask ().  */
+ if (current_function_has_nonlocal_label && arm_arch6)
+ saved += 64;
+ /* APPLE LOCAL end 6465387 exception handling interworking VFP save */
+ }
+
+ /* Saved registers include the stack frame. */
+ offsets->saved_regs = offsets->saved_args + saved;
+ offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
+ /* A leaf function does not need any stack alignment if it has nothing
+ on the stack. */
+ if (leaf && frame_size == 0)
+ {
+ offsets->outgoing_args = offsets->soft_frame;
+ offsets->locals_base = offsets->soft_frame;
+ return offsets;
+ }
+
+ /* Ensure SFP has the correct alignment. */
+ if (ARM_DOUBLEWORD_ALIGN
+ && (offsets->soft_frame & 7))
+ offsets->soft_frame += 4;
+
+ offsets->locals_base = offsets->soft_frame + frame_size;
+ offsets->outgoing_args = (offsets->locals_base
+ + current_function_outgoing_args_size);
+
+ if (ARM_DOUBLEWORD_ALIGN)
+ {
+ /* Ensure SP remains doubleword aligned. */
+ if (offsets->outgoing_args & 7)
+ offsets->outgoing_args += 4;
+ gcc_assert (!(offsets->outgoing_args & 7));
+ }
+
+ return offsets;
+}
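+
+/* A worked example (hypothetical values, assuming ARM_DOUBLEWORD_ALIGN,
+   a zero CALLER_INTERWORKING_SLOT_SIZE and no coprocessor saves): a
+   non-leaf 32-bit function with no pretend args, four saved core
+   registers, 20 bytes of locals and 8 bytes of outgoing arguments gets
+   saved_args = 0, saved_regs = 16, soft_frame = 16, locals_base = 36
+   and outgoing_args = 44, rounded up to 48 to keep SP doubleword
+   aligned.  */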
+
+
+/* Calculate the relative offsets for the different stack pointers. Positive
+ offsets are in the direction of stack growth. */
+
+HOST_WIDE_INT
+arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
+{
+ arm_stack_offsets *offsets;
+
+ offsets = arm_get_frame_offsets ();
+
+ /* OK, now we have enough information to compute the distances.
+ There must be an entry in these switch tables for each pair
+ of registers in ELIMINABLE_REGS, even if some of the entries
+ seem to be redundant or useless. */
+ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ switch (to)
+ {
+ /* APPLE LOCAL ARM custom frame layout */
+ /* Removed lines. */
+
+ case FRAME_POINTER_REGNUM:
+ /* This is the reverse of the soft frame pointer
+ to hard frame pointer elimination below. */
+ return offsets->soft_frame - offsets->saved_args;
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ case HARD_FRAME_POINTER_REGNUM:
+ return offsets->frame - (offsets->saved_args + 4);
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ case STACK_POINTER_REGNUM:
+ /* If nothing has been pushed on the stack at all
+ then this will return -4. This *is* correct! */
+ return offsets->outgoing_args - (offsets->saved_args + 4);
+
+ default:
+ gcc_unreachable ();
+ }
+ gcc_unreachable ();
+
+ case FRAME_POINTER_REGNUM:
+ switch (to)
+ {
+ /* APPLE LOCAL begin ARM custom frame layout */
+ case HARD_FRAME_POINTER_REGNUM:
+ /* APPLE LOCAL end ARM custom frame layout */
+ /* The hard frame pointer points to the top entry in the
+ stack frame. The soft frame pointer to the bottom entry
+ in the stack frame. If there is no stack frame at all,
+ then they are identical. */
+
+ return offsets->frame - offsets->soft_frame;
+
+ case STACK_POINTER_REGNUM:
+ return offsets->outgoing_args - offsets->soft_frame;
+
+ default:
+ gcc_unreachable ();
+ }
+ gcc_unreachable ();
+
+ default:
+ /* You cannot eliminate from the stack pointer.
+ In theory you could eliminate from the hard frame
+ pointer to the stack pointer, but this will never
+ happen, since if a stack frame is not needed the
+ hard frame pointer will never be used. */
+ gcc_unreachable ();
+ }
+}
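+
+/* Continuing the hypothetical frame above (saved_args = 0, soft_frame =
+   16, outgoing_args = 48): eliminating ARG_POINTER_REGNUM to
+   STACK_POINTER_REGNUM yields 48 - (0 + 4) = 44, while
+   FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM yields 48 - 16 = 32.  */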
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Emit RTL to save coprocessor registers on function entry.  Returns the
+ number of bytes pushed. */
+
+static int
+arm_save_coproc_regs(void)
+{
+ int saved_size = 0;
+ unsigned reg;
+ unsigned start_reg;
+ rtx insn;
+
+ for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs [reg])
+ {
+ insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
+ insn = gen_rtx_MEM (V2SImode, insn);
+ insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ saved_size += 8;
+ }
+
+ /* Save any floating point call-saved registers used by this
+ function. */
+ if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ {
+ for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
+ insn = gen_rtx_MEM (XFmode, insn);
+ insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ saved_size += 12;
+ }
+ }
+ else
+ {
+ start_reg = LAST_FPA_REGNUM;
+
+ for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
+ {
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ if (start_reg - reg == 3)
+ {
+ insn = emit_sfm (reg, 4);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ saved_size += 48;
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (start_reg != reg)
+ {
+ insn = emit_sfm (reg + 1, start_reg - reg);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ saved_size += (start_reg - reg) * 12;
+ }
+ start_reg = reg - 1;
+ }
+ }
+
+ if (start_reg != reg)
+ {
+ insn = emit_sfm (reg + 1, start_reg - reg);
+ saved_size += (start_reg - reg) * 12;
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ if (TARGET_HARD_FLOAT && TARGET_VFP)
+ {
+ start_reg = FIRST_VFP_REGNUM;
+
+ for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
+ {
+ if ((!regs_ever_live[reg] || call_used_regs[reg])
+ && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
+ {
+ if (start_reg != reg)
+ saved_size += vfp_emit_fstmd (start_reg,
+ (reg - start_reg) / 2);
+ start_reg = reg + 2;
+ }
+ }
+ if (start_reg != reg)
+ saved_size += vfp_emit_fstmd (start_reg,
+ (reg - start_reg) / 2);
+ }
+ return saved_size;
+}
+
+
+/* Set the Thumb frame pointer from the stack pointer. */
+
+static void
+thumb_set_frame_pointer (arm_stack_offsets *offsets)
+{
+ HOST_WIDE_INT amount;
+ rtx insn, dwarf;
+
+ amount = offsets->outgoing_args - offsets->locals_base;
+ if (amount < 1024)
+ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (amount)));
+ else
+ {
+ emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
+ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx,
+ stack_pointer_rtx));
+ dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
+ plus_constant (stack_pointer_rtx, amount));
+ RTX_FRAME_RELATED_P (dwarf) = 1;
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (insn));
+ }
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+}
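+
+/* For instance, an AMOUNT of 2048 takes the second path above: the
+   constant is first loaded into the frame pointer, SP is then added to
+   it, and a REG_FRAME_RELATED_EXPR note records the net effect
+   fp = sp + 2048 for the DWARF unwinder.  */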
+
+/* Generate the prologue instructions for entry into an ARM or Thumb-2
+ function. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+void
+arm_expand_prologue (void)
+{
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* Remove 'reg' variable */
+ rtx amount;
+ rtx insn;
+ rtx ip_rtx;
+ unsigned long live_regs_mask;
+ unsigned long func_type;
+ /* APPLE LOCAL ARM custom frame layout */
+ /* Remove unused variable definitions. */
+ int saved_regs = 0;
+ unsigned HOST_WIDE_INT args_to_push;
+ arm_stack_offsets *offsets;
+
+ func_type = arm_current_func_type ();
+
+ /* Naked functions don't have prologues. */
+ if (IS_NAKED (func_type))
+ return;
+
+ /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
+ args_to_push = current_function_pretend_args_size;
+
+  /* Compute which registers we will have to save onto the stack.  */
+ live_regs_mask = arm_compute_save_reg_mask ();
+
+ ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (IS_STACKALIGN (func_type))
+ {
+ rtx dwarf;
+ rtx r0;
+ rtx r1;
+ /* Handle a word-aligned stack pointer. We generate the following:
+
+ mov r0, sp
+ bic r1, r0, #7
+ mov sp, r1
+ <save and restore r0 in normal prologue/epilogue>
+ mov sp, r0
+ bx lr
+
+ The unwinder doesn't need to know about the stack realignment.
+ Just tell it we saved SP in r0. */
+ gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);
+
+ r0 = gen_rtx_REG (SImode, 0);
+ r1 = gen_rtx_REG (SImode, 1);
+ dwarf = gen_rtx_UNSPEC (SImode, NULL_RTVEC, UNSPEC_STACK_ALIGN);
+ dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
+ insn = gen_movsi (r0, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ dwarf, REG_NOTES (insn));
+ emit_insn (insn);
+ emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
+ emit_insn (gen_movsi (stack_pointer_rtx, r1));
+ }
+
+ if (frame_pointer_needed && TARGET_ARM)
+ {
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ if (IS_INTERRUPT (func_type))
+ {
+ /* Interrupt functions must not corrupt any registers.
+	     Creating a frame pointer, however, corrupts the IP
+ register, so we must push it first. */
+ insn = emit_multi_reg_push (1 << IP_REGNUM);
+
+ /* Do not set RTX_FRAME_RELATED_P on this insn.
+ The dwarf stack unwinding code only wants to see one
+ stack decrement per function, and this is not it. If
+ this instruction is labeled as being part of the frame
+ creation sequence then dwarf2out_frame_debug_expr will
+ die when it encounters the assignment of IP to FP
+ later on, since the use of SP here establishes SP as
+ the CFA register and not IP.
+
+ Anyway this instruction is not really part of the stack
+ frame creation although it is part of the prologue. */
+ }
+ /* APPLE LOCAL begin ARM custom frame layout */
+ else if (IS_NESTED (func_type))
+ {
+ /* Our prologue doesn't corrupt IP, so no need to save it. */
+ }
+ /* APPLE LOCAL end ARM custom frame layout */
+ }
+
+ if (args_to_push)
+ {
+ /* Push the argument registers, or reserve space for them. */
+ if (cfun->machine->uses_anonymous_args)
+ insn = emit_multi_reg_push
+ ((0xf0 >> (args_to_push / 4)) & 0xf);
+ else
+ insn = emit_insn
+ (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- args_to_push)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* If this is an interrupt service routine, and the link register
+ is going to be pushed, and we are not creating a stack frame,
+ (which would involve an extra push of IP and a pop in the epilogue)
+ subtracting four from LR now will mean that the function return
+ can be done with a single instruction. */
+ if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
+ && (live_regs_mask & (1 << LR_REGNUM)) != 0
+ && ! frame_pointer_needed)
+ {
+ rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
+
+ emit_set_insn (lr, plus_constant (lr, -4));
+ }
+
+ /* APPLE LOCAL begin ARM peephole combine reg store and stack push */
+ offsets = arm_get_frame_offsets ();
+
+ if (live_regs_mask)
+ {
+ saved_regs += bit_count (live_regs_mask) * 4;
+
+ /* Space optimization: if we need a small amount of stack space, and
+ we're going to do a push, push some extra registers rather than
+	 doing a separate subtract.  We can safely push R0 through R3.  We can
+ also use R12 provided no regs in the range R4..R11 are being saved.
+ (Their saved values would be below the value of R12 on the stack,
+ and would get clobbered.) */
+ /* The conditions here are probably overly restrictive. */
+ if (optimize_size
+ && !flag_pic
+ && !frame_pointer_needed
+ && arm_fpu_arch == FPUTYPE_NONE
+ && TARGET_SOFT_FLOAT
+ && !TARGET_IWMMXT)
+ {
+ int ip_ok = 1;
+ int delta = offsets->outgoing_args - offsets->saved_args - saved_regs;
+	  gcc_assert (delta >= 0);
+ if (live_regs_mask
+ & ((1<<4) | (1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<9) | (1<<10) | (1<<11)))
+ ip_ok = 0;
+ if (delta <= (ip_ok ? 20 : 16) && delta % 4 == 0)
+ {
+ int reg = (ip_ok ? 12 : 3);
+ while (delta)
+ {
+ delta -= 4;
+ live_regs_mask |= (1<<reg);
+ reg = (reg == 12) ? 3 : reg - 1;
+ saved_regs += 4;
+ }
+ }
+ }
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ if (frame_pointer_needed)
+ {
+ unsigned long regs_above_fp =
+ inclusive_bitmask (ARM_HARD_FRAME_POINTER_REGNUM + 1, 11);
+ unsigned long initial_push_regs = live_regs_mask
+ & ~regs_above_fp;
+ unsigned long second_push_regs = live_regs_mask
+ & regs_above_fp;
+
+ /* Save everything up to the FP, and the LR */
+ insn = emit_multi_reg_push (initial_push_regs);
+ /* rdar://6148015 */
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Configure FP to point to the saved FP. */
+ insn = emit_insn (
+ gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
+ GEN_INT ((bit_count (initial_push_regs) - 2)
+ * 4)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Prevent attempts to optimize away the frame pointer. */
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+
+ /* Push remaining regs. */
+ if (second_push_regs)
+ {
+ insn = emit_multi_reg_push (second_push_regs);
+ /* rdar://6148015 */
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ else
+ {
+ insn = emit_multi_reg_push (live_regs_mask);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ /* APPLE LOCAL end ARM custom frame layout */
+ }
+ /* APPLE LOCAL end ARM peephole combine reg store and stack push */
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (! IS_VOLATILE (func_type))
+ saved_regs += arm_save_coproc_regs ();
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ /* APPLE LOCAL ARM custom frame layout */
+ /* Removed lines. */
+
+ /* APPLE LOCAL ARM peephole combine reg store and stack push */
+ /* Remove call to arm_get_frame_offsets. */
+ if (offsets->outgoing_args != offsets->saved_args + saved_regs)
+ {
+ /* This add can produce multiple insns for a large constant, so we
+ need to get tricky. */
+ rtx last = get_last_insn ();
+
+ amount = GEN_INT (offsets->saved_args + saved_regs
+ - offsets->outgoing_args);
+
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ amount));
+ do
+ {
+ last = last ? NEXT_INSN (last) : get_insns ();
+ RTX_FRAME_RELATED_P (last) = 1;
+ }
+ while (last != insn);
+
+ /* If the frame pointer is needed, emit a special barrier that
+ will prevent the scheduler from moving stores to the frame
+ before the stack adjustment. */
+ if (frame_pointer_needed)
+ insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
+ hard_frame_pointer_rtx));
+ }
+
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+  /* APPLE LOCAL add !live_regs_mask.  That's handled above by Apple code.  */
+ if (frame_pointer_needed && TARGET_THUMB2 && !live_regs_mask)
+ thumb_set_frame_pointer (offsets);
+
+ if (flag_pic && arm_pic_register != INVALID_REGNUM)
+ {
+ unsigned long mask;
+
+ mask = live_regs_mask;
+ mask &= THUMB2_WORK_REGS;
+ if (!IS_NESTED (func_type))
+ mask |= (1 << IP_REGNUM);
+ arm_load_pic_register (mask);
+ }
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly if the user has requested no
+ scheduling in the prolog. Similarly if we want non-call exceptions
+ using the EABI unwinder, to prevent faulting instructions from being
+ swapped with a stack adjustment. */
+ if (current_function_profile || !TARGET_SCHED_PROLOG
+ || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
+ emit_insn (gen_blockage ());
+
+ /* If the link register is being kept alive, with the return address in it,
+ then make sure that it does not get reused by the ce2 pass. */
+ if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
+ {
+ emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
+ cfun->machine->lr_save_eliminated = 1;
+ }
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Print condition code to STREAM. Helper function for arm_print_operand. */
+static void
+arm_print_condition (FILE *stream)
+{
+ if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
+ {
+ /* Branch conversion is not implemented for Thumb-2. */
+ if (TARGET_THUMB)
+ {
+ output_operand_lossage ("predicated Thumb instruction");
+ return;
+ }
+ if (current_insn_predicate != NULL)
+ {
+ output_operand_lossage
+ ("predicated instruction in conditional sequence");
+ return;
+ }
+
+ fputs (arm_condition_codes[arm_current_cc], stream);
+ }
+ else if (current_insn_predicate)
+ {
+ enum arm_cond_code code;
+
+ if (TARGET_THUMB1)
+ {
+ output_operand_lossage ("predicated Thumb instruction");
+ return;
+ }
+
+ code = get_arm_condition_code (current_insn_predicate);
+ fputs (arm_condition_codes[code], stream);
+ }
+}
+
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* If CODE is 'd', then the X is a condition operand and the instruction
+ should only be executed if the condition is true.
+ if CODE is 'D', then the X is a condition operand and the instruction
+ should only be executed if the condition is false: however, if the mode
+ of the comparison is CCFPEmode, then always execute the instruction -- we
+ do this because in these circumstances !GE does not necessarily imply LT;
+ in these cases the instruction pattern will take care to make sure that
+ an instruction containing %d will follow, thereby undoing the effects of
+ doing this instruction unconditionally.
+ If CODE is 'N' then X is a floating point operand that must be negated
+ before output.
+ If CODE is 'B' then output a bitwise inverted value of X (a const int).
+ If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
+void
+arm_print_operand (FILE *stream, rtx x, int code)
+{
+ switch (code)
+ {
+ /* APPLE LOCAL begin ARM MACH assembler */
+    /* APPLE LOCAL change Apple '.' code to '~' for v7 to avoid conflict */
+ case '~':
+#ifdef LOCAL_LABEL_PREFIX
+ fputs (LOCAL_LABEL_PREFIX, stream);
+#endif
+ return;
+ /* APPLE LOCAL end ARM MACH assembler */
+
+ case '@':
+ fputs (ASM_COMMENT_START, stream);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, stream);
+ return;
+
+ case '|':
+ fputs (REGISTER_PREFIX, stream);
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ case '?':
+ arm_print_condition (stream);
+ return;
+
+ case '(':
+ /* Nothing in unified syntax, otherwise the current condition code. */
+ if (!TARGET_UNIFIED_ASM)
+ arm_print_condition (stream);
+ break;
+
+ case ')':
+ /* The current condition code in unified syntax, otherwise nothing. */
+ if (TARGET_UNIFIED_ASM)
+ arm_print_condition (stream);
+ break;
+
+ case '.':
+ /* The current condition code for a condition code setting instruction.
+	 Preceded by 's' in unified syntax, otherwise followed by 's'.  */
+ if (TARGET_UNIFIED_ASM)
+ {
+	  fputc ('s', stream);
+ arm_print_condition (stream);
+ }
+ else
+ {
+ arm_print_condition (stream);
+	  fputc ('s', stream);
+ }
+ return;
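+
+      /* For example, with EQ as the current condition, a template that
+	 writes "add%." would produce "addseq" under unified syntax but
+	 "addeqs" under divided syntax.  (Illustrative; the real templates
+	 live in arm.md.)  */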
+
+ case '!':
+ /* If the instruction is conditionally executed then print
+ the current condition code, otherwise print 's'. */
+ gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
+ if (current_insn_predicate)
+ arm_print_condition (stream);
+ else
+	fputc ('s', stream);
+ break;
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* %# is a "break" sequence. It doesn't output anything, but is used to
+     separate e.g. operand numbers from following text, if that text consists
+ of further digits which we don't want to be part of the operand
+ number. */
+ case '#':
+ return;
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+ case 'N':
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ fprintf (stream, "%s", fp_const_from_val (&r));
+ }
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* An integer without a preceding # sign. */
+ case 'c':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ case 'B':
+ if (GET_CODE (x) == CONST_INT)
+ {
+ HOST_WIDE_INT val;
+ val = ARM_SIGN_EXTEND (~INTVAL (x));
+ fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
+ }
+ else
+ {
+ putc ('~', stream);
+ output_addr_const (stream, x);
+ }
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ case 'L':
+ /* The low 16 bits of an immediate constant. */
+ fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL(x) & 0xffff);
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ case 'i':
+ fprintf (stream, "%s", arithmetic_instr (x, 1));
+ return;
+
+ /* Truncate Cirrus shift counts. */
+ case 's':
+ if (GET_CODE (x) == CONST_INT)
+ {
+ fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
+ return;
+ }
+ arm_print_operand (stream, x, 0);
+ return;
+
+ case 'I':
+ fprintf (stream, "%s", arithmetic_instr (x, 0));
+ return;
+
+ case 'S':
+ {
+ HOST_WIDE_INT val;
+ const char *shift;
+
+ if (!shift_operator (x, SImode))
+ {
+ output_operand_lossage ("invalid shift operand");
+ break;
+ }
+
+ shift = shift_op (x, &val);
+
+ if (shift)
+ {
+ fprintf (stream, ", %s ", shift);
+ if (val == -1)
+ arm_print_operand (stream, XEXP (x, 1), 0);
+ else
+ fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
+ }
+ }
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+ the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+ same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
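+ /* Worked example: with WORDS_BIG_ENDIAN false and a DImode value in
+ the pair r0/r1, '%Q' prints r0 (least significant half), '%R'
+ prints r1 (most significant half) and '%H' also prints r1 (the
+ higher-numbered register). With WORDS_BIG_ENDIAN true, '%Q' and
+ '%H' both print r1 while '%R' prints r0. */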
+ case 'Q':
+ if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
+ return;
+
+ case 'R':
+ if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
+ return;
+
+ case 'H':
+ if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ asm_fprintf (stream, "%r", REGNO (x) + 1);
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ case 'J':
+ if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 3 : 2));
+ return;
+
+ case 'K':
+ if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 2 : 3));
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ case 'm':
+ asm_fprintf (stream, "%r",
+ GET_CODE (XEXP (x, 0)) == REG
+ ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
+ return;
+
+ case 'M':
+ asm_fprintf (stream, "{%r-%r}",
+ REGNO (x),
+ REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* Like 'M', but writing doubleword vector registers, for use by Neon
+ insns. */
+ case 'h':
+ {
+ int regno = (REGNO (x) - FIRST_VFP_REGNUM) / 2;
+ int numregs = ARM_NUM_REGS (GET_MODE (x)) / 2;
+ if (numregs == 1)
+ asm_fprintf (stream, "{d%d}", regno);
+ else
+ asm_fprintf (stream, "{d%d-d%d}", regno, regno + numregs - 1);
+ }
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ case 'd':
+ /* CONST_TRUE_RTX means always -- that's the default. */
+ if (x == const_true_rtx)
+ return;
+
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ fputs (arm_condition_codes[get_arm_condition_code (x)],
+ stream);
+ return;
+
+ case 'D':
+ /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
+ want to do that. */
+ if (x == const_true_rtx)
+ {
+ output_operand_lossage ("instruction never exectued");
+ return;
+ }
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
+ (get_arm_condition_code (x))],
+ stream);
+ return;
+
+ /* Cirrus registers can be accessed in a variety of ways:
+ single floating point (f)
+ double floating point (d)
+ 32-bit integer (fx)
+ 64-bit integer (dx). */
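+ /* For example (assuming reg_names[] entries of the form "mv0" ..
+ "mv15"), Cirrus register mv5 prints as "mvf5" for '%W', "mvd5" for
+ '%X', "mvfx5" for '%Y' and "mvdx5" for '%Z'. */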
+ case 'W': /* Cirrus register in F mode. */
+ case 'X': /* Cirrus register in D mode. */
+ case 'Y': /* Cirrus register in FX mode. */
+ case 'Z': /* Cirrus register in DX mode. */
+ gcc_assert (GET_CODE (x) == REG
+ && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
+
+ fprintf (stream, "mv%s%s",
+ code == 'W' ? "f"
+ : code == 'X' ? "d"
+ : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
+
+ return;
+
+ /* Print a Cirrus register, using the access mode implied by the
+ register's machine mode. */
+ case 'V':
+ {
+ int mode = GET_MODE (x);
+
+ if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ fprintf (stream, "mv%s%s",
+ mode == DFmode ? "d"
+ : mode == SImode ? "fx"
+ : mode == DImode ? "dx"
+ : "f", reg_names[REGNO (x)] + 2);
+
+ return;
+ }
+
+ case 'U':
+ if (GET_CODE (x) != REG
+ || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
+ || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
+ /* Bad value for wCG register number. */
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ else
+ fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
+ return;
+
+ /* Print an iWMMXt control register name. */
+ case 'w':
+ if (GET_CODE (x) != CONST_INT
+ || INTVAL (x) < 0
+ || INTVAL (x) >= 16)
+ /* Bad value for wC register number. */
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ else
+ {
+ static const char * wc_reg_names [16] =
+ {
+ "wCID", "wCon", "wCSSF", "wCASF",
+ "wC4", "wC5", "wC6", "wC7",
+ "wCGR0", "wCGR1", "wCGR2", "wCGR3",
+ "wC12", "wC13", "wC14", "wC15"
+ };
+
+ /* APPLE LOCAL default to Wformat-security 5764921 */
+ fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
+ }
+ return;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* Print a VFP/Neon double-precision or quad-precision register name. */
+ case 'P':
+ case 'q':
+ {
+ int mode = GET_MODE (x);
+ int is_quad = (code == 'q');
+ int regno;
+
+ if (GET_MODE_SIZE (mode) != (is_quad ? 16 : 8))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ if (GET_CODE (x) != REG
+ || !IS_VFP_REGNUM (REGNO (x)))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ regno = REGNO (x);
+ if ((is_quad && !NEON_REGNO_OK_FOR_QUAD (regno))
+ || (!is_quad && !VFP_REGNO_OK_FOR_DOUBLE (regno)))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ fprintf (stream, "%c%d", is_quad ? 'q' : 'd',
+ (regno - FIRST_VFP_REGNUM) >> (is_quad ? 2 : 1));
+ }
+ return;
+
+ /* APPLE LOCAL begin 6150859 use NEON instructions for SF math */
+ /* This code prints the name of the double-precision register that
+ starts at the register number of the indicated single-precision
+ register. */
+ case 'p':
+ {
+ int mode = GET_MODE (x);
+ int regno;
+
+ if (GET_CODE (x) != REG || !IS_VFP_REGNUM (REGNO (x))
+ || GET_MODE_SIZE (mode) != 4)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ regno = REGNO (x);
+ if (((regno - FIRST_VFP_REGNUM) & 0x1) != 0)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ fprintf (stream, "d%d", (regno - FIRST_VFP_REGNUM) >> 1);
+ }
+ return;
+ /* APPLE LOCAL end 6150859 use NEON instructions for SF math */
+
+ /* These two codes print the low/high doubleword register of a Neon quad
+ register, respectively. For pair-structure types, they can also print
+ the low/high quadword registers. */
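+ /* Illustrative example: a 16-byte (quad) value in q2, which occupies
+ d4/d5, prints as "d4" for '%e' and "d5" for '%f'; a 32-byte
+ pair-structure value starting at q2 prints as "q2" for '%e' and
+ "q3" for '%f'. */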
+ case 'e':
+ case 'f':
+ {
+ int mode = GET_MODE (x);
+ int regno;
+
+ if ((GET_MODE_SIZE (mode) != 16
+ && GET_MODE_SIZE (mode) != 32) || GET_CODE (x) != REG)
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ regno = REGNO (x);
+ if (!NEON_REGNO_OK_FOR_QUAD (regno))
+ {
+ output_operand_lossage ("invalid operand for code '%c'", code);
+ return;
+ }
+
+ if (GET_MODE_SIZE (mode) == 16)
+ fprintf (stream, "d%d", ((regno - FIRST_VFP_REGNUM) >> 1)
+ + (code == 'f' ? 1 : 0));
+ else
+ fprintf (stream, "q%d", ((regno - FIRST_VFP_REGNUM) >> 2)
+ + (code == 'f' ? 1 : 0));
+ }
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Print a VFPv3 floating-point constant, represented as an integer
+ index. */
+ case 'G':
+ {
+ int index = vfp3_const_double_index (x);
+ gcc_assert (index != -1);
+ fprintf (stream, "%d", index);
+ }
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* Print bits representing opcode features for Neon.
+
+ Bit 0 is 1 for signed, 0 for unsigned. Floats count as signed
+ and polynomials as unsigned.
+
+ Bit 1 is 1 for rounding functions, 0 otherwise.
+
+ Bit 2 is 1 for floats and polynomials, 0 for ordinary integers. */
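+ /* For instance, a bits value of 5 (signed, float) prints 'f' for
+ both '%T' and '%t'; a value of 0 prints 'u' for both; and a value
+ of 4 (unsigned, polynomial) prints 'p' for '%T' but 'u' for
+ '%t'. */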
+
+ /* Identify the type as 's', 'u', 'p' or 'f'. */
+ case 'T':
+ {
+ HOST_WIDE_INT bits = INTVAL (x);
+ fputc ((bits & 1) != 0
+ ? ((bits & 4) != 0 ? 'f' : 's')
+ : ((bits & 4) != 0 ? 'p' : 'u'),
+ stream);
+ }
+ return;
+
+ /* Likewise, but signed and unsigned integers are both 'i'. */
+ case 'F':
+ {
+ HOST_WIDE_INT bits = INTVAL (x);
+ fputc ((bits & 4) != 0
+ ? ((bits & 1) != 0 ? 'f' : 'p')
+ : 'i',
+ stream);
+ }
+ return;
+
+ /* As for 'T', but emit 'u' instead of 'p'. */
+ case 't':
+ {
+ HOST_WIDE_INT bits = INTVAL (x);
+ fputc ((bits & 1) != 0
+ ? ((bits & 4) != 0 ? 'f' : 's')
+ : 'u',
+ stream);
+ }
+ return;
+
+ /* Bit 1: rounding (vs none). */
+ case 'O':
+ {
+ HOST_WIDE_INT bits = INTVAL (x);
+ fputs ((bits & 2) != 0 ? "r" : "", stream);
+ }
+ return;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ default:
+ if (x == 0)
+ {
+ output_operand_lossage ("missing operand");
+ return;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ asm_fprintf (stream, "%r", REGNO (x));
+ break;
+
+ case MEM:
+ output_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (TARGET_NEON)
+ {
+ char fpstr[20];
+ real_to_decimal (fpstr, CONST_DOUBLE_REAL_VALUE (x),
+ sizeof (fpstr), 0, 1);
+ fprintf (stream, "#%s", fpstr);
+ }
+ else
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ break;
+
+ default:
+ gcc_assert (GET_CODE (x) != NEG);
+ fputc ('#', stream);
+ output_addr_const (stream, x);
+ break;
+ }
+ }
+}
+
+#ifndef AOF_ASSEMBLER
+/* Target hook for assembling integer objects. The ARM version needs to
+ handle word-sized values specially. */
+static bool
+arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ enum machine_mode mode;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ /* APPLE LOCAL begin ARM MACH assembler */
+ /* We can always handle unaligned data with the normal pseudoops. */
+ if (TARGET_MACHO)
+ aligned_p = 1;
+ /* APPLE LOCAL end ARM MACH assembler */
+
+ if (size == UNITS_PER_WORD && aligned_p)
+ {
+ /* APPLE LOCAL ARM MACH assembler */
+ fputs ("\t" DOT_WORD "\t", asm_out_file);
+ output_addr_const (asm_out_file, x);
+
+ /* Mark symbols as position independent. We only do this in the
+ .text segment, not in the .data segment. */
+ if (NEED_GOT_RELOC && flag_pic && making_const_table &&
+ (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
+ {
+ if (GET_CODE (x) == SYMBOL_REF
+ && (CONSTANT_POOL_ADDRESS_P (x)
+ || SYMBOL_REF_LOCAL_P (x)))
+ fputs ("(GOTOFF)", asm_out_file);
+ else if (GET_CODE (x) == LABEL_REF)
+ fputs ("(GOTOFF)", asm_out_file);
+ else
+ fputs ("(GOT)", asm_out_file);
+ }
+ fputc ('\n', asm_out_file);
+ return true;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ mode = GET_MODE (x);
+
+ if (arm_vector_mode_supported_p (mode))
+ {
+ int i, units;
+ unsigned int invmask = 0, parts_per_word;
+
+ gcc_assert (GET_CODE (x) == CONST_VECTOR);
+
+ units = CONST_VECTOR_NUNITS (x);
+ size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+
+ /* For big-endian Neon vectors, we must permute the vector to the form
+ which, when loaded by a VLDR or VLDM instruction, will give a vector
+ with the elements in the right order. */
+ if (TARGET_NEON && WORDS_BIG_ENDIAN)
+ {
+ parts_per_word = UNITS_PER_WORD / size;
+ /* FIXME: This might be wrong for 64-bit vector elements, but we don't
+ support those anywhere yet. */
+ invmask = (parts_per_word == 0) ? 0 : (1 << (parts_per_word - 1)) - 1;
+ }
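+ /* Illustrative example: for V4HI (16-bit elements, 4-byte words)
+ parts_per_word is 2, so invmask is 1 and the i ^ invmask index
+ below swaps each pair of elements within a word; for 32-bit
+ elements invmask is 0 and no permutation occurs. */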
+
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ for (i = 0; i < units; i++)
+ {
+ rtx elt = CONST_VECTOR_ELT (x, i ^ invmask);
+ assemble_integer
+ (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
+ }
+ else
+ for (i = 0; i < units; i++)
+ {
+ rtx elt = CONST_VECTOR_ELT (x, i);
+ REAL_VALUE_TYPE rval;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rval, elt);
+
+ assemble_real
+ (rval, GET_MODE_INNER (mode),
+ i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT);
+ }
+
+ return true;
+ }
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+
+/* APPLE LOCAL ARM macho file format */
+#ifdef OBJECT_FORMAT_ELF
+/* Add a function to the list of static constructors. */
+
+static void
+arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+ if (!TARGET_AAPCS_BASED)
+ {
+ default_named_section_asm_out_constructor (symbol, priority);
+ return;
+ }
+
+ /* Put these in the .init_array section, using a special relocation. */
+ switch_to_section (ctors_section);
+ assemble_align (POINTER_SIZE);
+ fputs ("\t.word\t", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputs ("(target1)\n", asm_out_file);
+}
+/* APPLE LOCAL ARM macho file format */
+#endif
+#endif
+
+/* A finite state machine takes care of noticing whether or not instructions
+ can be conditionally executed, and thus decrease execution time and code
+ size by deleting branch instructions. The fsm is controlled by
+ final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
+
+/* The state of the fsm controlling condition codes are:
+ 0: normal, do nothing special
+ 1: make ASM_OUTPUT_OPCODE not output this instruction
+ 2: make ASM_OUTPUT_OPCODE not output this instruction
+ 3: make instructions conditional
+ 4: make instructions conditional
+
+ State transitions (state->state by whom under condition):
+ 0 -> 1 final_prescan_insn if the `target' is a label
+ 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
+ 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
+ (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
+ 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
+ (the target insn is arm_target_insn).
+
+ If the jump clobbers the conditions then we use states 2 and 4.
+
+ A similar thing can be done with conditional return insns.
+
+ XXX In case the `target' is an unconditional branch, this conditionalising
+ of the instructions always reduces code size, but not always execution
+ time. But then, I want to reduce the code size to somewhere near what
+ /bin/cc produces. */
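+ /* A sketch of the transformation this enables (illustrative register
+ names): the sequence
+
+ cmp r0, #0
+ beq .L1
+ add r1, r1, #1
+ .L1:
+
+ becomes
+
+ cmp r0, #0
+ addne r1, r1, #1
+
+ i.e. the branch is elided and the skipped insn is predicated on the
+ inverse condition. */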
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* In addition to this, state is maintained for Thumb-2 COND_EXEC
+ instructions. When a COND_EXEC instruction is seen the subsequent
+ instructions are scanned so that multiple conditional instructions can be
+ combined into a single IT block. arm_condexec_count and arm_condexec_mask
+ specify the length and true/false mask for the IT block. These will be
+ decremented/zeroed by arm_asm_output_opcode as the insns are output. */
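+ /* For example, three conditional insns with arm_current_cc == ARM_EQ
+ and arm_condexec_mask == 0x5 (the first and third match, the second
+ takes the inverse condition) are emitted under a single "itet eq"
+ block by thumb2_asm_output_opcode below. */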
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Returns the index of the ARM condition code string in
+ `arm_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+static enum arm_cond_code
+get_arm_condition_code (rtx comparison)
+{
+ enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
+ int code;
+ enum rtx_code comp_code = GET_CODE (comparison);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
+ XEXP (comparison, 1));
+
+ switch (mode)
+ {
+ case CC_DNEmode: code = ARM_NE; goto dominance;
+ case CC_DEQmode: code = ARM_EQ; goto dominance;
+ case CC_DGEmode: code = ARM_GE; goto dominance;
+ case CC_DGTmode: code = ARM_GT; goto dominance;
+ case CC_DLEmode: code = ARM_LE; goto dominance;
+ case CC_DLTmode: code = ARM_LT; goto dominance;
+ case CC_DGEUmode: code = ARM_CS; goto dominance;
+ case CC_DGTUmode: code = ARM_HI; goto dominance;
+ case CC_DLEUmode: code = ARM_LS; goto dominance;
+ case CC_DLTUmode: code = ARM_CC;
+
+ dominance:
+ gcc_assert (comp_code == EQ || comp_code == NE);
+
+ if (comp_code == EQ)
+ return ARM_INVERSE_CONDITION_CODE (code);
+ return code;
+
+ case CC_NOOVmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_PL;
+ case LT: return ARM_MI;
+ default: gcc_unreachable ();
+ }
+
+ case CC_Zmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ default: gcc_unreachable ();
+ }
+
+ case CC_Nmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_MI;
+ case EQ: return ARM_PL;
+ default: gcc_unreachable ();
+ }
+
+ case CCFPEmode:
+ case CCFPmode:
+ /* These encodings assume that AC=1 in the FPA system control
+ byte. This allows us to handle all cases except UNEQ and
+ LTGT. */
+ switch (comp_code)
+ {
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LS;
+ case LT: return ARM_MI;
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case ORDERED: return ARM_VC;
+ case UNORDERED: return ARM_VS;
+ case UNLT: return ARM_LT;
+ case UNLE: return ARM_LE;
+ case UNGT: return ARM_HI;
+ case UNGE: return ARM_PL;
+ /* UNEQ and LTGT do not have a representation. */
+ case UNEQ: /* Fall through. */
+ case LTGT: /* Fall through. */
+ default: gcc_unreachable ();
+ }
+
+ case CC_SWPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_LE;
+ case GT: return ARM_LT;
+ case LE: return ARM_GE;
+ case LT: return ARM_GT;
+ case GEU: return ARM_LS;
+ case GTU: return ARM_CC;
+ case LEU: return ARM_CS;
+ case LTU: return ARM_HI;
+ default: gcc_unreachable ();
+ }
+
+ case CC_Cmode:
+ switch (comp_code)
+ {
+ case LTU: return ARM_CS;
+ case GEU: return ARM_CC;
+ default: gcc_unreachable ();
+ }
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LE;
+ case LT: return ARM_LT;
+ case GEU: return ARM_CS;
+ case GTU: return ARM_HI;
+ case LEU: return ARM_LS;
+ case LTU: return ARM_CC;
+ default: gcc_unreachable ();
+ }
+
+ default: gcc_unreachable ();
+ }
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
+ instructions. */
+void
+thumb2_final_prescan_insn (rtx insn)
+{
+ rtx first_insn = insn;
+ rtx body = PATTERN (insn);
+ rtx predicate;
+ enum arm_cond_code code;
+ int n;
+ int mask;
+
+ /* Remove the previous insn from the count of insns to be output. */
+ if (arm_condexec_count)
+ arm_condexec_count--;
+
+ /* Nothing to do if we are already inside a conditional block. */
+ if (arm_condexec_count)
+ return;
+
+ if (GET_CODE (body) != COND_EXEC)
+ return;
+
+ /* Conditional jumps are implemented directly. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ return;
+
+ predicate = COND_EXEC_TEST (body);
+ arm_current_cc = get_arm_condition_code (predicate);
+
+ n = get_attr_ce_count (insn);
+ arm_condexec_count = 1;
+ arm_condexec_mask = (1 << n) - 1;
+ arm_condexec_masklen = n;
+ /* See if subsequent instructions can be combined into the same block. */
+ for (;;)
+ {
+ insn = next_nonnote_insn (insn);
+
+ /* Jumping into the middle of an IT block is illegal, so a label or
+ barrier terminates the block. */
+ if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
+ break;
+
+ body = PATTERN (insn);
+ /* USE and CLOBBER aren't really insns, so just skip them. */
+ if (GET_CODE (body) == USE
+ || GET_CODE (body) == CLOBBER)
+ continue;
+
+ /* ??? Recognise conditional jumps, and combine them with IT blocks. */
+ if (GET_CODE (body) != COND_EXEC)
+ break;
+ /* Allow up to 4 conditionally executed instructions in a block. */
+ n = get_attr_ce_count (insn);
+ if (arm_condexec_masklen + n > 4)
+ break;
+
+ predicate = COND_EXEC_TEST (body);
+ code = get_arm_condition_code (predicate);
+ mask = (1 << n) - 1;
+ if (arm_current_cc == code)
+ arm_condexec_mask |= (mask << arm_condexec_masklen);
+ else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE (code))
+ break;
+
+ arm_condexec_count++;
+ arm_condexec_masklen += n;
+
+ /* A jump must be the last instruction in a conditional block. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ break;
+ }
+ /* Restore recog_data (getting the attributes of other insns can
+ destroy this array, but final.c assumes that it remains intact
+ across this call). */
+ extract_constrain_insn_cached (first_insn);
+}
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+void
+arm_final_prescan_insn (rtx insn)
+{
+ /* BODY will hold the body of INSN. */
+ rtx body = PATTERN (insn);
+
+ /* This will be 1 when we are trying to repeat the trick (see the
+ arm_ccfsm_state == 3 handling below), and the sense of the
+ condition then needs to be reversed. */
+ int reverse = 0;
+
+ /* JUMP_CLOBBERS will be one to indicate that the condition codes are
+ clobbered if the branch is taken, even if the rtl suggests otherwise.
+ It also means that we have to grub around within the jump expression
+ to find out what the conditions are when the jump isn't taken. */
+ int jump_clobbers = 0;
+
+ /* If we start with a return insn, we only succeed if we find another one. */
+ int seeking_return = 0;
+
+ /* START_INSN will hold the insn from where we start looking. This is the
+ first insn after the following code_label if REVERSE is true. */
+ rtx start_insn = insn;
+
+ /* If in state 4, check if the target branch is reached, in order to
+ change back to state 0. */
+ if (arm_ccfsm_state == 4)
+ {
+ if (insn == arm_target_insn)
+ {
+ arm_target_insn = NULL;
+ arm_ccfsm_state = 0;
+ }
+ return;
+ }
+
+ /* If in state 3, it is possible to repeat the trick, if this insn is an
+ unconditional branch to a label, and immediately following this branch
+ is the previous target label which is only used once, and the label this
+ branch jumps to is not too far off. */
+ if (arm_ccfsm_state == 3)
+ {
+ if (simplejump_p (insn))
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ {
+ /* XXX Isn't this always a barrier? */
+ start_insn = next_nonnote_insn (start_insn);
+ }
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ reverse = TRUE;
+ else
+ return;
+ }
+ else if (GET_CODE (body) == RETURN)
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ {
+ reverse = TRUE;
+ seeking_return = 1;
+ }
+ else
+ return;
+ }
+ else
+ return;
+ }
+
+ gcc_assert (!arm_ccfsm_state || reverse);
+ if (GET_CODE (insn) != JUMP_INSN)
+ return;
+
+ /* This jump might be paralleled with a clobber of the condition codes;
+ the jump should always come first. */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+ if (reverse
+ || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
+ {
+ int insns_skipped;
+ int fail = FALSE, succeed = FALSE;
+ /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
+ int then_not_else = TRUE;
+ rtx this_insn = start_insn, label = 0;
+
+ /* If the jump cannot be done with one instruction, we cannot
+ conditionally execute the instruction in the inverse case. */
+ if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
+ {
+ jump_clobbers = 1;
+ return;
+ }
+
+ /* Register the insn jumped to. */
+ if (reverse)
+ {
+ if (!seeking_return)
+ label = XEXP (SET_SRC (body), 0);
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
+ label = XEXP (XEXP (SET_SRC (body), 1), 0);
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
+ {
+ label = XEXP (XEXP (SET_SRC (body), 2), 0);
+ then_not_else = FALSE;
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
+ seeking_return = 1;
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
+ {
+ seeking_return = 1;
+ then_not_else = FALSE;
+ }
+ else
+ gcc_unreachable ();
+
+ /* See how many insns this branch skips, and what kind of insns. If all
+ insns are okay, and the label or unconditional branch to the same
+ label is not too far away, succeed. */
+ for (insns_skipped = 0;
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
+ {
+ rtx scanbody;
+
+ this_insn = next_nonnote_insn (this_insn);
+ if (!this_insn)
+ break;
+
+ switch (GET_CODE (this_insn))
+ {
+ case CODE_LABEL:
+ /* Succeed if it is the target label, otherwise fail since
+ control falls in from somewhere else. */
+ if (this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case BARRIER:
+ /* Succeed if the following insn is the target label.
+ Otherwise fail.
+ If return insns are used then the last insn in a function
+ will be a barrier. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case CALL_INSN:
+ /* The AAPCS says that conditional calls should not be
+ used since they make interworking inefficient (the
+ linker can't transform BL<cond> into BLX). That's
+ only a problem if the machine has BLX. */
+ if (arm_arch5)
+ {
+ fail = TRUE;
+ break;
+ }
+
+ /* Succeed if the following insn is the target label, or
+ if the following two insns are a barrier and the
+ target label. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && GET_CODE (this_insn) == BARRIER)
+ this_insn = next_nonnote_insn (this_insn);
+
+ if (this_insn && this_insn == label
+ && insns_skipped < max_insns_skipped)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case JUMP_INSN:
+ /* If this is an unconditional branch to the same label, succeed.
+ If it is to another label, do nothing. If it is conditional,
+ fail. */
+ /* XXX Probably, the tests for SET and the PC are
+ unnecessary. */
+
+ scanbody = PATTERN (this_insn);
+ if (GET_CODE (scanbody) == SET
+ && GET_CODE (SET_DEST (scanbody)) == PC)
+ {
+ if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
+ && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
+ fail = TRUE;
+ }
+ /* Fail if a conditional return is undesirable (e.g. on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && !use_return_insn (TRUE, NULL)
+ && !optimize_size)
+ fail = TRUE;
+ else if (GET_CODE (scanbody) == RETURN
+ && seeking_return)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (scanbody) == PARALLEL)
+ {
+ switch (get_attr_conds (this_insn))
+ {
+ case CONDS_NOCOND:
+ break;
+ default:
+ fail = TRUE;
+ break;
+ }
+ }
+ else
+ fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
+
+ break;
+
+ case INSN:
+ /* Instructions using or affecting the condition codes make it
+ fail. */
+ scanbody = PATTERN (this_insn);
+ if (!(GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
+ || get_attr_conds (this_insn) != CONDS_NOCOND)
+ fail = TRUE;
+
+ /* A conditional Cirrus instruction must be followed by
+ a non-Cirrus instruction. However, since we
+ conditionalize instructions in this function, and
+ since by the time we get here we can't add
+ instructions (nops) because shorten_branches() has
+ already been called, we disable the conditionalizing
+ of Cirrus instructions to be safe. */
+ if (GET_CODE (scanbody) != USE
+ && GET_CODE (scanbody) != CLOBBER
+ && get_attr_cirrus (this_insn) != CIRRUS_NOT)
+ fail = TRUE;
+
+ /* APPLE LOCAL begin 6280380 */
+ /* While most ARM instructions are predicable, a few
+ like NEON instructions aren't... */
+ if (GET_CODE (scanbody) != USE
+ && GET_CODE (scanbody) != CLOBBER
+ && get_attr_predicable (this_insn) == PREDICABLE_NO)
+ fail = TRUE;
+ /* APPLE LOCAL end 6280380 */
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (succeed)
+ {
+ if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
+ arm_target_label = CODE_LABEL_NUMBER (label);
+ else
+ {
+ gcc_assert (seeking_return || arm_ccfsm_state == 2);
+
+ while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
+ {
+ this_insn = next_nonnote_insn (this_insn);
+ gcc_assert (!this_insn
+ || (GET_CODE (this_insn) != BARRIER
+ && GET_CODE (this_insn) != CODE_LABEL));
+ }
+ if (!this_insn)
+ {
+ /* Oh dear! We ran off the end; give up. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ extract_constrain_insn_cached (insn);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ return;
+ }
+ arm_target_insn = this_insn;
+ }
+ if (jump_clobbers)
+ {
+ gcc_assert (!reverse);
+ arm_current_cc =
+ get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
+ 0), 0), 1));
+ if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ else
+ {
+ /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
+ what it was. */
+ if (!reverse)
+ arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
+ 0));
+ }
+
+ if (reverse || then_not_else)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* Restore recog_data (getting the attributes of other insns can
+ destroy this array, but final.c assumes that it remains intact
+ across this call). */
+ extract_constrain_insn_cached (insn);
+ }
+}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output IT instructions. */
+void
+thumb2_asm_output_opcode (FILE * stream)
+{
+ char buff[5];
+ int n;
+
+ if (arm_condexec_mask)
+ {
+ for (n = 0; n < arm_condexec_masklen; n++)
+ buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
+ buff[n] = 0;
+ asm_fprintf (stream, "i%s\t%s\n\t", buff,
+ arm_condition_codes[arm_current_cc]);
+ arm_condexec_mask = 0;
+ }
+}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Returns true if REGNO is a valid register
+ for holding a quantity of type MODE. */
+int
+arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return (regno == CC_REGNUM
+ || (TARGET_HARD_FLOAT && TARGET_VFP
+ && regno == VFPCC_REGNUM));
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ /* For the Thumb we only allow values bigger than SImode in
+ registers 0 - 6, so that there is always a second low
+ register available to hold the upper part of the value.
+ We probably ought to ensure that the register is the
+ start of an even numbered register pair. */
+ return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
+
+ if (TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && IS_CIRRUS_REGNUM (regno))
+ /* We have outlawed SI values in Cirrus registers because they
+ reside in the lower 32 bits, but SF values reside in the
+ upper 32 bits. This causes gcc all sorts of grief. We can't
+ even split the registers into pairs because Cirrus SI values
+ get sign extended to 64bits-- aldyh. */
+ return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
+
+ if (TARGET_HARD_FLOAT && TARGET_VFP
+ && IS_VFP_REGNUM (regno))
+ {
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (mode == SFmode || mode == SImode)
+ return VFP_REGNO_OK_FOR_SINGLE (regno);
+
+ if (mode == DFmode)
+ return VFP_REGNO_OK_FOR_DOUBLE (regno);
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+ if (TARGET_NEON)
+ return (VALID_NEON_DREG_MODE (mode) && VFP_REGNO_OK_FOR_DOUBLE (regno))
+ || (VALID_NEON_QREG_MODE (mode)
+ && NEON_REGNO_OK_FOR_QUAD (regno))
+ || (mode == TImode && NEON_REGNO_OK_FOR_NREGS (regno, 2))
+ || (mode == EImode && NEON_REGNO_OK_FOR_NREGS (regno, 3))
+ || (mode == OImode && NEON_REGNO_OK_FOR_NREGS (regno, 4))
+ || (mode == CImode && NEON_REGNO_OK_FOR_NREGS (regno, 6))
+ || (mode == XImode && NEON_REGNO_OK_FOR_NREGS (regno, 8));
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ return FALSE;
+ }
+
+ if (TARGET_REALLY_IWMMXT)
+ {
+ if (IS_IWMMXT_GR_REGNUM (regno))
+ return mode == SImode;
+
+ if (IS_IWMMXT_REGNUM (regno))
+ return VALID_IWMMXT_REG_MODE (mode);
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* We allow any value to be stored in the general registers.
+ Restrict doubleword quantities to even register pairs so that we can
+ use ldrd. Do not allow Neon structure opaque modes in general registers;
+ they would use too many. */
+ if (regno <= LAST_ARM_REGNUM)
+ return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0)
+ && !VALID_NEON_STRUCT_MODE (mode);
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+ if (regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)
+ /* We only allow integers in the fake hard registers. */
+ return GET_MODE_CLASS (mode) == MODE_INT;
+
+ /* The only registers left are the FPA registers
+ which we only allow to hold FP values. */
+ return (TARGET_HARD_FLOAT && TARGET_FPA
+ && GET_MODE_CLASS (mode) == MODE_FLOAT
+ && regno >= FIRST_FPA_REGNUM
+ && regno <= LAST_FPA_REGNUM);
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
+ not used in ARM mode. */
+int
+arm_regno_class (int regno)
+{
+ if (TARGET_THUMB1)
+ {
+ if (regno == STACK_POINTER_REGNUM)
+ return STACK_REG;
+ if (regno == CC_REGNUM)
+ return CC_REG;
+ if (regno < 8)
+ return LO_REGS;
+ return HI_REGS;
+ }
+
+ if (TARGET_THUMB2 && regno < 8)
+ return LO_REGS;
+
+ if ( regno <= LAST_ARM_REGNUM
+ || regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)
+ return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;
+
+ if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
+ return TARGET_THUMB2 ? CC_REG : NO_REGS;
+
+ if (IS_CIRRUS_REGNUM (regno))
+ return CIRRUS_REGS;
+
+ if (IS_VFP_REGNUM (regno))
+ {
+ if (regno <= D7_VFP_REGNUM)
+ return VFP_D0_D7_REGS;
+ else if (regno <= LAST_LO_VFP_REGNUM)
+ return VFP_LO_REGS;
+ else
+ return VFP_HI_REGS;
+ }
+
+ if (IS_IWMMXT_REGNUM (regno))
+ return IWMMXT_REGS;
+
+ if (IS_IWMMXT_GR_REGNUM (regno))
+ return IWMMXT_GR_REGS;
+
+ return FPA_REGS;
+}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Handle a special case when computing the offset
+ of an argument from the frame pointer. */
+int
+arm_debugger_arg_offset (int value, rtx addr)
+{
+ rtx insn;
+
+ /* APPLE LOCAL begin ARM prefer SP to FP */
+ /* If we generated a frame, but the offset is from the SP anyway, then
+ we have to adjust the offset to be FP-relative, as that's what gdb
+ will be expecting. */
+ if (frame_pointer_needed)
+ {
+ if ((GET_CODE (addr) == REG) && (REGNO (addr) == SP_REGNUM))
+ return arm_local_debug_offset (addr);
+
+ if (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) == SP_REGNUM)
+ return arm_local_debug_offset (addr);
+ }
+
+ /* We are only interested if dbxout_parms() failed to compute the offset. */
+ if (value != 0)
+ return value;
+ /* APPLE LOCAL end ARM prefer SP to FP */
+
+ /* We can only cope with the case where the address is held in a register. */
+ if (GET_CODE (addr) != REG)
+ return 0;
+
+ /* If we are using the frame pointer to point at the argument, then
+ an offset of 0 is correct. */
+ if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
+ return 0;
+
+ /* If we are using the stack pointer to point at the
+ argument, then an offset of 0 is correct. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* ??? Check this is consistent with thumb2 frame layout. */
+ if ((TARGET_THUMB || !frame_pointer_needed)
+ && REGNO (addr) == SP_REGNUM)
+ return 0;
+
+ /* Oh dear. The argument is pointed to by a register rather
+ than being held in a register, or being stored at a known
+ offset from the frame pointer. Since GDB only understands
+ those two kinds of argument we must translate the address
+ held in the register into an offset from the frame pointer.
+ We do this by searching through the insns for the function
+ looking to see where this register gets its value. If the
+ register is initialized from the frame pointer plus an offset
+ then we are in luck and we can continue, otherwise we give up.
+
+ This code is exercised by producing debugging information
+ for a function with arguments like this:
+
+ double func (double a, double b, int c, double d) {return d;}
+
+ Without this code the stab for parameter 'd' will be set to
+ an offset of 0 from the frame pointer, rather than 8. */
+
+ /* The if() statement says:
+
+ If the insn is a normal instruction
+ and if the insn is setting the value in a register
+ and if the register being set is the register holding the address of the argument
+ and if the address is computed by an addition
+ that involves adding to a register
+ which is the frame pointer
+ a constant integer
+
+ then... */
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if ( GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ )
+ {
+ value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
+
+ break;
+ }
+ }
+
+ if (value == 0)
+ {
+ debug_rtx (addr);
+ warning (0, "unable to compute real location of stacked parameter");
+ value = 8; /* XXX magic hack */
+ }
+
+ return value;
+}
+
+#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
+ do \
+ { \
+ if ((MASK) & insn_flags) \
+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
+ BUILT_IN_MD, NULL, NULL_TREE); \
+ } \
+ while (0)
+
+struct builtin_description
+{
+ const unsigned int mask;
+ const enum insn_code icode;
+ const char * const name;
+ const enum arm_builtins code;
+ const enum rtx_code comparison;
+ const unsigned int flag;
+};
+
+static const struct builtin_description bdesc_2arg[] =
+{
+#define IWMMXT_BUILTIN(code, string, builtin) \
+ { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
+ ARM_BUILTIN_##builtin, 0, 0 },
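+/* For example, the first entry below expands to:
+ { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
+ ARM_BUILTIN_WADDB, 0, 0 }, */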
+
+ IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
+ IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
+ IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
+ IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
+ IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
+ IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
+ IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
+ IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
+ IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
+ IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
+ IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
+ IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
+ IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
+ IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
+ IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
+ IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
+ IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
+ IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
+ IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
+ IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
+ IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
+ IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
+ IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
+ IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
+ IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
+ IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
+ IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
+ IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
+ IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
+ IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
+ IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
+ IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
+ IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
+ IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
+ IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
+ IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
+ IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
+ IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
+ IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
+ IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
+ IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
+ IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
+ IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
+ IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
+ IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
+ IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
+ IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
+ IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
+ IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
+ IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
+ IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
+ IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
+ IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
+ IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
+
+#define IWMMXT_BUILTIN2(code, builtin) \
+ { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
+
+ IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
+ IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
+ IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
+ IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
+ IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
+ IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
+ IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
+ IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
+ IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
+ IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
+ IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
+ IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
+ IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
+ IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
+ IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
+ IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
+ IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
+ IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
+ IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
+ IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
+ IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
+ IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
+ IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
+ IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
+ IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
+ IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
+ IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
+ IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
+ IWMMXT_BUILTIN2 (rordi3_di, WRORD)
+ IWMMXT_BUILTIN2 (rordi3, WRORDI)
+ IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
+ IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
+};
+
+static const struct builtin_description bdesc_1arg[] =
+{
+ IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
+ IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
+ IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
+ IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
+ IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
+ IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
+ IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
+ IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
+ IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
+ IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
+};
+
+/* Set up all the iWMMXt builtins. This is
+ not called if TARGET_IWMMXT is zero. */
+
+static void
+arm_init_iwmmxt_builtins (void)
+{
+ const struct builtin_description * d;
+ size_t i;
+ tree endlink = void_list_node;
+
+ tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
+ tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
+ tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
+
+ tree int_ftype_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node, endlink));
+ tree v8qi_ftype_v8qi_v8qi_int
+ = build_function_type (V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+ tree v4hi_ftype_v4hi_int
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree v2si_ftype_v2si_int
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree v2si_ftype_di_di
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, long_long_integer_type_node,
+ tree_cons (NULL_TREE, long_long_integer_type_node,
+ endlink)));
+ tree di_ftype_di_int
+ = build_function_type (long_long_integer_type_node,
+ tree_cons (NULL_TREE, long_long_integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree di_ftype_di_int_int
+ = build_function_type (long_long_integer_type_node,
+ tree_cons (NULL_TREE, long_long_integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+ tree int_ftype_v8qi
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ endlink));
+ tree int_ftype_v4hi
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink));
+ tree int_ftype_v2si
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ endlink));
+ tree int_ftype_v8qi_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree int_ftype_v4hi_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree int_ftype_v2si_int
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree v8qi_ftype_v8qi_int_int
+ = build_function_type (V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+ tree v4hi_ftype_v4hi_int_int
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+ tree v2si_ftype_v2si_int_int
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+ /* Miscellaneous. */
+ tree v8qi_ftype_v4hi_v4hi
+ = build_function_type (V8QI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink)));
+ tree v4hi_ftype_v2si_v2si
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ endlink)));
+ tree v2si_ftype_v4hi_v4hi
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink)));
+ tree v2si_ftype_v8qi_v8qi
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ endlink)));
+ tree v4hi_ftype_v4hi_di
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE,
+ long_long_integer_type_node,
+ endlink)));
+ tree v2si_ftype_v2si_di
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE,
+ long_long_integer_type_node,
+ endlink)));
+ tree void_ftype_int_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+ tree di_ftype_void
+ = build_function_type (long_long_unsigned_type_node, endlink);
+ tree di_ftype_v8qi
+ = build_function_type (long_long_integer_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ endlink));
+ tree di_ftype_v4hi
+ = build_function_type (long_long_integer_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink));
+ tree di_ftype_v2si
+ = build_function_type (long_long_integer_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ endlink));
+ tree v2si_ftype_v4hi
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink));
+ tree v4hi_ftype_v8qi
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ endlink));
+
+ tree di_ftype_di_v4hi_v4hi
+ = build_function_type (long_long_unsigned_type_node,
+ tree_cons (NULL_TREE,
+ long_long_unsigned_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE,
+ V4HI_type_node,
+ endlink))));
+
+ tree di_ftype_v4hi_v4hi
+ = build_function_type (long_long_unsigned_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink)));
+
+ /* Normal vector binops. */
+ tree v8qi_ftype_v8qi_v8qi
+ = build_function_type (V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ tree_cons (NULL_TREE, V8QI_type_node,
+ endlink)));
+ tree v4hi_ftype_v4hi_v4hi
+ = build_function_type (V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ tree_cons (NULL_TREE, V4HI_type_node,
+ endlink)));
+ tree v2si_ftype_v2si_v2si
+ = build_function_type (V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ tree_cons (NULL_TREE, V2SI_type_node,
+ endlink)));
+ tree di_ftype_di_di
+ = build_function_type (long_long_unsigned_type_node,
+ tree_cons (NULL_TREE, long_long_unsigned_type_node,
+ tree_cons (NULL_TREE,
+ long_long_unsigned_type_node,
+ endlink)));
+
+ /* Add all builtins that are more or less simple operations on two
+ operands. */
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ {
+ /* Use one of the operands; the target can have a different mode for
+ mask-generating compares. */
+ enum machine_mode mode;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+
+ mode = insn_data[d->icode].operand[1].mode;
+
+ switch (mode)
+ {
+ case V8QImode:
+ type = v8qi_ftype_v8qi_v8qi;
+ break;
+ case V4HImode:
+ type = v4hi_ftype_v4hi_v4hi;
+ break;
+ case V2SImode:
+ type = v2si_ftype_v2si_v2si;
+ break;
+ case DImode:
+ type = di_ftype_di_di;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ def_mbuiltin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the remaining MMX insns with somewhat more complicated types. */
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
+
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
+ def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
+}
+
+static void
+arm_init_tls_builtins (void)
+{
+ tree ftype;
+ tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
+ tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
+
+ ftype = build_function_type (ptr_type_node, void_list_node);
+ lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
+ ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
+ NULL, const_nothrow);
+}
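+
+/* Illustrative only, not part of the original source: the builtin
+   registered above is used as
+       void *tp = __builtin_thread_pointer ();
+   and, being declared both `const' and `nothrow', repeated calls to it
+   may be CSEd by the optimizers.  */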
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+typedef enum {
+ T_V8QI = 0x0001,
+ T_V4HI = 0x0002,
+ T_V2SI = 0x0004,
+ T_V2SF = 0x0008,
+ T_DI = 0x0010,
+ T_V16QI = 0x0020,
+ T_V8HI = 0x0040,
+ T_V4SI = 0x0080,
+ T_V4SF = 0x0100,
+ T_V2DI = 0x0200,
+ T_TI = 0x0400,
+ T_EI = 0x0800,
+ T_OI = 0x1000
+} neon_builtin_type_bits;
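+
+/* Illustrative only, not part of the original source: these enumerators
+   are bitmask values, so an instruction available in v8qi, v4hi and v2si
+   forms is described by T_V8QI | T_V4HI | T_V2SI (== 0x0007).  */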
+
+#define v8qi_UP T_V8QI
+#define v4hi_UP T_V4HI
+#define v2si_UP T_V2SI
+#define v2sf_UP T_V2SF
+#define di_UP T_DI
+#define v16qi_UP T_V16QI
+#define v8hi_UP T_V8HI
+#define v4si_UP T_V4SI
+#define v4sf_UP T_V4SF
+#define v2di_UP T_V2DI
+#define ti_UP T_TI
+#define ei_UP T_EI
+#define oi_UP T_OI
+
+#define UP(X) X##_UP
+
+#define T_MAX 13
+
+/* FIXME: Add other types of insn (loads & stores, etc.). */
+typedef enum {
+ NEON_BINOP,
+ NEON_TERNOP,
+ NEON_UNOP,
+ NEON_GETLANE,
+ NEON_SETLANE,
+ NEON_CREATE,
+ NEON_DUP,
+ NEON_DUPLANE,
+ NEON_COMBINE,
+ NEON_SPLIT,
+ NEON_LANEMUL,
+ NEON_LANEMULL,
+ NEON_LANEMULH,
+ NEON_LANEMAC,
+ NEON_SCALARMUL,
+ NEON_SCALARMULL,
+ NEON_SCALARMULH,
+ NEON_SCALARMAC,
+ NEON_CONVERT,
+ NEON_FIXCONV,
+ NEON_SELECT,
+ NEON_RESULTPAIR,
+ NEON_REINTERP,
+ NEON_VTBL,
+ NEON_VTBX,
+ NEON_LOAD1,
+ NEON_LOAD1LANE,
+ NEON_STORE1,
+ NEON_STORE1LANE,
+ NEON_LOADSTRUCT,
+ NEON_LOADSTRUCTLANE,
+ NEON_STORESTRUCT,
+ NEON_STORESTRUCTLANE,
+ NEON_LOGICBINOP,
+ NEON_SHIFTINSERT,
+ NEON_SHIFTIMM,
+ NEON_SHIFTACC
+} neon_itype;
+
+typedef struct {
+ const char *name;
+ const neon_itype itype;
+ const neon_builtin_type_bits bits;
+ const enum insn_code codes[T_MAX];
+ const unsigned int num_vars;
+ unsigned int base_fcode;
+} neon_builtin_datum;
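+
+/* Explanatory note, not part of the original source: `bits' records which
+   neon_builtin_type_bits variants an entry provides, `codes' lists the
+   matching insn codes in the same order, `num_vars' is their count, and
+   `base_fcode' is filled in when the builtins are registered below.  */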
+
+#define CF(N,X) CODE_FOR_neon_##N##X
+
+#define VAR1(T, N, A) \
+ #N, NEON_##T, UP (A), { CF (N, A) }, 1, 0
+#define VAR2(T, N, A, B) \
+ #N, NEON_##T, UP (A) | UP (B), { CF (N, A), CF (N, B) }, 2, 0
+#define VAR3(T, N, A, B, C) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C), \
+ { CF (N, A), CF (N, B), CF (N, C) }, 3, 0
+#define VAR4(T, N, A, B, C, D) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D) }, 4, 0
+#define VAR5(T, N, A, B, C, D, E) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E) }, 5, 0
+#define VAR6(T, N, A, B, C, D, E, F) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F) }, 6, 0
+#define VAR7(T, N, A, B, C, D, E, F, G) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G) }, 7, 0
+#define VAR8(T, N, A, B, C, D, E, F, G, H) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
+ | UP (H), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H) }, 8, 0
+#define VAR9(T, N, A, B, C, D, E, F, G, H, I) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I) }, 9, 0
+#define VAR10(T, N, A, B, C, D, E, F, G, H, I, J) \
+ #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
+ | UP (H) | UP (I) | UP (J), \
+ { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
+ CF (N, G), CF (N, H), CF (N, I), CF (N, J) }, 10, 0
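+
+/* Illustrative expansion, not part of the original source: the table entry
+     { VAR3 (BINOP, vaddl, v8qi, v4hi, v2si) }
+   expands via the UP and CF helpers above to the initializer
+     { "vaddl", NEON_BINOP, T_V8QI | T_V4HI | T_V2SI,
+       { CODE_FOR_neon_vaddlv8qi, CODE_FOR_neon_vaddlv4hi,
+         CODE_FOR_neon_vaddlv2si }, 3, 0 }  */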
+
+/* The mode entries in the following table correspond to the "key" type of the
+ instruction variant, i.e. equivalent to that which would be specified after
+ the assembler mnemonic, which usually refers to the last vector operand.
+ (Signed/unsigned/polynomial types are not differentiated, however; they are
+ all mapped onto the same mode for a given element size.) The modes listed
+ per instruction should be the same as those defined for that instruction's
+ pattern in neon.md.
+ WARNING: Variants should be listed in the same increasing order as
+ neon_builtin_type_bits. */
+
+static neon_builtin_datum neon_builtin_data[] =
+{
+ { VAR10 (BINOP, vadd,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR3 (BINOP, vaddl, v8qi, v4hi, v2si) },
+ { VAR3 (BINOP, vaddw, v8qi, v4hi, v2si) },
+ { VAR6 (BINOP, vhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR8 (BINOP, vqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR3 (BINOP, vaddhn, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vmul, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (TERNOP, vmla, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR3 (TERNOP, vmlal, v8qi, v4hi, v2si) },
+ { VAR8 (TERNOP, vmls, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR3 (TERNOP, vmlsl, v8qi, v4hi, v2si) },
+ { VAR4 (BINOP, vqdmulh, v4hi, v2si, v8hi, v4si) },
+ { VAR2 (TERNOP, vqdmlal, v4hi, v2si) },
+ { VAR2 (TERNOP, vqdmlsl, v4hi, v2si) },
+ { VAR3 (BINOP, vmull, v8qi, v4hi, v2si) },
+ { VAR2 (SCALARMULL, vmull_n, v4hi, v2si) },
+ { VAR2 (LANEMULL, vmull_lane, v4hi, v2si) },
+ { VAR2 (SCALARMULL, vqdmull_n, v4hi, v2si) },
+ { VAR2 (LANEMULL, vqdmull_lane, v4hi, v2si) },
+ { VAR4 (SCALARMULH, vqdmulh_n, v4hi, v2si, v8hi, v4si) },
+ { VAR4 (LANEMULH, vqdmulh_lane, v4hi, v2si, v8hi, v4si) },
+ { VAR2 (BINOP, vqdmull, v4hi, v2si) },
+ { VAR8 (BINOP, vshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR3 (SHIFTIMM, vshrn_n, v8hi, v4si, v2di) },
+ { VAR3 (SHIFTIMM, vqshrn_n, v8hi, v4si, v2di) },
+ { VAR3 (SHIFTIMM, vqshrun_n, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vqshlu_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR3 (SHIFTIMM, vshll_n, v8qi, v4hi, v2si) },
+ { VAR8 (SHIFTACC, vsra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR10 (BINOP, vsub,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR3 (BINOP, vsubl, v8qi, v4hi, v2si) },
+ { VAR3 (BINOP, vsubw, v8qi, v4hi, v2si) },
+ { VAR8 (BINOP, vqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR6 (BINOP, vhsub, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR3 (BINOP, vsubhn, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vceq, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (BINOP, vcge, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (BINOP, vcgt, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR2 (BINOP, vcage, v2sf, v4sf) },
+ { VAR2 (BINOP, vcagt, v2sf, v4sf) },
+ { VAR6 (BINOP, vtst, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR8 (BINOP, vabd, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR3 (BINOP, vabdl, v8qi, v4hi, v2si) },
+ { VAR6 (TERNOP, vaba, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR3 (TERNOP, vabal, v8qi, v4hi, v2si) },
+ { VAR8 (BINOP, vmax, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (BINOP, vmin, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR4 (BINOP, vpadd, v8qi, v4hi, v2si, v2sf) },
+ { VAR6 (UNOP, vpaddl, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (BINOP, vpadal, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR4 (BINOP, vpmax, v8qi, v4hi, v2si, v2sf) },
+ { VAR4 (BINOP, vpmin, v8qi, v4hi, v2si, v2sf) },
+ { VAR2 (BINOP, vrecps, v2sf, v4sf) },
+ { VAR2 (BINOP, vrsqrts, v2sf, v4sf) },
+ { VAR8 (SHIFTINSERT, vsri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTINSERT, vsli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (UNOP, vabs, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR6 (UNOP, vqabs, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR8 (UNOP, vneg, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR6 (UNOP, vqneg, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (UNOP, vcls, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR6 (UNOP, vclz, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ { VAR2 (UNOP, vcnt, v8qi, v16qi) },
+ { VAR4 (UNOP, vrecpe, v2si, v2sf, v4si, v4sf) },
+ { VAR4 (UNOP, vrsqrte, v2si, v2sf, v4si, v4sf) },
+ { VAR6 (UNOP, vmvn, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
+ /* FIXME: vget_lane supports more variants than this! */
+ { VAR10 (GETLANE, vget_lane,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (SETLANE, vset_lane,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (CREATE, vcreate, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR10 (DUP, vdup_n,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (DUPLANE, vdup_lane,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (COMBINE, vcombine, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (SPLIT, vget_high, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (SPLIT, vget_low, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR3 (UNOP, vmovn, v8hi, v4si, v2di) },
+ { VAR3 (UNOP, vqmovn, v8hi, v4si, v2di) },
+ { VAR3 (UNOP, vqmovun, v8hi, v4si, v2di) },
+ { VAR3 (UNOP, vmovl, v8qi, v4hi, v2si) },
+ { VAR6 (LANEMUL, vmul_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR6 (LANEMAC, vmla_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR2 (LANEMAC, vmlal_lane, v4hi, v2si) },
+ { VAR2 (LANEMAC, vqdmlal_lane, v4hi, v2si) },
+ { VAR6 (LANEMAC, vmls_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR2 (LANEMAC, vmlsl_lane, v4hi, v2si) },
+ { VAR2 (LANEMAC, vqdmlsl_lane, v4hi, v2si) },
+ { VAR6 (SCALARMUL, vmul_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR6 (SCALARMAC, vmla_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR2 (SCALARMAC, vmlal_n, v4hi, v2si) },
+ { VAR2 (SCALARMAC, vqdmlal_n, v4hi, v2si) },
+ { VAR6 (SCALARMAC, vmls_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR2 (SCALARMAC, vmlsl_n, v4hi, v2si) },
+ { VAR2 (SCALARMAC, vqdmlsl_n, v4hi, v2si) },
+ { VAR10 (BINOP, vext,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR8 (UNOP, vrev64, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR4 (UNOP, vrev32, v8qi, v4hi, v16qi, v8hi) },
+ { VAR2 (UNOP, vrev16, v8qi, v16qi) },
+ { VAR4 (CONVERT, vcvt, v2si, v2sf, v4si, v4sf) },
+ { VAR4 (FIXCONV, vcvt_n, v2si, v2sf, v4si, v4sf) },
+ { VAR10 (SELECT, vbsl,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR1 (VTBL, vtbl1, v8qi) },
+ { VAR1 (VTBL, vtbl2, v8qi) },
+ { VAR1 (VTBL, vtbl3, v8qi) },
+ { VAR1 (VTBL, vtbl4, v8qi) },
+ { VAR1 (VTBX, vtbx1, v8qi) },
+ { VAR1 (VTBX, vtbx2, v8qi) },
+ { VAR1 (VTBX, vtbx3, v8qi) },
+ { VAR1 (VTBX, vtbx4, v8qi) },
+ { VAR8 (RESULTPAIR, vtrn, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (RESULTPAIR, vzip, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR8 (RESULTPAIR, vuzp, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
+ { VAR5 (REINTERP, vreinterpretv8qi, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretdi, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOAD1, vld1,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOAD1LANE, vld1_lane,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOAD1, vld1_dup,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (STORE1, vst1,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (STORE1LANE, vst1_lane,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR9 (LOADSTRUCT,
+ vld2, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (LOADSTRUCTLANE, vld2_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR5 (LOADSTRUCT, vld2_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR9 (STORESTRUCT, vst2,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (STORESTRUCTLANE, vst2_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR9 (LOADSTRUCT,
+ vld3, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (LOADSTRUCTLANE, vld3_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR5 (LOADSTRUCT, vld3_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR9 (STORESTRUCT, vst3,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (STORESTRUCTLANE, vst3_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR9 (LOADSTRUCT, vld4,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (LOADSTRUCTLANE, vld4_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR5 (LOADSTRUCT, vld4_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR9 (STORESTRUCT, vst4,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ { VAR7 (STORESTRUCTLANE, vst4_lane,
+ v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
+ { VAR10 (LOGICBINOP, vand,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOGICBINOP, vorr,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (BINOP, veor,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOGICBINOP, vbic,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR10 (LOGICBINOP, vorn,
+ v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) }
+};
+
+#undef CF
+#undef VAR1
+#undef VAR2
+#undef VAR3
+#undef VAR4
+#undef VAR5
+#undef VAR6
+#undef VAR7
+#undef VAR8
+#undef VAR9
+#undef VAR10
+
+static int
+valid_neon_mode (enum machine_mode mode)
+{
+ return VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode);
+}
+
+/* APPLE LOCAL begin 7083296 Build without warnings. */
+static tree
+make_neon_float_type (void)
+{
+ tree neon_float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (neon_float_type_node) = FLOAT_TYPE_SIZE;
+ layout_type (neon_float_type_node);
+ return neon_float_type_node;
+}
+/* APPLE LOCAL end 7083296 Build without warnings. */
+
+static void
+arm_init_neon_builtins (void)
+{
+#define qi_TN neon_intQI_type_node
+#define hi_TN neon_intHI_type_node
+#define pqi_TN neon_polyQI_type_node
+#define qhi_TN neon_polyHI_type_node
+#define si_TN neon_intSI_type_node
+#define di_TN neon_intDI_type_node
+#define ti_TN intTI_type_node
+#define ei_TN intEI_type_node
+#define oi_TN intOI_type_node
+#define ci_TN intCI_type_node
+#define xi_TN intXI_type_node
+
+#define sf_TN neon_float_type_node
+
+#define v8qi_TN V8QI_type_node
+#define v4hi_TN V4HI_type_node
+#define v2si_TN V2SI_type_node
+#define v2sf_TN V2SF_type_node
+
+#define v16qi_TN V16QI_type_node
+#define v8hi_TN V8HI_type_node
+#define v4si_TN V4SI_type_node
+#define v4sf_TN V4SF_type_node
+#define v2di_TN V2DI_type_node
+
+#define pv8qi_TN V8QI_pointer_node
+#define pv4hi_TN V4HI_pointer_node
+#define pv2si_TN V2SI_pointer_node
+#define pv2sf_TN V2SF_pointer_node
+#define pdi_TN intDI_pointer_node
+
+#define pv16qi_TN V16QI_pointer_node
+#define pv8hi_TN V8HI_pointer_node
+#define pv4si_TN V4SI_pointer_node
+#define pv4sf_TN V4SF_pointer_node
+#define pv2di_TN V2DI_pointer_node
+
+#define void_TN void_type_node
+
+#define TYPE2(A,B) \
+ tree A##_##ftype##_##B = build_function_type_list (A##_TN, B##_TN, NULL)
+#define TYPE3(A,B,C) \
+ tree A##_##ftype##_##B##_##C = build_function_type_list (A##_TN, B##_TN, \
+ C##_TN, NULL)
+#define TYPE4(A,B,C,D) \
+ tree A##_##ftype##_##B##_##C##_##D = build_function_type_list (A##_TN, \
+ B##_TN, C##_TN, D##_TN, NULL)
+#define TYPE5(A,B,C,D,E) \
+ tree A##_##ftype##_##B##_##C##_##D##_##E = build_function_type_list (A##_TN, \
+ B##_TN, C##_TN, D##_TN, E##_TN, NULL)
+#define TYPE6(A,B,C,D,E,F) \
+ tree A##_##ftype##_##B##_##C##_##D##_##E##_##F = build_function_type_list \
+ (A##_TN, B##_TN, C##_TN, D##_TN, E##_TN, F##_TN, NULL)
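+
+/* Illustrative expansion, not part of the original source:
+     TYPE4 (v8qi, v8qi, v8qi, si);
+   declares
+     tree v8qi_ftype_v8qi_v8qi_si
+       = build_function_type_list (V8QI_type_node, V8QI_type_node,
+                                   V8QI_type_node, neon_intSI_type_node,
+                                   NULL);
+   using the *_TN aliases above.  */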
+
+ unsigned int i, fcode = ARM_BUILTIN_NEON_BASE;
+
+ /* Create distinguished type nodes for NEON vector element types,
+ and pointers to values of such types, so we can detect them later. */
+ tree neon_intQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
+ tree neon_intHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
+ tree neon_polyQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
+ tree neon_polyHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
+ tree neon_intSI_type_node = make_signed_type (GET_MODE_PRECISION (SImode));
+ tree neon_intDI_type_node = make_signed_type (GET_MODE_PRECISION (DImode));
+ /* APPLE LOCAL begin 7083296 Build without warnings. */
+ tree neon_float_type_node = make_neon_float_type ();
+
+ /* APPLE LOCAL end 7083296 Build without warnings. */
+ tree intQI_pointer_node = build_pointer_type (neon_intQI_type_node);
+ tree intHI_pointer_node = build_pointer_type (neon_intHI_type_node);
+ tree intSI_pointer_node = build_pointer_type (neon_intSI_type_node);
+ tree intDI_pointer_node = build_pointer_type (neon_intDI_type_node);
+ tree float_pointer_node = build_pointer_type (neon_float_type_node);
+
+ /* Next create constant-qualified versions of the above types. */
+ tree const_intQI_node = build_qualified_type (neon_intQI_type_node,
+ TYPE_QUAL_CONST);
+ tree const_intHI_node = build_qualified_type (neon_intHI_type_node,
+ TYPE_QUAL_CONST);
+ tree const_intSI_node = build_qualified_type (neon_intSI_type_node,
+ TYPE_QUAL_CONST);
+ tree const_intDI_node = build_qualified_type (neon_intDI_type_node,
+ TYPE_QUAL_CONST);
+ tree const_float_node = build_qualified_type (neon_float_type_node,
+ TYPE_QUAL_CONST);
+
+ tree const_intQI_pointer_node = build_pointer_type (const_intQI_node);
+ tree const_intHI_pointer_node = build_pointer_type (const_intHI_node);
+ tree const_intSI_pointer_node = build_pointer_type (const_intSI_node);
+ tree const_intDI_pointer_node = build_pointer_type (const_intDI_node);
+ tree const_float_pointer_node = build_pointer_type (const_float_node);
+
+ /* Now create vector types based on our NEON element types. */
+ /* 64-bit vectors. */
+ tree V8QI_type_node =
+ build_vector_type_for_mode (neon_intQI_type_node, V8QImode);
+ tree V4HI_type_node =
+ build_vector_type_for_mode (neon_intHI_type_node, V4HImode);
+ tree V2SI_type_node =
+ build_vector_type_for_mode (neon_intSI_type_node, V2SImode);
+ tree V2SF_type_node =
+ build_vector_type_for_mode (neon_float_type_node, V2SFmode);
+ /* 128-bit vectors. */
+ tree V16QI_type_node =
+ build_vector_type_for_mode (neon_intQI_type_node, V16QImode);
+ tree V8HI_type_node =
+ build_vector_type_for_mode (neon_intHI_type_node, V8HImode);
+ tree V4SI_type_node =
+ build_vector_type_for_mode (neon_intSI_type_node, V4SImode);
+ tree V4SF_type_node =
+ build_vector_type_for_mode (neon_float_type_node, V4SFmode);
+ tree V2DI_type_node =
+ build_vector_type_for_mode (neon_intDI_type_node, V2DImode);
+
+ /* Unsigned integer types for various mode sizes. */
+ tree intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
+ tree intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
+ tree intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
+ tree intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+
+ /* Opaque integer types for structures of vectors. */
+ tree intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
+ tree intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
+ tree intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
+ tree intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
+
+ /* Pointers to vector types. */
+ tree V8QI_pointer_node = build_pointer_type (V8QI_type_node);
+ tree V4HI_pointer_node = build_pointer_type (V4HI_type_node);
+ tree V2SI_pointer_node = build_pointer_type (V2SI_type_node);
+ tree V2SF_pointer_node = build_pointer_type (V2SF_type_node);
+ tree V16QI_pointer_node = build_pointer_type (V16QI_type_node);
+ tree V8HI_pointer_node = build_pointer_type (V8HI_type_node);
+ tree V4SI_pointer_node = build_pointer_type (V4SI_type_node);
+ tree V4SF_pointer_node = build_pointer_type (V4SF_type_node);
+ tree V2DI_pointer_node = build_pointer_type (V2DI_type_node);
+
+ /* Binops, all-doubleword arithmetic. */
+ TYPE4 (v8qi, v8qi, v8qi, si);
+ TYPE4 (v4hi, v4hi, v4hi, si);
+ TYPE4 (v2si, v2si, v2si, si);
+ TYPE4 (v2sf, v2sf, v2sf, si);
+ TYPE4 (di, di, di, si);
+
+ /* Binops, all-quadword arithmetic. */
+ TYPE4 (v16qi, v16qi, v16qi, si);
+ TYPE4 (v8hi, v8hi, v8hi, si);
+ TYPE4 (v4si, v4si, v4si, si);
+ TYPE4 (v4sf, v4sf, v4sf, si);
+ TYPE4 (v2di, v2di, v2di, si);
+
+ /* Binops, "long" operations (dest wider than operands). */
+ TYPE4 (v8hi, v8qi, v8qi, si);
+ TYPE4 (v4si, v4hi, v4hi, si);
+ TYPE4 (v2di, v2si, v2si, si);
+
+ /* Binops, "wide" operations (dest and first operand wider than second
+ operand). */
+ TYPE4 (v8hi, v8hi, v8qi, si);
+ TYPE4 (v4si, v4si, v4hi, si);
+ TYPE4 (v2di, v2di, v2si, si);
+
+ /* Binops, "narrow" operations (dest narrower than operands). */
+ TYPE4 (v8qi, v8hi, v8hi, si);
+ TYPE4 (v4hi, v4si, v4si, si);
+ TYPE4 (v2si, v2di, v2di, si);
+
+ /* Binops, comparisons (return type always an integer vector). */
+ TYPE4 (v2si, v2sf, v2sf, si);
+ TYPE4 (v4si, v4sf, v4sf, si);
+
+ /* Binops, dest and first operand elements wider (vpadal). */
+ TYPE4 (v4hi, v4hi, v8qi, si);
+ TYPE4 (v2si, v2si, v4hi, si);
+ TYPE4 (di, di, v2si, si);
+ TYPE4 (v8hi, v8hi, v16qi, si);
+ TYPE4 (v4si, v4si, v8hi, si);
+ TYPE4 (v2di, v2di, v4si, si);
+
+ /* Ternary operations, all-doubleword arithmetic. */
+ TYPE5 (v8qi, v8qi, v8qi, v8qi, si);
+ TYPE5 (v4hi, v4hi, v4hi, v4hi, si);
+ TYPE5 (v2si, v2si, v2si, v2si, si);
+ TYPE5 (v2sf, v2sf, v2sf, v2sf, si);
+
+ /* Ternary operations, all-quadword arithmetic. */
+ TYPE5 (v16qi, v16qi, v16qi, v16qi, si);
+ TYPE5 (v8hi, v8hi, v8hi, v8hi, si);
+ TYPE5 (v4si, v4si, v4si, v4si, si);
+ TYPE5 (v4sf, v4sf, v4sf, v4sf, si);
+
+ /* Ternary operations, "long" operations (dest and first operand
+ wider than second and third operands). */
+ TYPE5 (v8hi, v8hi, v8qi, v8qi, si);
+ TYPE5 (v4si, v4si, v4hi, v4hi, si);
+ TYPE5 (v2di, v2di, v2si, v2si, si);
+
+ /* Unops, all-doubleword arithmetic. */
+ TYPE3 (v8qi, v8qi, si);
+ TYPE3 (v4hi, v4hi, si);
+ TYPE3 (v2si, v2si, si);
+ TYPE3 (v2sf, v2sf, si);
+ TYPE3 (di, di, si);
+
+ /* Unops, all-quadword arithmetic. */
+ TYPE3 (v16qi, v16qi, si);
+ TYPE3 (v8hi, v8hi, si);
+ TYPE3 (v4si, v4si, si);
+ TYPE3 (v4sf, v4sf, si);
+ TYPE3 (v2di, v2di, si);
+
+ /* Unops, narrowing. */
+ TYPE3 (v8qi, v8hi, si);
+ TYPE3 (v4hi, v4si, si);
+ TYPE3 (v2si, v2di, si);
+
+ /* Unops, widening. */
+ TYPE3 (v8hi, v8qi, si);
+ TYPE3 (v4si, v4hi, si);
+ TYPE3 (v2di, v2si, si);
+
+ /* Unops, dest elements wider (vpaddl). */
+ TYPE3 (v4hi, v8qi, si);
+ TYPE3 (v2si, v4hi, si);
+ TYPE3 (di, v2si, si);
+ TYPE3 (v8hi, v16qi, si);
+ TYPE3 (v4si, v8hi, si);
+ TYPE3 (v2di, v4si, si);
+
+ /* Get-lane from doubleword insns (single-element result). */
+ TYPE4 (qi, v8qi, si, si);
+ TYPE4 (hi, v4hi, si, si);
+ TYPE4 (si, v2si, si, si);
+ TYPE4 (sf, v2sf, si, si);
+ TYPE4 (di, di, si, si);
+
+ /* Get-lane from quadword insns. */
+ TYPE4 (qi, v16qi, si, si);
+ TYPE4 (hi, v8hi, si, si);
+ TYPE4 (si, v4si, si, si);
+ TYPE4 (sf, v4sf, si, si);
+ TYPE4 (di, v2di, si, si);
+
+ /* Set lane in doubleword insns (single-element operand). */
+ TYPE4 (v8qi, qi, v8qi, si);
+ TYPE4 (v4hi, hi, v4hi, si);
+ TYPE4 (v2si, si, v2si, si);
+ TYPE4 (v2sf, sf, v2sf, si);
+
+ /* Set lane in quadword insns. */
+ TYPE4 (v16qi, qi, v16qi, si);
+ TYPE4 (v8hi, hi, v8hi, si);
+ TYPE4 (v4si, si, v4si, si);
+ TYPE4 (v4sf, sf, v4sf, si);
+ TYPE4 (v2di, di, v2di, si);
+
+ /* Create vector from bit pattern. */
+ TYPE2 (v8qi, di);
+ TYPE2 (v4hi, di);
+ TYPE2 (v2si, di);
+ TYPE2 (v2sf, di);
+ TYPE2 (di, di);
+
+ /* Duplicate an ARM register into lanes of a vector. */
+ TYPE2 (v8qi, qi);
+ TYPE2 (v4hi, hi);
+ TYPE2 (v2si, si);
+ TYPE2 (v2sf, sf);
+
+ TYPE2 (v16qi, qi);
+ TYPE2 (v8hi, hi);
+ TYPE2 (v4si, si);
+ TYPE2 (v4sf, sf);
+ TYPE2 (v2di, di);
+
+ /* Duplicate a lane of a vector to all lanes of another vector. */
+ TYPE3 (v16qi, v8qi, si);
+ TYPE3 (v8hi, v4hi, si);
+ TYPE3 (v4si, v2si, si);
+ TYPE3 (v4sf, v2sf, si);
+ TYPE3 (v2di, di, si);
+
+ /* Combine doubleword vectors into quadword vectors. */
+ TYPE3 (v16qi, v8qi, v8qi);
+ TYPE3 (v8hi, v4hi, v4hi);
+ TYPE3 (v4si, v2si, v2si);
+ TYPE3 (v4sf, v2sf, v2sf);
+ TYPE3 (v2di, di, di);
+
+ /* Split quadword vectors into high or low parts. */
+ TYPE2 (v8qi, v16qi);
+ TYPE2 (v4hi, v8hi);
+ TYPE2 (v2si, v4si);
+ TYPE2 (v2sf, v4sf);
+ TYPE2 (di, v2di);
+
+ /* Conversions, int<->float. */
+ TYPE3 (v2si, v2sf, si);
+ TYPE3 (v2sf, v2si, si);
+ TYPE3 (v4si, v4sf, si);
+ TYPE3 (v4sf, v4si, si);
+
+ /* Conversions, fixed point<->float. */
+ TYPE4 (v2si, v2sf, si, si);
+ TYPE4 (v2sf, v2si, si, si);
+ TYPE4 (v4si, v4sf, si, si);
+ TYPE4 (v4sf, v4si, si, si);
+
+ /* Multiply by scalar (lane). */
+ TYPE5 (v4hi, v4hi, v4hi, si, si);
+ TYPE5 (v2si, v2si, v2si, si, si);
+ TYPE5 (v2sf, v2sf, v2sf, si, si);
+ TYPE5 (v8hi, v8hi, v4hi, si, si);
+ TYPE5 (v4si, v4si, v2si, si, si);
+ TYPE5 (v4sf, v4sf, v2sf, si, si);
+
+ /* Long multiply by scalar (lane). */
+ TYPE5 (v4si, v4hi, v4hi, si, si);
+ TYPE5 (v2di, v2si, v2si, si, si);
+
+ /* Multiply-accumulate etc. by scalar (lane). */
+ TYPE6 (v4hi, v4hi, v4hi, v4hi, si, si);
+ TYPE6 (v2si, v2si, v2si, v2si, si, si);
+ TYPE6 (v2sf, v2sf, v2sf, v2sf, si, si);
+ TYPE6 (v8hi, v8hi, v8hi, v4hi, si, si);
+ TYPE6 (v4si, v4si, v4si, v2si, si, si);
+ TYPE6 (v4sf, v4sf, v4sf, v2sf, si, si);
+
+ /* Multiply-accumulate, etc. by scalar (lane), widening. */
+ TYPE6 (v4si, v4si, v4hi, v4hi, si, si);
+ TYPE6 (v2di, v2di, v2si, v2si, si, si);
+
+ /* Multiply by scalar. */
+ TYPE4 (v4hi, v4hi, hi, si);
+ TYPE4 (v2si, v2si, si, si);
+ TYPE4 (v2sf, v2sf, sf, si);
+
+ TYPE4 (v8hi, v8hi, hi, si);
+ TYPE4 (v4si, v4si, si, si);
+ TYPE4 (v4sf, v4sf, sf, si);
+
+ /* Long multiply by scalar. */
+ TYPE4 (v4si, v4hi, hi, si);
+
+ /* Multiply-accumulate etc. by scalar. */
+ TYPE5 (v4hi, v4hi, v4hi, hi, si);
+ /* TYPE5 (v2si, v2si, v2si, si, si);*/
+ TYPE5 (v2sf, v2sf, v2sf, sf, si);
+ TYPE5 (v8hi, v8hi, v8hi, hi, si);
+ TYPE5 (v4si, v4si, v4si, si, si);
+ TYPE5 (v4sf, v4sf, v4sf, sf, si);
+
+ /* Multiply-accumulate by scalar, widening. */
+ TYPE5 (v4si, v4si, v4hi, hi, si);
+ TYPE5 (v2di, v2di, v2si, si, si);
+
+ /* Bit select operations. */
+ TYPE4 (v8qi, v8qi, v8qi, v8qi);
+ TYPE4 (v4hi, v4hi, v4hi, v4hi);
+ TYPE4 (v2si, v2si, v2si, v2si);
+ TYPE4 (v2sf, v2si, v2sf, v2sf);
+ TYPE4 (di, di, di, di);
+
+ TYPE4 (v16qi, v16qi, v16qi, v16qi);
+ TYPE4 (v8hi, v8hi, v8hi, v8hi);
+ TYPE4 (v4si, v4si, v4si, v4si);
+ TYPE4 (v4sf, v4si, v4sf, v4sf);
+ TYPE4 (v2di, v2di, v2di, v2di);
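+
+ /* Note, not part of the original source: in the float bit-select
+    variants above the first operand is an integer vector of the same
+    shape, since the vbsl selection mask is a bit pattern rather than a
+    floating-point value.  */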
+
+ /* Shift immediate operations. */
+ TYPE4 (v8qi, v8qi, si, si);
+ TYPE4 (v4hi, v4hi, si, si);
+
+ TYPE4 (v16qi, v16qi, si, si);
+ TYPE4 (v8hi, v8hi, si, si);
+ TYPE4 (v2di, v2di, si, si);
+
+ /* Shift immediate, long operations. */
+ TYPE4 (v8hi, v8qi, si, si);
+ TYPE4 (v4si, v4hi, si, si);
+ TYPE4 (v2di, v2si, si, si);
+
+ /* Shift immediate, narrowing operations. */
+ TYPE4 (v8qi, v8hi, si, si);
+ TYPE4 (v4hi, v4si, si, si);
+ TYPE4 (v2si, v2di, si, si);
+
+ /* Shift + accumulate operations. */
+ TYPE5 (v8qi, v8qi, v8qi, si, si);
+ TYPE5 (di, di, di, si, si);
+
+ TYPE5 (v16qi, v16qi, v16qi, si, si);
+ TYPE5 (v8hi, v8hi, v8hi, si, si);
+ TYPE5 (v4sf, v4sf, v4sf, si, si);
+ TYPE5 (v2di, v2di, v2di, si, si);
+
+ /* Operations which return results as pairs. */
+ TYPE4 (void, pv8qi, v8qi, v8qi);
+ TYPE4 (void, pv4hi, v4hi, v4hi);
+ TYPE4 (void, pv2si, v2si, v2si);
+ TYPE4 (void, pv2sf, v2sf, v2sf);
+ TYPE4 (void, pdi, di, di);
+
+ TYPE4 (void, pv16qi, v16qi, v16qi);
+ TYPE4 (void, pv8hi, v8hi, v8hi);
+ TYPE4 (void, pv4si, v4si, v4si);
+ TYPE4 (void, pv4sf, v4sf, v4sf);
+ TYPE4 (void, pv2di, v2di, v2di);
+
+ /* Table look-up. */
+ TYPE3 (v8qi, v8qi, v8qi);
+ TYPE3 (v8qi, ti, v8qi);
+ TYPE3 (v8qi, ei, v8qi);
+ TYPE3 (v8qi, oi, v8qi);
+
+ /* Extended table look-up. */
+ /*TYPE4 (v8qi, v8qi, v8qi, v8qi);*/
+ TYPE4 (v8qi, v8qi, ti, v8qi);
+ TYPE4 (v8qi, v8qi, ei, v8qi);
+ TYPE4 (v8qi, v8qi, oi, v8qi);
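+
+ /* Note, not part of the original source: the opaque ti/ei/oi operand
+    types above carry the two-, three- and four-doubleword-register table
+    arguments of vtbl2/vtbl3/vtbl4 and vtbx2/vtbx3/vtbx4, matching the
+    opaque integer modes defined for structures of vectors earlier.  */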
+
+ /* Load operations, double-word. */
+ tree v8qi_ftype_const_qi_pointer =
+ build_function_type_list (V8QI_type_node, const_intQI_pointer_node, NULL);
+ tree v4hi_ftype_const_hi_pointer =
+ build_function_type_list (V4HI_type_node, const_intHI_pointer_node, NULL);
+ tree v2si_ftype_const_si_pointer =
+ build_function_type_list (V2SI_type_node, const_intSI_pointer_node, NULL);
+ tree di_ftype_const_di_pointer =
+ build_function_type_list (intDI_type_node, const_intDI_pointer_node, NULL);
+ tree v2sf_ftype_const_sf_pointer =
+ build_function_type_list (V2SF_type_node, const_float_pointer_node, NULL);
+
+ /* Load operations, quad-word. */
+ tree v16qi_ftype_const_qi_pointer =
+ build_function_type_list (V16QI_type_node, const_intQI_pointer_node, NULL);
+ tree v8hi_ftype_const_hi_pointer =
+ build_function_type_list (V8HI_type_node, const_intHI_pointer_node, NULL);
+ tree v4si_ftype_const_si_pointer =
+ build_function_type_list (V4SI_type_node, const_intSI_pointer_node, NULL);
+ tree v2di_ftype_const_di_pointer =
+ build_function_type_list (V2DI_type_node, const_intDI_pointer_node, NULL);
+ tree v4sf_ftype_const_sf_pointer =
+ build_function_type_list (V4SF_type_node, const_float_pointer_node, NULL);
+
+ /* Load lane operations, double-word. */
+ tree v8qi_ftype_const_qi_pointer_v8qi_si =
+ build_function_type_list (V8QI_type_node, const_intQI_pointer_node,
+ V8QI_type_node, intSI_type_node, NULL);
+ tree v4hi_ftype_const_hi_pointer_v4hi_si =
+ build_function_type_list (V4HI_type_node, const_intHI_pointer_node,
+ V4HI_type_node, intSI_type_node, NULL);
+ tree v2si_ftype_const_si_pointer_v2si_si =
+ build_function_type_list (V2SI_type_node, const_intSI_pointer_node,
+ V2SI_type_node, intSI_type_node, NULL);
+ tree di_ftype_const_di_pointer_di_si =
+ build_function_type_list (intDI_type_node, const_intDI_pointer_node,
+ intDI_type_node, intSI_type_node, NULL);
+ tree v2sf_ftype_const_sf_pointer_v2sf_si =
+ build_function_type_list (V2SF_type_node, const_float_pointer_node,
+ V2SF_type_node, intSI_type_node, NULL);
+
+ /* Load lane operations, quad-word. */
+ tree v16qi_ftype_const_qi_pointer_v16qi_si =
+ build_function_type_list (V16QI_type_node, const_intQI_pointer_node,
+ V16QI_type_node, intSI_type_node, NULL);
+ tree v8hi_ftype_const_hi_pointer_v8hi_si =
+ build_function_type_list (V8HI_type_node, const_intHI_pointer_node,
+ V8HI_type_node, intSI_type_node, NULL);
+ tree v4si_ftype_const_si_pointer_v4si_si =
+ build_function_type_list (V4SI_type_node, const_intSI_pointer_node,
+ V4SI_type_node, intSI_type_node, NULL);
+ tree v2di_ftype_const_di_pointer_v2di_si =
+ build_function_type_list (V2DI_type_node, const_intDI_pointer_node,
+ V2DI_type_node, intSI_type_node, NULL);
+ tree v4sf_ftype_const_sf_pointer_v4sf_si =
+ build_function_type_list (V4SF_type_node, const_float_pointer_node,
+ V4SF_type_node, intSI_type_node, NULL);
+
+ /* Store operations, double-word. */
+ tree void_ftype_qi_pointer_v8qi =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ V8QI_type_node, NULL);
+ tree void_ftype_hi_pointer_v4hi =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ V4HI_type_node, NULL);
+ tree void_ftype_si_pointer_v2si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ V2SI_type_node, NULL);
+ tree void_ftype_di_pointer_di =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ intDI_type_node, NULL);
+ tree void_ftype_sf_pointer_v2sf =
+ build_function_type_list (void_type_node, float_pointer_node,
+ V2SF_type_node, NULL);
+
+ /* Store operations, quad-word. */
+ tree void_ftype_qi_pointer_v16qi =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ V16QI_type_node, NULL);
+ tree void_ftype_hi_pointer_v8hi =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ V8HI_type_node, NULL);
+ tree void_ftype_si_pointer_v4si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ V4SI_type_node, NULL);
+ tree void_ftype_di_pointer_v2di =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ V2DI_type_node, NULL);
+ tree void_ftype_sf_pointer_v4sf =
+ build_function_type_list (void_type_node, float_pointer_node,
+ V4SF_type_node, NULL);
+
+ /* Store lane operations, double-word. */
+ tree void_ftype_qi_pointer_v8qi_si =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ V8QI_type_node, intSI_type_node, NULL);
+ tree void_ftype_hi_pointer_v4hi_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ V4HI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_v2si_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ V2SI_type_node, intSI_type_node, NULL);
+ tree void_ftype_di_pointer_di_si =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ intDI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_v2sf_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ V2SF_type_node, intSI_type_node, NULL);
+
+ /* Store lane operations, quad-word. */
+ tree void_ftype_qi_pointer_v16qi_si =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ V16QI_type_node, intSI_type_node, NULL);
+ tree void_ftype_hi_pointer_v8hi_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ V8HI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_v4si_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ V4SI_type_node, intSI_type_node, NULL);
+ tree void_ftype_di_pointer_v2di_si =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ V2DI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_v4sf_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ V4SF_type_node, intSI_type_node, NULL);
+
+ /* Load size-2 structure operations, double-word. */
+ tree ti_ftype_const_qi_pointer =
+ build_function_type_list (intTI_type_node, const_intQI_pointer_node, NULL);
+ tree ti_ftype_const_hi_pointer =
+ build_function_type_list (intTI_type_node, const_intHI_pointer_node, NULL);
+ tree ti_ftype_const_si_pointer =
+ build_function_type_list (intTI_type_node, const_intSI_pointer_node, NULL);
+ tree ti_ftype_const_di_pointer =
+ build_function_type_list (intTI_type_node, const_intDI_pointer_node, NULL);
+ tree ti_ftype_const_sf_pointer =
+ build_function_type_list (intTI_type_node, const_float_pointer_node, NULL);
+
+ /* Load size-2 structure operations, quad-word; also load size-4,
+ double-word. */
+ tree oi_ftype_const_qi_pointer =
+ build_function_type_list (intOI_type_node, const_intQI_pointer_node, NULL);
+ tree oi_ftype_const_hi_pointer =
+ build_function_type_list (intOI_type_node, const_intHI_pointer_node, NULL);
+ tree oi_ftype_const_si_pointer =
+ build_function_type_list (intOI_type_node, const_intSI_pointer_node, NULL);
+ tree oi_ftype_const_sf_pointer =
+ build_function_type_list (intOI_type_node, const_float_pointer_node, NULL);
+
+ /* Load lane size-2 structure operations, double-word. */
+ tree ti_ftype_const_qi_pointer_ti_si =
+ build_function_type_list (intTI_type_node, const_intQI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree ti_ftype_const_hi_pointer_ti_si =
+ build_function_type_list (intTI_type_node, const_intHI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree ti_ftype_const_si_pointer_ti_si =
+ build_function_type_list (intTI_type_node, const_intSI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree ti_ftype_const_sf_pointer_ti_si =
+ build_function_type_list (intTI_type_node, const_float_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+
+ /* Load lane size-2 structure operations, quad-word; also load lane size-4,
+ double-word. */
+ tree oi_ftype_const_hi_pointer_oi_si =
+ build_function_type_list (intOI_type_node, const_intHI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+ tree oi_ftype_const_si_pointer_oi_si =
+ build_function_type_list (intOI_type_node, const_intSI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+ tree oi_ftype_const_sf_pointer_oi_si =
+ build_function_type_list (intOI_type_node, const_float_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+
+ /* Store size-2 structure operations, double-word. */
+ tree void_ftype_qi_pointer_ti =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intTI_type_node, NULL);
+ tree void_ftype_hi_pointer_ti =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intTI_type_node, NULL);
+ tree void_ftype_si_pointer_ti =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intTI_type_node, NULL);
+ tree void_ftype_di_pointer_ti =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ intTI_type_node, NULL);
+ tree void_ftype_sf_pointer_ti =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intTI_type_node, NULL);
+
+ /* Store size-2 structure operations, quad-word; also store size-4,
+ double-word. */
+ tree void_ftype_qi_pointer_oi =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intOI_type_node, NULL);
+ tree void_ftype_hi_pointer_oi =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intOI_type_node, NULL);
+ tree void_ftype_si_pointer_oi =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intOI_type_node, NULL);
+ tree void_ftype_sf_pointer_oi =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intOI_type_node, NULL);
+
+ /* Store lane size-2 structure operations, double-word. */
+ tree void_ftype_qi_pointer_ti_si =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree void_ftype_hi_pointer_ti_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_ti_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_ti_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intTI_type_node, intSI_type_node, NULL);
+
+ /* Store lane size-2 structure operations, quad-word; also store
+ lane size-4, double-word. */
+ tree void_ftype_hi_pointer_oi_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_oi_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_oi_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+
+ /* Load size-3 structure operations, double-word. */
+ tree ei_ftype_const_qi_pointer =
+ build_function_type_list (intEI_type_node, const_intQI_pointer_node, NULL);
+ tree ei_ftype_const_hi_pointer =
+ build_function_type_list (intEI_type_node, const_intHI_pointer_node, NULL);
+ tree ei_ftype_const_si_pointer =
+ build_function_type_list (intEI_type_node, const_intSI_pointer_node, NULL);
+ tree ei_ftype_const_di_pointer =
+ build_function_type_list (intEI_type_node, const_intDI_pointer_node, NULL);
+ tree ei_ftype_const_sf_pointer =
+ build_function_type_list (intEI_type_node, const_float_pointer_node, NULL);
+
+ /* Load size-3 structure operations, quad-word. */
+ tree ci_ftype_const_qi_pointer =
+ build_function_type_list (intCI_type_node, const_intQI_pointer_node, NULL);
+ tree ci_ftype_const_hi_pointer =
+ build_function_type_list (intCI_type_node, const_intHI_pointer_node, NULL);
+ tree ci_ftype_const_si_pointer =
+ build_function_type_list (intCI_type_node, const_intSI_pointer_node, NULL);
+ tree ci_ftype_const_sf_pointer =
+ build_function_type_list (intCI_type_node, const_float_pointer_node, NULL);
+
+ /* Load lane size-3 structure operations, double-word. */
+ tree ei_ftype_const_qi_pointer_ei_si =
+ build_function_type_list (intEI_type_node, const_intQI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree ei_ftype_const_hi_pointer_ei_si =
+ build_function_type_list (intEI_type_node, const_intHI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree ei_ftype_const_si_pointer_ei_si =
+ build_function_type_list (intEI_type_node, const_intSI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree ei_ftype_const_sf_pointer_ei_si =
+ build_function_type_list (intEI_type_node, const_float_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+
+ /* Load lane size-3 structure operations, quad-word. */
+ tree ci_ftype_const_hi_pointer_ci_si =
+ build_function_type_list (intCI_type_node, const_intHI_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+ tree ci_ftype_const_si_pointer_ci_si =
+ build_function_type_list (intCI_type_node, const_intSI_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+ tree ci_ftype_const_sf_pointer_ci_si =
+ build_function_type_list (intCI_type_node, const_float_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+
+ /* Store size-3 structure operations, double-word. */
+ tree void_ftype_qi_pointer_ei =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intEI_type_node, NULL);
+ tree void_ftype_hi_pointer_ei =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intEI_type_node, NULL);
+ tree void_ftype_si_pointer_ei =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intEI_type_node, NULL);
+ tree void_ftype_di_pointer_ei =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ intEI_type_node, NULL);
+ tree void_ftype_sf_pointer_ei =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intEI_type_node, NULL);
+
+ /* Store size-3 structure operations, quad-word. */
+ tree void_ftype_qi_pointer_ci =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intCI_type_node, NULL);
+ tree void_ftype_hi_pointer_ci =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intCI_type_node, NULL);
+ tree void_ftype_si_pointer_ci =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intCI_type_node, NULL);
+ tree void_ftype_sf_pointer_ci =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intCI_type_node, NULL);
+
+ /* Store lane size-3 structure operations, double-word. */
+ tree void_ftype_qi_pointer_ei_si =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree void_ftype_hi_pointer_ei_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_ei_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_ei_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intEI_type_node, intSI_type_node, NULL);
+
+ /* Store lane size-3 structure operations, quad-word. */
+ tree void_ftype_hi_pointer_ci_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_ci_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_ci_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intCI_type_node, intSI_type_node, NULL);
+
+ /* Load size-4 structure operations, double-word. */
+ tree oi_ftype_const_di_pointer =
+ build_function_type_list (intOI_type_node, const_intDI_pointer_node, NULL);
+
+ /* Load size-4 structure operations, quad-word. */
+ tree xi_ftype_const_qi_pointer =
+ build_function_type_list (intXI_type_node, const_intQI_pointer_node, NULL);
+ tree xi_ftype_const_hi_pointer =
+ build_function_type_list (intXI_type_node, const_intHI_pointer_node, NULL);
+ tree xi_ftype_const_si_pointer =
+ build_function_type_list (intXI_type_node, const_intSI_pointer_node, NULL);
+ tree xi_ftype_const_sf_pointer =
+ build_function_type_list (intXI_type_node, const_float_pointer_node, NULL);
+
+ /* Load lane size-4 structure operations, double-word. */
+ tree oi_ftype_const_qi_pointer_oi_si =
+ build_function_type_list (intOI_type_node, const_intQI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+
+ /* Load lane size-4 structure operations, quad-word. */
+ tree xi_ftype_const_hi_pointer_xi_si =
+ build_function_type_list (intXI_type_node, const_intHI_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+ tree xi_ftype_const_si_pointer_xi_si =
+ build_function_type_list (intXI_type_node, const_intSI_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+ tree xi_ftype_const_sf_pointer_xi_si =
+ build_function_type_list (intXI_type_node, const_float_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+
+ /* Store size-4 structure operations, double-word. */
+ tree void_ftype_di_pointer_oi =
+ build_function_type_list (void_type_node, intDI_pointer_node,
+ intOI_type_node, NULL);
+
+ /* Store size-4 structure operations, quad-word. */
+ tree void_ftype_qi_pointer_xi =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intXI_type_node, NULL);
+ tree void_ftype_hi_pointer_xi =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intXI_type_node, NULL);
+ tree void_ftype_si_pointer_xi =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intXI_type_node, NULL);
+ tree void_ftype_sf_pointer_xi =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intXI_type_node, NULL);
+
+ /* Store lane size-4 structure operations, double-word. */
+ tree void_ftype_qi_pointer_oi_si =
+ build_function_type_list (void_type_node, intQI_pointer_node,
+ intOI_type_node, intSI_type_node, NULL);
+
+ /* Store lane size-4 structure operations, quad-word. */
+ tree void_ftype_hi_pointer_xi_si =
+ build_function_type_list (void_type_node, intHI_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+ tree void_ftype_si_pointer_xi_si =
+ build_function_type_list (void_type_node, intSI_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+ tree void_ftype_sf_pointer_xi_si =
+ build_function_type_list (void_type_node, float_pointer_node,
+ intXI_type_node, intSI_type_node, NULL);
+
+ tree reinterp_ftype_dreg[5][5];
+ tree reinterp_ftype_qreg[5][5];
+ tree dreg_types[5], qreg_types[5];
+
+ /* APPLE LOCAL begin 7083296 Build without warnings. */
+ /* Define typedefs which exactly correspond to the modes we are basing vector
+ types on. If you change these names you'll need to change
+ the table used by arm_mangle_type too. */
+ (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node,
+ "__builtin_neon_qi");
+ (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node,
+ "__builtin_neon_hi");
+ (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node,
+ "__builtin_neon_si");
+ (*lang_hooks.types.register_builtin_type) (neon_float_type_node,
+ "__builtin_neon_sf");
+ (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node,
+ "__builtin_neon_di");
+
+ (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node,
+ "__builtin_neon_poly8");
+ (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node,
+ "__builtin_neon_poly16");
+
+ (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
+ "__builtin_neon_uqi");
+ (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
+ "__builtin_neon_uhi");
+ (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
+ "__builtin_neon_usi");
+ (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+ "__builtin_neon_udi");
+
+ (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+ "__builtin_neon_ti");
+ (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+ "__builtin_neon_ei");
+ (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+ "__builtin_neon_oi");
+ (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+ "__builtin_neon_ci");
+ (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+ "__builtin_neon_xi");
+ /* APPLE LOCAL end 7083296 Build without warnings. */
+
+ dreg_types[0] = V8QI_type_node;
+ dreg_types[1] = V4HI_type_node;
+ dreg_types[2] = V2SI_type_node;
+ dreg_types[3] = V2SF_type_node;
+ dreg_types[4] = neon_intDI_type_node;
+
+ qreg_types[0] = V16QI_type_node;
+ qreg_types[1] = V8HI_type_node;
+ qreg_types[2] = V4SI_type_node;
+ qreg_types[3] = V4SF_type_node;
+ qreg_types[4] = V2DI_type_node;
+
+ for (i = 0; i < 5; i++)
+ {
+ int j;
+ for (j = 0; j < 5; j++)
+ {
+ reinterp_ftype_dreg[i][j]
+ = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+ reinterp_ftype_qreg[i][j]
+ = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
+ }
+ }
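+
+ /* Illustrative only, not part of the original source: after these loops
+    reinterp_ftype_dreg[0][1] is the function type "V8QI (V4HI)", i.e. the
+    signature for the vreinterpretv8qi variant that takes a v4hi operand
+    in the table above.  */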
+
+ for (i = 0; i < ARRAY_SIZE (neon_builtin_data); i++)
+ {
+ neon_builtin_datum *d = &neon_builtin_data[i];
+ unsigned int j, codeidx = 0;
+
+ d->base_fcode = fcode;
+
+ for (j = 0; j < T_MAX; j++)
+ {
+ const char* const modenames[] = {
+ "v8qi", "v4hi", "v2si", "v2sf", "di",
+ "v16qi", "v8hi", "v4si", "v4sf", "v2di"
+ };
+ char namebuf[60];
+ tree ftype = NULL;
+ enum insn_code icode;
+ enum machine_mode tmode, mode0, mode1, mode2, mode3;
+
+ if ((d->bits & (1 << j)) == 0)
+ continue;
+
+ icode = d->codes[codeidx++];
+
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+ mode2 = insn_data[icode].operand[3].mode;
+ mode3 = insn_data[icode].operand[4].mode;
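+
+ /* Note, not part of the original source: operand 0 of each named
+    pattern is its destination, so tmode is the result mode and
+    mode0..mode3 are the input operand modes tested below.  */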
+
+ switch (d->itype)
+ {
+ case NEON_UNOP:
+ /* A unary operation with one vector operand and a vector
+ destination, plus an extra information word. */
+ gcc_assert (valid_neon_mode (tmode) && valid_neon_mode (mode0)
+ && mode1 == SImode);
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == V8QImode)
+ ftype = v8qi_ftype_v8qi_si;
+ else if (mode0 == V8HImode)
+ ftype = v8qi_ftype_v8hi_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode)
+ ftype = v4hi_ftype_v4hi_si;
+ else if (mode0 == V4SImode)
+ ftype = v4hi_ftype_v4si_si;
+ else if (mode0 == V8QImode)
+ ftype = v4hi_ftype_v8qi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode)
+ ftype = v2si_ftype_v2si_si;
+ else if (mode0 == V2DImode)
+ ftype = v2si_ftype_v2di_si;
+ else if (mode0 == V4HImode)
+ ftype = v2si_ftype_v4hi_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_si;
+ break;
+
+ case DImode:
+ if (mode0 == DImode)
+ ftype = di_ftype_di_si;
+ else if (mode0 == V2SImode)
+ ftype = di_ftype_v2si_si;
+ break;
+
+ case V16QImode:
+ if (mode0 == V16QImode)
+ ftype = v16qi_ftype_v16qi_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode)
+ ftype = v8hi_ftype_v8hi_si;
+ else if (mode0 == V8QImode)
+ ftype = v8hi_ftype_v8qi_si;
+ else if (mode0 == V16QImode)
+ ftype = v8hi_ftype_v16qi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode)
+ ftype = v4si_ftype_v4si_si;
+ else if (mode0 == V4HImode)
+ ftype = v4si_ftype_v4hi_si;
+ else if (mode0 == V8HImode)
+ ftype = v4si_ftype_v8hi_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode)
+ ftype = v4sf_ftype_v4sf_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode)
+ ftype = v2di_ftype_v2di_si;
+ else if (mode0 == V2SImode)
+ ftype = v2di_ftype_v2si_si;
+ else if (mode0 == V4SImode)
+ ftype = v2di_ftype_v4si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_BINOP:
+ case NEON_LOGICBINOP:
+ case NEON_SHIFTINSERT:
+ /* A binary operation with two vector operands and a vector
+ destination, plus an extra information word. */
+ gcc_assert (valid_neon_mode (tmode) && valid_neon_mode (mode0)
+ && valid_neon_mode (mode1) && mode2 == SImode);
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == V8QImode && mode1 == V8QImode)
+ ftype = v8qi_ftype_v8qi_v8qi_si;
+ else if (mode0 == V8HImode && mode1 == V8HImode)
+ ftype = v8qi_ftype_v8hi_v8hi_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_si;
+ else if (mode0 == V4SImode && mode1 == V4SImode)
+ ftype = v4hi_ftype_v4si_v4si_si;
+ else if (mode0 == V4HImode && mode1 == V8QImode)
+ ftype = v4hi_ftype_v4hi_v8qi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_si;
+ else if (mode0 == V2DImode && mode1 == V2DImode)
+ ftype = v2si_ftype_v2di_v2di_si;
+ else if (mode0 == V2SFmode && mode1 == V2SFmode)
+ ftype = v2si_ftype_v2sf_v2sf_si;
+ else if (mode0 == V2SImode && mode1 == V4HImode)
+ ftype = v2si_ftype_v2si_v4hi_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_v2sf_si;
+ break;
+
+ case DImode:
+ if (mode0 == DImode && mode1 == DImode)
+ ftype = di_ftype_di_di_si;
+ else if (mode0 == DImode && mode1 == V2SImode)
+ ftype = di_ftype_di_v2si_si;
+ break;
+
+ case V16QImode:
+ if (mode0 == V16QImode && mode1 == V16QImode)
+ ftype = v16qi_ftype_v16qi_v16qi_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V8HImode)
+ ftype = v8hi_ftype_v8hi_v8hi_si;
+ else if (mode0 == V8QImode && mode1 == V8QImode)
+ ftype = v8hi_ftype_v8qi_v8qi_si;
+ else if (mode0 == V8HImode && mode1 == V8QImode)
+ ftype = v8hi_ftype_v8hi_v8qi_si;
+ else if (mode0 == V8HImode && mode1 == V16QImode)
+ ftype = v8hi_ftype_v8hi_v16qi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V4SImode)
+ ftype = v4si_ftype_v4si_v4si_si;
+ else if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4si_ftype_v4hi_v4hi_si;
+ else if (mode0 == V4SImode && mode1 == V4HImode)
+ ftype = v4si_ftype_v4si_v4hi_si;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode)
+ ftype = v4si_ftype_v4sf_v4sf_si;
+ else if (mode0 == V4SImode && mode1 == V8HImode)
+ ftype = v4si_ftype_v4si_v8hi_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == V4SFmode)
+ ftype = v4sf_ftype_v4sf_v4sf_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode && mode1 == V2DImode)
+ ftype = v2di_ftype_v2di_v2di_si;
+ else if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2di_ftype_v2si_v2si_si;
+ else if (mode0 == V2DImode && mode1 == V2SImode)
+ ftype = v2di_ftype_v2di_v2si_si;
+ else if (mode0 == V2DImode && mode1 == V4SImode)
+ ftype = v2di_ftype_v2di_v4si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_TERNOP:
+ /* A ternary operation with three vector operands and a
+ vector destination, plus an extra information
+ word. */
+ gcc_assert (valid_neon_mode (tmode) && valid_neon_mode (mode0)
+ && valid_neon_mode (mode1)
+ && valid_neon_mode (mode2)
+ && mode3 == SImode);
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == V8QImode && mode1 == V8QImode
+ && mode2 == V8QImode)
+ ftype = v8qi_ftype_v8qi_v8qi_v8qi_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode
+ && mode2 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_v4hi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode
+ && mode2 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_v2si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode
+ && mode2 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_v2sf_v2sf_si;
+ break;
+
+ case V16QImode:
+ if (mode0 == V16QImode && mode1 == V16QImode
+ && mode2 == V16QImode)
+ ftype = v16qi_ftype_v16qi_v16qi_v16qi_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V8HImode
+ && mode2 == V8HImode)
+ ftype = v8hi_ftype_v8hi_v8hi_v8hi_si;
+ else if (mode0 == V8HImode && mode1 == V8QImode
+ && mode2 == V8QImode)
+ ftype = v8hi_ftype_v8hi_v8qi_v8qi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V4SImode
+ && mode2 == V4SImode)
+ ftype = v4si_ftype_v4si_v4si_v4si_si;
+ else if (mode0 == V4SImode && mode1 == V4HImode
+ && mode2 == V4HImode)
+ ftype = v4si_ftype_v4si_v4hi_v4hi_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == V4SFmode
+ && mode2 == V4SFmode)
+ ftype = v4sf_ftype_v4sf_v4sf_v4sf_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode && mode1 == V2SImode
+ && mode2 == V2SImode)
+ ftype = v2di_ftype_v2di_v2si_v2si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_GETLANE:
+ /* Vector lane extraction. */
+ gcc_assert (valid_neon_mode (mode0) && mode1 == SImode
+ && mode2 == SImode);
+ switch (tmode)
+ {
+ case QImode:
+ if (mode0 == V8QImode)
+ ftype = qi_ftype_v8qi_si_si;
+ else if (mode0 == V16QImode)
+ ftype = qi_ftype_v16qi_si_si;
+ break;
+
+ case HImode:
+ if (mode0 == V4HImode)
+ ftype = hi_ftype_v4hi_si_si;
+ else if (mode0 == V8HImode)
+ ftype = hi_ftype_v8hi_si_si;
+ break;
+
+ case SImode:
+ if (mode0 == V2SImode)
+ ftype = si_ftype_v2si_si_si;
+ else if (mode0 == V4SImode)
+ ftype = si_ftype_v4si_si_si;
+ break;
+
+ case SFmode:
+ if (mode0 == V2SFmode)
+ ftype = sf_ftype_v2sf_si_si;
+ else if (mode0 == V4SFmode)
+ ftype = sf_ftype_v4sf_si_si;
+ break;
+
+ case DImode:
+ if (mode0 == DImode)
+ ftype = di_ftype_di_si_si;
+ else if (mode0 == V2DImode)
+ ftype = di_ftype_v2di_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SETLANE:
+ {
+ /* Set lanes in vector. */
+ gcc_assert (valid_neon_mode (mode1) && mode2 == SImode);
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == QImode && mode1 == V8QImode)
+ ftype = v8qi_ftype_qi_v8qi_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == HImode && mode1 == V4HImode)
+ ftype = v4hi_ftype_hi_v4hi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == SImode && mode1 == V2SImode)
+ ftype = v2si_ftype_si_v2si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == SFmode && mode1 == V2SFmode)
+ ftype = v2sf_ftype_sf_v2sf_si;
+ break;
+
+ case DImode:
+ if (mode0 == DImode && mode1 == DImode)
+ ftype = di_ftype_di_di_si;
+ break;
+
+ case V16QImode:
+ if (mode0 == QImode && mode1 == V16QImode)
+ ftype = v16qi_ftype_qi_v16qi_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == HImode && mode1 == V8HImode)
+ ftype = v8hi_ftype_hi_v8hi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == SImode && mode1 == V4SImode)
+ ftype = v4si_ftype_si_v4si_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == SFmode && mode1 == V4SFmode)
+ ftype = v4sf_ftype_sf_v4sf_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == DImode && mode1 == V2DImode)
+ ftype = v2di_ftype_di_v2di_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_CREATE:
+ gcc_assert (mode0 == DImode);
+ /* Create vector from bit pattern. */
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_di; break;
+ case V4HImode: ftype = v4hi_ftype_di; break;
+ case V2SImode: ftype = v2si_ftype_di; break;
+ case V2SFmode: ftype = v2sf_ftype_di; break;
+ case DImode: ftype = di_ftype_di; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_DUP:
+ gcc_assert ((mode0 == DImode && tmode == DImode)
+ || mode0 == GET_MODE_INNER (tmode));
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_qi; break;
+ case V4HImode: ftype = v4hi_ftype_hi; break;
+ case V2SImode: ftype = v2si_ftype_si; break;
+ case V2SFmode: ftype = v2sf_ftype_sf; break;
+ case DImode: ftype = di_ftype_di; break;
+ case V16QImode: ftype = v16qi_ftype_qi; break;
+ case V8HImode: ftype = v8hi_ftype_hi; break;
+ case V4SImode: ftype = v4si_ftype_si; break;
+ case V4SFmode: ftype = v4sf_ftype_sf; break;
+ case V2DImode: ftype = v2di_ftype_di; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_DUPLANE:
+ gcc_assert (valid_neon_mode (mode0) && mode1 == SImode);
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_v8qi_si; break;
+ case V4HImode: ftype = v4hi_ftype_v4hi_si; break;
+ case V2SImode: ftype = v2si_ftype_v2si_si; break;
+ case V2SFmode: ftype = v2sf_ftype_v2sf_si; break;
+ case DImode: ftype = di_ftype_di_si; break;
+ case V16QImode: ftype = v16qi_ftype_v8qi_si; break;
+ case V8HImode: ftype = v8hi_ftype_v4hi_si; break;
+ case V4SImode: ftype = v4si_ftype_v2si_si; break;
+ case V4SFmode: ftype = v4sf_ftype_v2sf_si; break;
+ case V2DImode: ftype = v2di_ftype_di_si; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SHIFTIMM:
+ gcc_assert (mode1 == SImode && mode2 == SImode);
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == V8QImode)
+ ftype = v8qi_ftype_v8qi_si_si;
+ else if (mode0 == V8HImode)
+ ftype = v8qi_ftype_v8hi_si_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode)
+ ftype = v4hi_ftype_v4hi_si_si;
+ else if (mode0 == V4SImode)
+ ftype = v4hi_ftype_v4si_si_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode)
+ ftype = v2si_ftype_v2si_si_si;
+ else if (mode0 == V2DImode)
+ ftype = v2si_ftype_v2di_si_si;
+ break;
+
+ case DImode:
+ if (mode0 == DImode)
+ ftype = di_ftype_di_si_si;
+ break;
+
+ case V16QImode:
+ if (mode0 == V16QImode)
+ ftype = v16qi_ftype_v16qi_si_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode)
+ ftype = v8hi_ftype_v8hi_si_si;
+ else if (mode0 == V8QImode)
+ ftype = v8hi_ftype_v8qi_si_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode)
+ ftype = v4si_ftype_v4si_si_si;
+ else if (mode0 == V4HImode)
+ ftype = v4si_ftype_v4hi_si_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode)
+ ftype = v2di_ftype_v2di_si_si;
+ else if (mode0 == V2SImode)
+ ftype = v2di_ftype_v2si_si_si;
+ break;
+
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SHIFTACC:
+ gcc_assert (tmode == mode0 && mode0 == mode1 && mode2 == SImode
+ && mode3 == SImode);
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_v8qi_v8qi_si_si; break;
+ case V4HImode: ftype = v4hi_ftype_v4hi_v4hi_si_si; break;
+ case V2SImode: ftype = v2si_ftype_v2si_v2si_si_si; break;
+ case V2SFmode: ftype = v2sf_ftype_v2sf_v2sf_si_si; break;
+ case DImode: ftype = di_ftype_di_di_si_si; break;
+ case V16QImode: ftype = v16qi_ftype_v16qi_v16qi_si_si; break;
+ case V8HImode: ftype = v8hi_ftype_v8hi_v8hi_si_si; break;
+ case V4SImode: ftype = v4si_ftype_v4si_v4si_si_si; break;
+ case V4SFmode: ftype = v4sf_ftype_v4sf_v4sf_si_si; break;
+ case V2DImode: ftype = v2di_ftype_v2di_v2di_si_si; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_COMBINE:
+ gcc_assert (valid_neon_mode (mode0) && valid_neon_mode (mode1));
+ switch (tmode)
+ {
+ case V16QImode:
+ if (mode0 == V8QImode && mode1 == V8QImode)
+ ftype = v16qi_ftype_v8qi_v8qi;
+ break;
+
+ case V8HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v8hi_ftype_v4hi_v4hi;
+ break;
+
+ case V4SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v4si_ftype_v2si_v2si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode)
+ ftype = v4sf_ftype_v2sf_v2sf;
+ break;
+
+ case V2DImode:
+ if (mode0 == DImode && mode1 == DImode)
+ ftype = v2di_ftype_di_di;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SPLIT:
+ gcc_assert (valid_neon_mode (mode0));
+ switch (tmode)
+ {
+ case V8QImode:
+ if (mode0 == V16QImode)
+ ftype = v8qi_ftype_v16qi;
+ break;
+
+ case V4HImode:
+ if (mode0 == V8HImode)
+ ftype = v4hi_ftype_v8hi;
+ break;
+
+ case V2SImode:
+ if (mode0 == V4SImode)
+ ftype = v2si_ftype_v4si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V4SFmode)
+ ftype = v2sf_ftype_v4sf;
+ break;
+
+ case DImode:
+ if (mode0 == V2DImode)
+ ftype = di_ftype_v2di;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_CONVERT:
+ gcc_assert (valid_neon_mode (mode0) && mode1 == SImode);
+ switch (tmode)
+ {
+ case V2SImode:
+ if (mode0 == V2SFmode)
+ ftype = v2si_ftype_v2sf_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SImode)
+ ftype = v2sf_ftype_v2si_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SFmode)
+ ftype = v4si_ftype_v4sf_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SImode)
+ ftype = v4sf_ftype_v4si_si;
+ break;
+
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_FIXCONV:
+ gcc_assert (valid_neon_mode (mode0) && mode1 == SImode
+ && mode2 == SImode);
+ switch (tmode)
+ {
+ case V2SImode:
+ if (mode0 == V2SFmode)
+ ftype = v2si_ftype_v2sf_si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SImode)
+ ftype = v2sf_ftype_v2si_si_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SFmode)
+ ftype = v4si_ftype_v4sf_si_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SImode)
+ ftype = v4sf_ftype_v4si_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_LANEMUL:
+ {
+ enum machine_mode mode3 = insn_data[icode].operand[4].mode;
+ gcc_assert (valid_neon_mode (mode0) && valid_neon_mode (mode1)
+ && mode2 == SImode && mode3 == SImode);
+ switch (tmode)
+ {
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_si_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_v2sf_si_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V4HImode)
+ ftype = v8hi_ftype_v8hi_v4hi_si_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V2SImode)
+ ftype = v4si_ftype_v4si_v2si_si_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == V2SFmode)
+ ftype = v4sf_ftype_v4sf_v2sf_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_LANEMULL:
+ {
+ enum machine_mode mode3 = insn_data[icode].operand[4].mode;
+ gcc_assert (valid_neon_mode (mode0) && valid_neon_mode (mode1)
+ && mode2 == SImode && mode3 == SImode);
+ switch (tmode)
+ {
+ case V4SImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4si_ftype_v4hi_v4hi_si_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2di_ftype_v2si_v2si_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_LANEMULH:
+ {
+ enum machine_mode mode3 = insn_data[icode].operand[4].mode;
+ gcc_assert (valid_neon_mode (mode0) && valid_neon_mode (mode1)
+ && mode2 == SImode && mode3 == SImode);
+ switch (tmode)
+ {
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V2SImode)
+ ftype = v4si_ftype_v4si_v2si_si_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V4HImode)
+ ftype = v8hi_ftype_v8hi_v4hi_si_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_si_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_LANEMAC:
+ {
+ enum machine_mode mode3 = insn_data[icode].operand[4].mode;
+ enum machine_mode mode4 = insn_data[icode].operand[5].mode;
+ gcc_assert (valid_neon_mode (mode0) && valid_neon_mode (mode1)
+ && valid_neon_mode (mode2) && mode3 == SImode
+ && mode4 == SImode);
+ switch (tmode)
+ {
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode
+ && mode2 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_v4hi_si_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode
+ && mode2 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_v2si_si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode
+ && mode2 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_v2sf_v2sf_si_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V8HImode
+ && mode2 == V4HImode)
+ ftype = v8hi_ftype_v8hi_v8hi_v4hi_si_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V4SImode
+ && mode2 == V2SImode)
+ ftype = v4si_ftype_v4si_v4si_v2si_si_si;
+ else if (mode0 == V4SImode && mode1 == V4HImode
+ && mode2 == V4HImode)
+ ftype = v4si_ftype_v4si_v4hi_v4hi_si_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == V4SFmode
+ && mode2 == V2SFmode)
+ ftype = v4sf_ftype_v4sf_v4sf_v2sf_si_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode && mode1 == V2SImode
+ && mode2 == V2SImode)
+ ftype = v2di_ftype_v2di_v2si_v2si_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_SCALARMUL:
+ switch (tmode)
+ {
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == HImode)
+ ftype = v4hi_ftype_v4hi_hi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == SImode)
+ ftype = v2si_ftype_v2si_si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == SFmode)
+ ftype = v2sf_ftype_v2sf_sf_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == HImode)
+ ftype = v8hi_ftype_v8hi_hi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == SImode)
+ ftype = v4si_ftype_v4si_si_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == SFmode)
+ ftype = v4sf_ftype_v4sf_sf_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SCALARMULL:
+ switch (tmode)
+ {
+ case V4SImode:
+ if (mode0 == V4HImode && mode1 == HImode)
+ ftype = v4si_ftype_v4hi_hi_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2SImode && mode1 == SImode)
+ ftype = v2di_ftype_v2si_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_SCALARMULH:
+ {
+ switch (tmode)
+ {
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == SImode)
+ ftype = v4si_ftype_v4si_si_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == HImode)
+ ftype = v8hi_ftype_v8hi_hi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == SImode)
+ ftype = v2si_ftype_v2si_si_si;
+ break;
+
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == HImode)
+ ftype = v4hi_ftype_v4hi_hi_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_SCALARMAC:
+ {
+ gcc_assert (mode2 == GET_MODE_INNER (mode1));
+ switch (tmode)
+ {
+ case V4HImode:
+ if (mode0 == V4HImode && mode1 == V4HImode)
+ ftype = v4hi_ftype_v4hi_v4hi_hi_si;
+ break;
+
+ case V2SImode:
+ if (mode0 == V2SImode && mode1 == V2SImode)
+ ftype = v2si_ftype_v2si_v2si_si_si;
+ break;
+
+ case V2SFmode:
+ if (mode0 == V2SFmode && mode1 == V2SFmode)
+ ftype = v2sf_ftype_v2sf_v2sf_sf_si;
+ break;
+
+ case V8HImode:
+ if (mode0 == V8HImode && mode1 == V8HImode)
+ ftype = v8hi_ftype_v8hi_v8hi_hi_si;
+ break;
+
+ case V4SImode:
+ if (mode0 == V4SImode && mode1 == V4SImode)
+ ftype = v4si_ftype_v4si_v4si_si_si;
+ else if (mode0 == V4SImode && mode1 == V4HImode)
+ ftype = v4si_ftype_v4si_v4hi_hi_si;
+ break;
+
+ case V4SFmode:
+ if (mode0 == V4SFmode && mode1 == V4SFmode)
+ ftype = v4sf_ftype_v4sf_v4sf_sf_si;
+ break;
+
+ case V2DImode:
+ if (mode0 == V2DImode && mode1 == V2SImode)
+ ftype = v2di_ftype_v2di_v2si_si_si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_SELECT:
+ gcc_assert (mode1 == mode2
+ && (mode0 == mode1
+ || (mode0 == V2SImode && mode1 == V2SFmode)
+ || (mode0 == V4SImode && mode1 == V4SFmode)));
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_v8qi_v8qi_v8qi; break;
+ case V4HImode: ftype = v4hi_ftype_v4hi_v4hi_v4hi; break;
+ case V2SImode: ftype = v2si_ftype_v2si_v2si_v2si; break;
+ case V2SFmode: ftype = v2sf_ftype_v2si_v2sf_v2sf; break;
+ case DImode: ftype = di_ftype_di_di_di; break;
+ case V16QImode: ftype = v16qi_ftype_v16qi_v16qi_v16qi; break;
+ case V8HImode: ftype = v8hi_ftype_v8hi_v8hi_v8hi; break;
+ case V4SImode: ftype = v4si_ftype_v4si_v4si_v4si; break;
+ case V4SFmode: ftype = v4sf_ftype_v4si_v4sf_v4sf; break;
+ case V2DImode: ftype = v2di_ftype_v2di_v2di_v2di; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_VTBL:
+ {
+ gcc_assert (tmode == V8QImode && mode1 == V8QImode);
+ switch (mode0)
+ {
+ case V8QImode: ftype = v8qi_ftype_v8qi_v8qi; break;
+ case TImode: ftype = v8qi_ftype_ti_v8qi; break;
+ case EImode: ftype = v8qi_ftype_ei_v8qi; break;
+ case OImode: ftype = v8qi_ftype_oi_v8qi; break;
+ default: gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_VTBX:
+ {
+ gcc_assert (tmode == V8QImode && mode0 == V8QImode
+ && mode2 == V8QImode);
+ switch (mode1)
+ {
+ case V8QImode: ftype = v8qi_ftype_v8qi_v8qi_v8qi; break;
+ case TImode: ftype = v8qi_ftype_v8qi_ti_v8qi; break;
+ case EImode: ftype = v8qi_ftype_v8qi_ei_v8qi; break;
+ case OImode: ftype = v8qi_ftype_v8qi_oi_v8qi; break;
+ default: gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_RESULTPAIR:
+ {
+ switch (mode0)
+ {
+ case V8QImode: ftype = void_ftype_pv8qi_v8qi_v8qi; break;
+ case V4HImode: ftype = void_ftype_pv4hi_v4hi_v4hi; break;
+ case V2SImode: ftype = void_ftype_pv2si_v2si_v2si; break;
+ case V2SFmode: ftype = void_ftype_pv2sf_v2sf_v2sf; break;
+ case DImode: ftype = void_ftype_pdi_di_di; break;
+ case V16QImode: ftype = void_ftype_pv16qi_v16qi_v16qi; break;
+ case V8HImode: ftype = void_ftype_pv8hi_v8hi_v8hi; break;
+ case V4SImode: ftype = void_ftype_pv4si_v4si_v4si; break;
+ case V4SFmode: ftype = void_ftype_pv4sf_v4sf_v4sf; break;
+ case V2DImode: ftype = void_ftype_pv2di_v2di_v2di; break;
+ default: gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_REINTERP:
+ {
+ /* We iterate over 5 doubleword types, then 5 quadword
+ types. */
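+ /* rhs indexes the source type within the current size class. */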
+ int rhs = j % 5;
+ switch (tmode)
+ {
+ case V8QImode: ftype = reinterp_ftype_dreg[0][rhs]; break;
+ case V4HImode: ftype = reinterp_ftype_dreg[1][rhs]; break;
+ case V2SImode: ftype = reinterp_ftype_dreg[2][rhs]; break;
+ case V2SFmode: ftype = reinterp_ftype_dreg[3][rhs]; break;
+ case DImode: ftype = reinterp_ftype_dreg[4][rhs]; break;
+ case V16QImode: ftype = reinterp_ftype_qreg[0][rhs]; break;
+ case V8HImode: ftype = reinterp_ftype_qreg[1][rhs]; break;
+ case V4SImode: ftype = reinterp_ftype_qreg[2][rhs]; break;
+ case V4SFmode: ftype = reinterp_ftype_qreg[3][rhs]; break;
+ case V2DImode: ftype = reinterp_ftype_qreg[4][rhs]; break;
+ default: gcc_unreachable ();
+ }
+ }
+ break;
+
+ case NEON_LOAD1:
+ switch (tmode)
+ {
+ case V8QImode: ftype = v8qi_ftype_const_qi_pointer; break;
+ case V4HImode: ftype = v4hi_ftype_const_hi_pointer; break;
+ case V2SImode: ftype = v2si_ftype_const_si_pointer; break;
+ case V2SFmode: ftype = v2sf_ftype_const_sf_pointer; break;
+ case DImode: ftype = di_ftype_const_di_pointer; break;
+ case V16QImode: ftype = v16qi_ftype_const_qi_pointer; break;
+ case V8HImode: ftype = v8hi_ftype_const_hi_pointer; break;
+ case V4SImode: ftype = v4si_ftype_const_si_pointer; break;
+ case V4SFmode: ftype = v4sf_ftype_const_sf_pointer; break;
+ case V2DImode: ftype = v2di_ftype_const_di_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_LOAD1LANE:
+ switch (tmode)
+ {
+ case V8QImode:
+ ftype = v8qi_ftype_const_qi_pointer_v8qi_si;
+ break;
+ case V4HImode:
+ ftype = v4hi_ftype_const_hi_pointer_v4hi_si;
+ break;
+ case V2SImode:
+ ftype = v2si_ftype_const_si_pointer_v2si_si;
+ break;
+ case V2SFmode:
+ ftype = v2sf_ftype_const_sf_pointer_v2sf_si;
+ break;
+ case DImode:
+ ftype = di_ftype_const_di_pointer_di_si;
+ break;
+ case V16QImode:
+ ftype = v16qi_ftype_const_qi_pointer_v16qi_si;
+ break;
+ case V8HImode:
+ ftype = v8hi_ftype_const_hi_pointer_v8hi_si;
+ break;
+ case V4SImode:
+ ftype = v4si_ftype_const_si_pointer_v4si_si;
+ break;
+ case V4SFmode:
+ ftype = v4sf_ftype_const_sf_pointer_v4sf_si;
+ break;
+ case V2DImode:
+ ftype = v2di_ftype_const_di_pointer_v2di_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_STORE1:
+ switch (mode0)
+ {
+ case V8QImode: ftype = void_ftype_qi_pointer_v8qi; break;
+ case V4HImode: ftype = void_ftype_hi_pointer_v4hi; break;
+ case V2SImode: ftype = void_ftype_si_pointer_v2si; break;
+ case V2SFmode: ftype = void_ftype_sf_pointer_v2sf; break;
+ case DImode: ftype = void_ftype_di_pointer_di; break;
+ case V16QImode: ftype = void_ftype_qi_pointer_v16qi; break;
+ case V8HImode: ftype = void_ftype_hi_pointer_v8hi; break;
+ case V4SImode: ftype = void_ftype_si_pointer_v4si; break;
+ case V4SFmode: ftype = void_ftype_sf_pointer_v4sf; break;
+ case V2DImode: ftype = void_ftype_di_pointer_v2di; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_STORE1LANE:
+ switch (mode0)
+ {
+ case V8QImode: ftype = void_ftype_qi_pointer_v8qi_si; break;
+ case V4HImode: ftype = void_ftype_hi_pointer_v4hi_si; break;
+ case V2SImode: ftype = void_ftype_si_pointer_v2si_si; break;
+ case V2SFmode: ftype = void_ftype_sf_pointer_v2sf_si; break;
+ case DImode: ftype = void_ftype_di_pointer_di_si; break;
+ case V16QImode: ftype = void_ftype_qi_pointer_v16qi_si; break;
+ case V8HImode: ftype = void_ftype_hi_pointer_v8hi_si; break;
+ case V4SImode: ftype = void_ftype_si_pointer_v4si_si; break;
+ case V4SFmode: ftype = void_ftype_sf_pointer_v4sf_si; break;
+ case V2DImode: ftype = void_ftype_di_pointer_v2di_si; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case NEON_LOADSTRUCT:
+ switch (tmode)
+ {
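+ /* Each variant's bit in the T_* flags is 1 << j, where j is the
+ index of this variant within the builtin's mode list. */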
+ case TImode:
+ /* vld2 cases. */
+ switch (1 << j)
+ {
+ case T_V8QI: ftype = ti_ftype_const_qi_pointer; break;
+ case T_V4HI: ftype = ti_ftype_const_hi_pointer; break;
+ case T_V2SI: ftype = ti_ftype_const_si_pointer; break;
+ case T_V2SF: ftype = ti_ftype_const_sf_pointer; break;
+ case T_DI: ftype = ti_ftype_const_di_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case EImode:
+ /* vld3 cases. */
+ switch (1 << j)
+ {
+ case T_V8QI: ftype = ei_ftype_const_qi_pointer; break;
+ case T_V4HI: ftype = ei_ftype_const_hi_pointer; break;
+ case T_V2SI: ftype = ei_ftype_const_si_pointer; break;
+ case T_V2SF: ftype = ei_ftype_const_sf_pointer; break;
+ case T_DI: ftype = ei_ftype_const_di_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case OImode:
+ /* vld2q and vld4 cases. */
+ switch (1 << j)
+ {
+ /* vld2q cases. */
+ case T_V16QI: ftype = oi_ftype_const_qi_pointer; break;
+ case T_V8HI: ftype = oi_ftype_const_hi_pointer; break;
+ case T_V4SI: ftype = oi_ftype_const_si_pointer; break;
+ case T_V4SF: ftype = oi_ftype_const_sf_pointer; break;
+ /* vld4 cases. */
+ case T_V8QI: ftype = oi_ftype_const_qi_pointer; break;
+ case T_V4HI: ftype = oi_ftype_const_hi_pointer; break;
+ case T_V2SI: ftype = oi_ftype_const_si_pointer; break;
+ case T_V2SF: ftype = oi_ftype_const_sf_pointer; break;
+ case T_DI: ftype = oi_ftype_const_di_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CImode:
+ /* vld3q cases. */
+ switch (1 << j)
+ {
+ case T_V16QI: ftype = ci_ftype_const_qi_pointer; break;
+ case T_V8HI: ftype = ci_ftype_const_hi_pointer; break;
+ case T_V4SI: ftype = ci_ftype_const_si_pointer; break;
+ case T_V4SF: ftype = ci_ftype_const_sf_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case XImode:
+ /* vld4q cases. */
+ switch (1 << j)
+ {
+ case T_V16QI: ftype = xi_ftype_const_qi_pointer; break;
+ case T_V8HI: ftype = xi_ftype_const_hi_pointer; break;
+ case T_V4SI: ftype = xi_ftype_const_si_pointer; break;
+ case T_V4SF: ftype = xi_ftype_const_sf_pointer; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_LOADSTRUCTLANE:
+ switch (tmode)
+ {
+ case TImode:
+ /* vld2_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8QI:
+ ftype = ti_ftype_const_qi_pointer_ti_si;
+ break;
+ case T_V4HI:
+ ftype = ti_ftype_const_hi_pointer_ti_si;
+ break;
+ case T_V2SI:
+ ftype = ti_ftype_const_si_pointer_ti_si;
+ break;
+ case T_V2SF:
+ ftype = ti_ftype_const_sf_pointer_ti_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case EImode:
+ /* vld3_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8QI:
+ ftype = ei_ftype_const_qi_pointer_ei_si;
+ break;
+ case T_V4HI:
+ ftype = ei_ftype_const_hi_pointer_ei_si;
+ break;
+ case T_V2SI:
+ ftype = ei_ftype_const_si_pointer_ei_si;
+ break;
+ case T_V2SF:
+ ftype = ei_ftype_const_sf_pointer_ei_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case OImode:
+ /* vld2q_lane and vld4_lane cases. */
+ switch (1 << j)
+ {
+ /* vld2q_lane cases. */
+ case T_V8HI:
+ ftype = oi_ftype_const_hi_pointer_oi_si;
+ break;
+ case T_V4SI:
+ ftype = oi_ftype_const_si_pointer_oi_si;
+ break;
+ case T_V4SF:
+ ftype = oi_ftype_const_sf_pointer_oi_si;
+ break;
+ /* vld4_lane cases. */
+ case T_V8QI:
+ ftype = oi_ftype_const_qi_pointer_oi_si;
+ break;
+ case T_V4HI:
+ ftype = oi_ftype_const_hi_pointer_oi_si;
+ break;
+ case T_V2SI:
+ ftype = oi_ftype_const_si_pointer_oi_si;
+ break;
+ case T_V2SF:
+ ftype = oi_ftype_const_sf_pointer_oi_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case CImode:
+ /* vld3q_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8HI:
+ ftype = ci_ftype_const_hi_pointer_ci_si;
+ break;
+ case T_V4SI:
+ ftype = ci_ftype_const_si_pointer_ci_si;
+ break;
+ case T_V4SF:
+ ftype = ci_ftype_const_sf_pointer_ci_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case XImode:
+ /* vld4q_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8HI:
+ ftype = xi_ftype_const_hi_pointer_xi_si;
+ break;
+ case T_V4SI:
+ ftype = xi_ftype_const_si_pointer_xi_si;
+ break;
+ case T_V4SF:
+ ftype = xi_ftype_const_sf_pointer_xi_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_STORESTRUCT:
+ switch (mode0)
+ {
+ case TImode:
+ /* vst2 cases. */
+ switch (1 << j)
+ {
+ case T_V8QI: ftype = void_ftype_qi_pointer_ti; break;
+ case T_V4HI: ftype = void_ftype_hi_pointer_ti; break;
+ case T_V2SI: ftype = void_ftype_si_pointer_ti; break;
+ case T_V2SF: ftype = void_ftype_sf_pointer_ti; break;
+ case T_DI: ftype = void_ftype_di_pointer_ti; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case EImode:
+ /* vst3 cases. */
+ switch (1 << j)
+ {
+ case T_V8QI: ftype = void_ftype_qi_pointer_ei; break;
+ case T_V4HI: ftype = void_ftype_hi_pointer_ei; break;
+ case T_V2SI: ftype = void_ftype_si_pointer_ei; break;
+ case T_V2SF: ftype = void_ftype_sf_pointer_ei; break;
+ case T_DI: ftype = void_ftype_di_pointer_ei; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case OImode:
+ /* vst2q and vst4 cases. */
+ switch (1 << j)
+ {
+ /* vst2q cases. */
+ case T_V16QI: ftype = void_ftype_qi_pointer_oi; break;
+ case T_V8HI: ftype = void_ftype_hi_pointer_oi; break;
+ case T_V4SI: ftype = void_ftype_si_pointer_oi; break;
+ case T_V4SF: ftype = void_ftype_sf_pointer_oi; break;
+ /* vst4 cases. */
+ case T_V8QI: ftype = void_ftype_qi_pointer_oi; break;
+ case T_V4HI: ftype = void_ftype_hi_pointer_oi; break;
+ case T_V2SI: ftype = void_ftype_si_pointer_oi; break;
+ case T_V2SF: ftype = void_ftype_sf_pointer_oi; break;
+ case T_DI: ftype = void_ftype_di_pointer_oi; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CImode:
+ /* vst3q cases. */
+ switch (1 << j)
+ {
+ case T_V16QI: ftype = void_ftype_qi_pointer_ci; break;
+ case T_V8HI: ftype = void_ftype_hi_pointer_ci; break;
+ case T_V4SI: ftype = void_ftype_si_pointer_ci; break;
+ case T_V4SF: ftype = void_ftype_sf_pointer_ci; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case XImode:
+ /* vst4q cases. */
+ switch (1 << j)
+ {
+ case T_V16QI: ftype = void_ftype_qi_pointer_xi; break;
+ case T_V8HI: ftype = void_ftype_hi_pointer_xi; break;
+ case T_V4SI: ftype = void_ftype_si_pointer_xi; break;
+ case T_V4SF: ftype = void_ftype_sf_pointer_xi; break;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case NEON_STORESTRUCTLANE:
+ switch (mode0)
+ {
+ case TImode:
+ /* vst2_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8QI:
+ ftype = void_ftype_qi_pointer_ti_si;
+ break;
+ case T_V4HI:
+ ftype = void_ftype_hi_pointer_ti_si;
+ break;
+ case T_V2SI:
+ ftype = void_ftype_si_pointer_ti_si;
+ break;
+ case T_V2SF:
+ ftype = void_ftype_sf_pointer_ti_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case EImode:
+ /* vst3_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8QI:
+ ftype = void_ftype_qi_pointer_ei_si;
+ break;
+ case T_V4HI:
+ ftype = void_ftype_hi_pointer_ei_si;
+ break;
+ case T_V2SI:
+ ftype = void_ftype_si_pointer_ei_si;
+ break;
+ case T_V2SF:
+ ftype = void_ftype_sf_pointer_ei_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case OImode:
+ /* vst2q_lane and vst4_lane cases. */
+ switch (1 << j)
+ {
+ /* vst2q_lane cases. */
+ case T_V8HI:
+ ftype = void_ftype_hi_pointer_oi_si;
+ break;
+ case T_V4SI:
+ ftype = void_ftype_si_pointer_oi_si;
+ break;
+ case T_V4SF:
+ ftype = void_ftype_sf_pointer_oi_si;
+ break;
+ /* vst4_lane cases. */
+ case T_V8QI:
+ ftype = void_ftype_qi_pointer_oi_si;
+ break;
+ case T_V4HI:
+ ftype = void_ftype_hi_pointer_oi_si;
+ break;
+ case T_V2SI:
+ ftype = void_ftype_si_pointer_oi_si;
+ break;
+ case T_V2SF:
+ ftype = void_ftype_sf_pointer_oi_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case CImode:
+ /* vst3q_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8HI:
+ ftype = void_ftype_hi_pointer_ci_si;
+ break;
+ case T_V4SI:
+ ftype = void_ftype_si_pointer_ci_si;
+ break;
+ case T_V4SF:
+ ftype = void_ftype_sf_pointer_ci_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case XImode:
+ /* vst4q_lane cases. */
+ switch (1 << j)
+ {
+ case T_V8HI:
+ ftype = void_ftype_hi_pointer_xi_si;
+ break;
+ case T_V4SI:
+ ftype = void_ftype_si_pointer_xi_si;
+ break;
+ case T_V4SF:
+ ftype = void_ftype_sf_pointer_xi_si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_assert (ftype != NULL);
+
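+ /* The builtin name is the base name followed by the mode suffix;
+ e.g. a "vadd" entry expanded in V8QImode would be named
+ "__builtin_neon_vaddv8qi". */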
+ sprintf (namebuf, "__builtin_neon_%s%s", d->name, modenames[j]);
+
+ lang_hooks.builtin_function (namebuf, ftype, fcode++, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ }
+ }
+#undef qi_TN
+#undef hi_TN
+#undef si_TN
+#undef di_TN
+#undef ti_TN
+#undef ei_TN
+#undef oi_TN
+#undef ci_TN
+#undef xi_TN
+
+#undef sf_TN
+
+#undef v8qi_TN
+#undef v4hi_TN
+#undef v2si_TN
+#undef v2sf_TN
+
+#undef v16qi_TN
+#undef v8hi_TN
+#undef v4si_TN
+#undef v4sf_TN
+#undef v2di_TN
+
+#undef pv8qi_TN
+#undef pv4hi_TN
+#undef pv2si_TN
+#undef pv2sf_TN
+#undef pdi_TN
+
+#undef pv16qi_TN
+#undef pv8hi_TN
+#undef pv4si_TN
+#undef pv4sf_TN
+#undef pv2di_TN
+
+#undef void_TN
+
+#undef TYPE2
+#undef TYPE3
+#undef TYPE4
+#undef TYPE5
+#undef TYPE6
+}
+
+static void
+arm_init_builtins (void)
+{
+ arm_init_tls_builtins ();
+
+ if (TARGET_REALLY_IWMMXT)
+ arm_init_iwmmxt_builtins ();
+
+ if (TARGET_NEON)
+ arm_init_neon_builtins ();
+/* APPLE LOCAL begin ARM darwin builtins */
+#ifdef SUBTARGET_INIT_BUILTINS
+ SUBTARGET_INIT_BUILTINS;
+#endif
+/* APPLE LOCAL end ARM darwin builtins */
+}
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* Errors in the source file can cause expand_expr to return const0_rtx
+ where we expect a vector. To avoid crashing, use one of the vector
+ clear instructions. */
+
+static rtx
+safe_vector_operand (rtx x, enum machine_mode mode)
+{
+ if (x != const0_rtx)
+ return x;
+ x = gen_reg_rtx (mode);
+
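+ /* The iwmmxt clear pattern is DImode, so clear vector modes through
+ a DImode subreg of the fresh register. */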
+ emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
+ : gen_rtx_SUBREG (DImode, x, 0)));
+ return x;
+}
+
+/* Subroutine of arm_expand_builtin to take care of binop insns. */
+
+static rtx
+arm_expand_binop_builtin (enum insn_code icode,
+ tree arglist, rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* Subroutine of arm_expand_builtin to take care of unop insns. */
+
+static rtx
+arm_expand_unop_builtin (enum insn_code icode,
+ tree arglist, rtx target, int do_load)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ rtx op0 = expand_normal (arg0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ if (do_load)
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ else
+ {
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ }
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
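+/* bsearch comparator: an entry of neon_builtin_data matches the sought
+ function code when that code lies in the half-open range
+ [base_fcode, base_fcode + num_vars). */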
+static int
+neon_builtin_compare (const void *a, const void *b)
+{
+ const neon_builtin_datum *key = a;
+ const neon_builtin_datum *memb = b;
+ unsigned int soughtcode = key->base_fcode;
+
+ if (soughtcode >= memb->base_fcode
+ && soughtcode < memb->base_fcode + memb->num_vars)
+ return 0;
+ else if (soughtcode < memb->base_fcode)
+ return -1;
+ else
+ return 1;
+}
+
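+/* Look up the insn code for builtin function code FCODE by binary search
+ over neon_builtin_data; if ITYPE is non-null, also return the builtin's
+ itype classification. */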
+static enum insn_code
+locate_neon_builtin_icode (int fcode, neon_itype *itype)
+{
+ neon_builtin_datum key, *found;
+ int idx;
+
+ key.base_fcode = fcode;
+ found = bsearch (&key, &neon_builtin_data[0], ARRAY_SIZE (neon_builtin_data),
+ sizeof (neon_builtin_data[0]), neon_builtin_compare);
+ gcc_assert (found);
+ idx = fcode - (int) found->base_fcode;
+ gcc_assert (idx >= 0 && idx < T_MAX && idx < (int)found->num_vars);
+
+ if (itype)
+ *itype = found->itype;
+
+ return found->codes[idx];
+}
+
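+/* How each argument of a Neon builtin is handled during expansion:
+ copied into a register if the operand predicate rejects it, required
+ to be a constant, or marking the end of the argument list. */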
+typedef enum {
+ NEON_ARG_COPY_TO_REG,
+ NEON_ARG_CONSTANT,
+ NEON_ARG_STOP
+} builtin_arg;
+
+#define NEON_MAX_BUILTIN_ARGS 5
+
+/* Expand a Neon builtin. */
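+/* The variadic tail is a sequence of builtin_arg codes, one per operand,
+ terminated by NEON_ARG_STOP; HAVE_RETVAL is nonzero when operand 0 of
+ ICODE is a result to be placed in TARGET. */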
+static rtx
+arm_expand_neon_args (rtx target, int icode, int have_retval,
+ tree arglist, ...)
+{
+ va_list ap;
+ rtx pat;
+ tree arg[NEON_MAX_BUILTIN_ARGS];
+ rtx op[NEON_MAX_BUILTIN_ARGS];
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode[NEON_MAX_BUILTIN_ARGS];
+ int argc = 0;
+
+ if (have_retval
+ && (!target
+ || GET_MODE (target) != tmode
+ || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
+ target = gen_reg_rtx (tmode);
+
+ va_start (ap, arglist);
+
+ for (;;)
+ {
+ builtin_arg thisarg = va_arg (ap, int);
+
+ if (thisarg == NEON_ARG_STOP)
+ break;
+ else
+ {
+ arg[argc] = TREE_VALUE (arglist);
+ op[argc] = expand_expr (arg[argc], NULL_RTX, VOIDmode, 0);
+ mode[argc] = insn_data[icode].operand[argc + have_retval].mode;
+ /* APPLE LOCAL 6574544 begin NEON builtin argument types */
+ /* Make sure the modes match. */
+ op[argc] = convert_to_mode (mode[argc], op[argc],
+ TYPE_UNSIGNED(TREE_TYPE(arg[argc])));
+ /* APPLE LOCAL 6574544 end NEON builtin argument types */
+
+ arglist = TREE_CHAIN (arglist);
+
+ switch (thisarg)
+ {
+ case NEON_ARG_COPY_TO_REG:
+ /* gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
+ break;
+
+ case NEON_ARG_CONSTANT:
+ /* FIXME: This error message is somewhat unhelpful. */
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ error ("argument must be a constant");
+ break;
+
+ case NEON_ARG_STOP:
+ gcc_unreachable ();
+ }
+
+ argc++;
+ }
+ }
+
+ va_end (ap);
+
+ if (have_retval)
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (target, op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (target, op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ else
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!pat)
+ return 0;
+
+ emit_insn (pat);
+
+ return target;
+}
+
+/* Expand a Neon builtin. These are "special" because they don't have symbolic
+ constants defined per-instruction or per instruction-variant. Instead, the
+ required info is looked up in the table neon_builtin_data. */
+static rtx
+arm_expand_neon_builtin (rtx target, int fcode, tree arglist)
+{
+ neon_itype itype;
+ enum insn_code icode = locate_neon_builtin_icode (fcode, &itype);
+
+ switch (itype)
+ {
+ case NEON_UNOP:
+ case NEON_CONVERT:
+ case NEON_DUPLANE:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_BINOP:
+ case NEON_SETLANE:
+ case NEON_SCALARMUL:
+ case NEON_SCALARMULL:
+ case NEON_SCALARMULH:
+ case NEON_SHIFTINSERT:
+ case NEON_LOGICBINOP:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
+ NEON_ARG_STOP);
+
+ case NEON_TERNOP:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
+ NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_GETLANE:
+ case NEON_FIXCONV:
+ case NEON_SHIFTIMM:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT, NEON_ARG_CONSTANT,
+ NEON_ARG_STOP);
+
+ case NEON_CREATE:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
+
+ case NEON_DUP:
+ case NEON_SPLIT:
+ case NEON_REINTERP:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
+
+ case NEON_COMBINE:
+ case NEON_VTBL:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
+
+ case NEON_RESULTPAIR:
+ return arm_expand_neon_args (target, icode, 0, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
+ NEON_ARG_STOP);
+
+ case NEON_LANEMUL:
+ case NEON_LANEMULL:
+ case NEON_LANEMULH:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
+ NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_LANEMAC:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
+ NEON_ARG_CONSTANT, NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_SHIFTACC:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
+ NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_SCALARMAC:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
+ NEON_ARG_CONSTANT, NEON_ARG_STOP);
+
+ case NEON_SELECT:
+ case NEON_VTBX:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
+ NEON_ARG_STOP);
+
+ case NEON_LOAD1:
+ case NEON_LOADSTRUCT:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
+
+ case NEON_LOAD1LANE:
+ case NEON_LOADSTRUCTLANE:
+ return arm_expand_neon_args (target, icode, 1, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
+ NEON_ARG_STOP);
+
+ case NEON_STORE1:
+ case NEON_STORESTRUCT:
+ return arm_expand_neon_args (target, icode, 0, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);
+
+ case NEON_STORE1LANE:
+ case NEON_STORESTRUCTLANE:
+ return arm_expand_neon_args (target, icode, 0, arglist,
+ NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
+ NEON_ARG_STOP);
+ }
+
+ gcc_unreachable ();
+}
+
+/* Emit code to reinterpret one Neon type as another, without altering bits. */
+void
+neon_reinterpret (rtx dest, rtx src)
+{
+ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
+}
+
+/* Emit code to place a Neon pair result in memory locations (with equal
+ registers). */
+void
+neon_emit_pair_result_insn (enum machine_mode mode,
+ rtx (*intfn) (rtx, rtx, rtx, rtx), rtx destaddr,
+ rtx op1, rtx op2)
+{
+ rtx mem = gen_rtx_MEM (mode, destaddr);
+ rtx tmp1 = gen_reg_rtx (mode);
+ rtx tmp2 = gen_reg_rtx (mode);
+
+ emit_insn (intfn (tmp1, op1, tmp2, op2));
+
+ emit_move_insn (mem, tmp1);
+ mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
+ emit_move_insn (mem, tmp2);
+}
+
+/* Set up operands for a register copy from src to dest, taking care not to
+ clobber registers in the process.
+ FIXME: This has rather high polynomial complexity (O(n^3)?) but shouldn't
+ be called with a large N, so that should be OK. */
+
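+/* Illustrative example (hypothetical registers): with dest = {d0, d1}
+ and src = {d1, d2}, the copy d1 <- d2 must wait until d0 <- d1 has
+ been scheduled, since d1 still feeds a pending source. */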
+void
+neon_disambiguate_copy (rtx *operands, rtx *dest, rtx *src, unsigned int count)
+{
+ unsigned int copied = 0, opctr = 0;
+ unsigned int done = (1 << count) - 1;
+ unsigned int i, j;
+
+ while (copied != done)
+ {
+ for (i = 0; i < count; i++)
+ {
+ int good = 1;
+
+ for (j = 0; good && j < count; j++)
+ if (i != j && (copied & (1 << j)) == 0
+ && reg_overlap_mentioned_p (src[j], dest[i]))
+ good = 0;
+
+ if (good)
+ {
+ operands[opctr++] = dest[i];
+ operands[opctr++] = src[i];
+ copied |= 1 << i;
+ }
+ }
+ }
+
+ gcc_assert (opctr == count * 2);
+}
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+arm_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ const struct builtin_description * d;
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ tree arg0;
+ tree arg1;
+ tree arg2;
+ rtx op0;
+ rtx op1;
+ rtx op2;
+ rtx pat;
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+ size_t i;
+ enum machine_mode tmode;
+ enum machine_mode mode0;
+ enum machine_mode mode1;
+ enum machine_mode mode2;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (fcode >= ARM_BUILTIN_NEON_BASE)
+ return arm_expand_neon_builtin (target, fcode, arglist);
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ switch (fcode)
+ {
+ case ARM_BUILTIN_TEXTRMSB:
+ case ARM_BUILTIN_TEXTRMUB:
+ case ARM_BUILTIN_TEXTRMSH:
+ case ARM_BUILTIN_TEXTRMUH:
+ case ARM_BUILTIN_TEXTRMSW:
+ case ARM_BUILTIN_TEXTRMUW:
+ icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
+ : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
+ : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
+ : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
+ : CODE_FOR_iwmmxt_textrmw);
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ {
+ /* @@@ better error message */
+ error ("selector must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ARM_BUILTIN_TINSRB:
+ case ARM_BUILTIN_TINSRH:
+ case ARM_BUILTIN_TINSRW:
+ icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
+ : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
+ : CODE_FOR_iwmmxt_tinsrw);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+ mode2 = insn_data[icode].operand[3].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ {
+ /* @@@ better error message */
+ error ("selector must be an immediate");
+ return const0_rtx;
+ }
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ARM_BUILTIN_SETWCX:
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = force_reg (SImode, expand_normal (arg0));
+ op1 = expand_normal (arg1);
+ emit_insn (gen_iwmmxt_tmcr (op1, op0));
+ return 0;
+
+ case ARM_BUILTIN_GETWCX:
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ target = gen_reg_rtx (SImode);
+ emit_insn (gen_iwmmxt_tmrc (target, op0));
+ return target;
+
+ case ARM_BUILTIN_WSHUFH:
+ icode = CODE_FOR_iwmmxt_wshufh;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ /* @@@ better error message */
+ error ("mask must be an immediate");
+ return const0_rtx;
+ }
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ARM_BUILTIN_WSADB:
+ return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
+ case ARM_BUILTIN_WSADH:
+ return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
+ case ARM_BUILTIN_WSADBZ:
+ return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
+ case ARM_BUILTIN_WSADHZ:
+ return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
+
+ /* Several three-argument builtins. */
+ case ARM_BUILTIN_WMACS:
+ case ARM_BUILTIN_WMACU:
+ case ARM_BUILTIN_WALIGN:
+ case ARM_BUILTIN_TMIA:
+ case ARM_BUILTIN_TMIAPH:
+ case ARM_BUILTIN_TMIATT:
+ case ARM_BUILTIN_TMIATB:
+ case ARM_BUILTIN_TMIABT:
+ case ARM_BUILTIN_TMIABB:
+ icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
+ : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
+ : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
+ : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
+ : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
+ : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
+ : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
+ : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
+ : CODE_FOR_iwmmxt_walign);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+ mode2 = insn_data[icode].operand[3].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ARM_BUILTIN_WZERO:
+ target = gen_reg_rtx (DImode);
+ emit_insn (gen_iwmmxt_clrdi (target));
+ return target;
+
+ case ARM_BUILTIN_THREAD_POINTER:
+ return arm_load_tp (target);
+
+ default:
+ break;
+ }
+
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == (const enum arm_builtins) fcode)
+ return arm_expand_binop_builtin (d->icode, arglist, target);
+
+ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == (const enum arm_builtins) fcode)
+ return arm_expand_unop_builtin (d->icode, arglist, target, 0);
+
+ /* @@@ Should really do something sensible here. */
+ return NULL_RTX;
+}
+
+/* Return the number (counting from 0) of
+ the least significant set bit in MASK. */
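+/* For example, number_of_first_bit_set (0x18) is 3. */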
+
+inline static int
+number_of_first_bit_set (unsigned mask)
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++bit)
+ continue;
+
+ return bit;
+}
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* Handle push or pop of registers from the stack.
+ If EMIT is true, generate the code.
+ If EMIT is false, compute and return the number of bytes that
+ would result from a call with EMIT true. In this case F is
+ not necessarily valid and should not be referenced.
+
+ F is the assembly file. MASK is the registers to push or pop. PUSH is
+ nonzero if we should push, and zero if we should pop. For debugging
+ output, if pushing, adjust CFA_OFFSET by the amount of space added
+ to the stack. REAL_REGS should have the same number of bits set as
+ MASK, and will be used instead (in the same order) to describe which
+ registers were saved - this is used to mark the save slots when we
+ push high registers after moving them to low registers.
+*/
+static int
+handle_thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
+ unsigned long real_regs, bool emit)
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+ int pushed_words = 0;
+ int bytes = 0;
+
+ gcc_assert (mask);
+
+ if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
+ {
+ /* Special case. Do not generate a POP PC statement here; do it in
+ handle_thumb_exit (). */
+ return handle_thumb_exit (f, -1, emit);
+ }
+
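+ /* Under EABI unwind tables, emit a .save directive listing the
+ real registers being pushed so the unwinder can restore them. */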
+ if (ARM_EABI_UNWIND_TABLES && push && emit)
+ {
+ fprintf (f, "\t.save\t{");
+ for (regno = 0; regno < 15; regno++)
+ {
+ if (real_regs & (1 << regno))
+ {
+ if (real_regs & ((1 << regno) -1))
+ fprintf (f, ", ");
+ asm_fprintf (f, "%r", regno);
+ }
+ }
+ fprintf (f, "}\n");
+ }
+
+ bytes += 2;
+ if (emit)
+ fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+ for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ if (emit)
+ {
+ asm_fprintf (f, "%r", regno);
+
+ if ((lo_mask & ~1) != 0)
+ fprintf (f, ", ");
+ }
+
+ pushed_words++;
+ }
+ }
+
+ if (push && (mask & (1 << LR_REGNUM)))
+ {
+ /* Catch pushing the LR. */
+ if (emit)
+ {
+ if (mask & 0xFF)
+ fprintf (f, ", ");
+
+ asm_fprintf (f, "%r", LR_REGNUM);
+ }
+
+ pushed_words++;
+ }
+ else if (!push && (mask & (1 << PC_REGNUM)))
+ {
+ /* Catch popping the PC. */
+ /* APPLE LOCAL begin ARM interworking */
+ if ((TARGET_INTERWORK && !arm_arch5)
+ || TARGET_BACKTRACE
+ || current_function_calls_eh_return)
+ /* APPLE LOCAL end ARM interworking */
+ {
+ /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+ if (emit)
+ fprintf (f, "}\n");
+
+ bytes += handle_thumb_exit (f, -1, emit);
+
+ return bytes;
+ }
+ else if (emit)
+ {
+ if (mask & 0xFF)
+ fprintf (f, ", ");
+
+ asm_fprintf (f, "%r", PC_REGNUM);
+ }
+ }
+
+ if (emit)
+ fprintf (f, "}\n");
+
+ if (emit && push && pushed_words && dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ int pushed_mask = real_regs;
+
+ *cfa_offset += pushed_words * 4;
+ dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
+
+ pushed_words = 0;
+ pushed_mask = real_regs;
+ for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
+ {
+ if (pushed_mask & 1)
+ dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
+ }
+ }
+ return bytes;
+}
+
+/* Handle return from a thumb function.
+ If EMIT is true, generate the code.
+ If EMIT is false, compute and return the number of bytes that
+ would result from a call with EMIT true. In this case F is
+ not necessarily valid and should not be referenced.
+ If 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer.
+*/
+static int
+handle_thumb_exit (FILE *f, int reg_containing_return_addr, bool emit)
+{
+ unsigned regs_available_for_popping;
+ unsigned regs_to_pop;
+ int pops_needed;
+ unsigned available;
+ unsigned required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+ int bytes = 0;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LR_REGNUM;
+ ++pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore the (ARM) frame pointer and stack pointer. */
+ regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and
+ return. */
+ if (pops_needed == 0)
+ {
+ if (current_function_calls_eh_return)
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
+ }
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
+
+ return bytes;
+ }
+ /* Otherwise, if we are not supporting interworking, have not created
+ a backtrace structure, and the function was not entered in ARM mode,
+ just pop the return address straight into the PC. */
+ /* APPLE LOCAL ARM interworking */
+ else if ((!TARGET_INTERWORK || arm_arch5)
+ && !TARGET_BACKTRACE
+ && !is_called_in_ARM_mode (current_function_decl)
+ && !current_function_calls_eh_return)
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
+ return bytes;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+ /* If returning via __builtin_eh_return, the bottom three registers
+ all contain information needed for the return. */
+ if (current_function_calls_eh_return)
+ size = 12;
+ else
+ {
+ /* We can deduce the registers used from the function's
+ return value. This is more reliable than examining
+ regs_ever_live[] because that will be set if the register is
+ ever used in the function, not just if the register is used
+ to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_REGISTER (1))
+ | (1 << ARG_REGISTER (2))
+ | (1 << ARG_REGISTER (3));
+ else
+ regs_available_for_popping =
+ (1 << ARG_REGISTER (2))
+ | (1 << ARG_REGISTER (3));
+ }
+ else if (size <= 4)
+ regs_available_for_popping =
+ (1 << ARG_REGISTER (2))
+ | (1 << ARG_REGISTER (3));
+ else if (size <= 8)
+ regs_available_for_popping =
+ (1 << ARG_REGISTER (3));
+ }
+
+ /* Match registers to be popped with registers into which we pop them. */
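+ /* Each iteration clears the lowest set bit of both masks (x & -x
+ isolates that bit), pairing one popped register with one
+ destination register. */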
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ --pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == LAST_ARG_REGNUM)
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
+ reg_containing_return_addr = LR_REGNUM;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
+ }
+
+ if (reg_containing_return_addr != LAST_ARG_REGNUM)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
+
+ --pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ bytes += handle_thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
+ regs_available_for_popping, emit);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~(1 << LR_REGNUM);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+ /* Remove this register from the mask of available registers, so that
+ the return address will not be corrupted by further pops.  */
+ regs_available_for_popping &= ~(1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n",
+ ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~(1 << frame_pointer);
+ regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well,
+ find the register that contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+ /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop.  */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
+ reg_containing_return_addr);
+
+ reg_containing_return_addr = LR_REGNUM;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ bytes += handle_thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
+ regs_available_for_popping, emit);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
+
+ regs_to_pop &= ~(1 << move_to);
+
+ --pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ bytes += handle_thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
+ regs_available_for_popping, emit);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LR_REGNUM)
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
+ reg_containing_return_addr = LR_REGNUM;
+ }
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
+ }
+
+ if (current_function_calls_eh_return)
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
+ }
+
+ /* Return to caller. */
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
+ return bytes;
+}
+/* APPLE LOCAL end ARM compact switch tables */
+
+
+void
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_final_prescan_insn (rtx insn)
+{
+ if (flag_print_asm_name)
+ asm_fprintf (asm_out_file, "%@ 0x%04x\n",
+ INSN_ADDRESSES (INSN_UID (insn)));
+}
+
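+/* Return 1 if VAL is a non-zero constant whose set bits all lie within
+   a single 8-bit field shifted left by at most 24 bits, i.e. a value
+   that can be built from an 8-bit immediate and a left shift.  */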
+int
+thumb_shiftable_const (unsigned HOST_WIDE_INT val)
+{
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ if (val == 0) /* XXX */
+ return 0;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
+
+/* Returns nonzero if the current function contains, or might contain,
+   a far jump.  */
+static int
+thumb_far_jump_used_p (void)
+{
+ rtx insn;
+
+ /* This test is only important for leaf functions. */
+ /* assert (!leaf_function_p ()); */
+
+ /* If we have already decided that far jumps may be used,
+ do not bother checking again, and always return true even if
+ it turns out that they are not being used. Once we have made
+ the decision that far jumps are present (and that hence the link
+ register will be pushed onto the stack) we cannot go back on it. */
+ if (cfun->machine->far_jump_used)
+ return 1;
+
+ /* If this function is not being called from the prologue/epilogue
+ generation code then it must be being called from the
+ INITIAL_ELIMINATION_OFFSET macro. */
+ if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
+ {
+ /* In this case we know that we are being asked about the elimination
+ of the arg pointer register. If that register is not being used,
+ then there are no arguments on the stack, and we do not have to
+ worry that a far jump might force the prologue to push the link
+ register, changing the stack offsets. In this case we can just
+ return false, since the presence of far jumps in the function will
+ not affect stack offsets.
+
+ If the arg pointer is live (or if it was live, but has now been
+ eliminated and so set to dead) then we do have to test to see if
+ the function might contain a far jump.  This test can lead to some
+ false positives, since before reload is completed the length of
+ branch instructions is not known, so gcc defaults to returning their
+ longest length, which in turn sets the far jump attribute to true.
+
+ A false positive will not result in bad code being generated, but it
+ will result in a needless push and pop of the link register.  We
+ hope that this does not occur too often.
+
+ If we need doubleword stack alignment this could affect the other
+ elimination offsets so we can't risk getting it wrong. */
+ if (regs_ever_live [ARG_POINTER_REGNUM])
+ cfun->machine->arg_pointer_live = 1;
+ else if (!cfun->machine->arg_pointer_live)
+ return 0;
+ }
+
+ /* Check to see if the function contains a branch
+ insn with the far jump attribute set. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES
+ )
+ {
+ /* Record the fact that we have decided that
+ the function does use far jumps. */
+ cfun->machine->far_jump_used = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Return nonzero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (tree func)
+{
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
+
+ /* Ignore the problem about functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+#ifdef ARM_PE
+ return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+}
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* This handles the part of the epilogue that is not expressed as RTL.
+   It computes and returns the number of bytes in this part of the
+   epilogue.  When EMIT is true, it additionally outputs this part of
+   the epilogue to asm_out_file; when EMIT is false, nothing is output.  */
+static int
+handle_thumb_unexpanded_epilogue (bool emit)
+{
+ int regno;
+ unsigned long live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int had_to_push_lr;
+ int size;
+ int bytes = 0;
+
+ if (return_used_this_function)
+ return bytes;
+
+ if (IS_NAKED (arm_current_func_type ()))
+ return bytes;
+
+ /* APPLE LOCAL begin 6465387 exception handling interworking VFP save */
+ if (current_function_has_nonlocal_label && arm_arch6)
+ {
+ bytes += 4;
+ if (emit)
+ asm_fprintf (asm_out_file, "\tblx ___restore_vfp_d8_d15_regs\n");
+ }
+ /* APPLE LOCAL end 6465387 exception handling interworking VFP save */
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ live_regs_mask = thumb1_compute_save_reg_mask ();
+ high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
+
+ /* We can deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value.  */
+ size = arm_size_return_regs ();
+
+ /* The prologue may have pushed some high registers to use as
+ work registers.  E.g., the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+ as part of the prologue.  We have to undo that pushing here.  */
+
+ if (high_regs_pushed)
+ {
+ unsigned long mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ /* The available low registers depend on the size of the value we are
+ returning. */
+ if (size <= 12)
+ mask |= 1 << 3;
+ if (size <= 8)
+ mask |= 1 << 2;
+
+ if (mask == 0)
+ /* Oh dear! We have no low registers into which we can pop
+ high registers! */
+ internal_error
+ ("no low registers available for popping high registers");
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (live_regs_mask & (1 << next_hi_reg))
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find lo register(s) into which the high register(s) can
+ be popped. */
+ for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A no-op if regno == 8.  */
+
+ /* Pop the values into the low register(s). */
+ bytes += handle_thumb_pushpop (asm_out_file, mask, 0, NULL, mask, emit);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
+ regno);
+
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (live_regs_mask & (1 << next_hi_reg))
+ break;
+ }
+ }
+ }
+ live_regs_mask &= ~0x0f00;
+ }
+
+ had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
+ live_regs_mask &= 0xff;
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ /* Pop the return address into the PC. */
+ if (had_to_push_lr)
+ live_regs_mask |= 1 << PC_REGNUM;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+ if (live_regs_mask)
+ bytes += handle_thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
+ live_regs_mask, emit);
+
+ /* We have either just popped the return address into the
+ PC or it was kept in LR for the entire function.  */
+ if (!had_to_push_lr)
+ bytes += handle_thumb_exit (asm_out_file, LR_REGNUM, emit);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ if (live_regs_mask)
+ bytes += handle_thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
+ live_regs_mask, emit);
+
+ if (had_to_push_lr)
+ {
+ if (size > 12)
+ {
+ /* We have no free low regs, so save one. */
+ bytes += 2;
+ if (emit)
+ asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
+ LAST_ARG_REGNUM);
+ }
+
+ /* Get the return address into a temporary register. */
+ bytes += handle_thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
+ 1 << LAST_ARG_REGNUM, emit);
+
+ if (size > 12)
+ {
+ bytes += 4;
+ if (emit)
+ {
+ /* Move the return address to lr. */
+ asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
+ LAST_ARG_REGNUM);
+ /* Restore the low register. */
+ asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
+ IP_REGNUM);
+ }
+ regno = LR_REGNUM;
+ }
+ else
+ regno = LAST_ARG_REGNUM;
+ }
+ else
+ regno = LR_REGNUM;
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ bytes += 2;
+ if (emit)
+ asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
+ SP_REGNUM, SP_REGNUM,
+ current_function_pretend_args_size);
+
+ bytes += handle_thumb_exit (asm_out_file, regno, emit);
+ }
+
+ return bytes;
+}
+
+/* This is the externally visible entry point for generating code for the
+ part of the epilogue that is not stored as RTL. This is just a wrapper
+   around the previous function, with the externally imposed interface.  */
+
+const char *
+thumb_unexpanded_epilogue (void)
+{
+ (void) handle_thumb_unexpanded_epilogue (true);
+ return "";
+}
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* Functions to save and restore machine-specific function data. */
+static struct machine_function *
+arm_init_machine_status (void)
+{
+ struct machine_function *machine;
+ machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
+
+#if ARM_FT_UNKNOWN != 0
+ machine->func_type = ARM_FT_UNKNOWN;
+#endif
+ return machine;
+}
+
+/* Return an RTX indicating where the return address to the
+ calling function can be found. */
+/* APPLE LOCAL begin ARM reliable backtraces */
+rtx
+arm_return_addr (int count, rtx frame)
+{
+ if (count != 0)
+ return gen_rtx_MEM (Pmode, plus_constant (frame, 4));
+
+ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
+}
+/* APPLE LOCAL end ARM reliable backtraces */
+
+/* Do anything needed before RTL is emitted for each function. */
+void
+arm_init_expanders (void)
+{
+ /* Arrange to initialize and mark the machine per-function status. */
+ init_machine_status = arm_init_machine_status;
+
+ /* This is to stop the combine pass optimizing away the alignment
+ adjustment of va_arg. */
+ /* ??? It is claimed that this should not be necessary. */
+ if (cfun)
+ mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
+}
+
+
+/* Like arm_compute_initial_elimination_offset.  Simpler because there
+ isn't an ABI specified frame pointer for Thumb. Instead, we set it
+ to point at the base of the local variables after static stack
+ space for a function has been allocated. */
+
+HOST_WIDE_INT
+thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
+{
+ arm_stack_offsets *offsets;
+
+ offsets = arm_get_frame_offsets ();
+
+ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ switch (to)
+ {
+ case STACK_POINTER_REGNUM:
+ return offsets->outgoing_args - offsets->saved_args;
+
+ case FRAME_POINTER_REGNUM:
+ return offsets->soft_frame - offsets->saved_args;
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ case HARD_FRAME_POINTER_REGNUM:
+ return offsets->frame - offsets->saved_args;
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case FRAME_POINTER_REGNUM:
+ switch (to)
+ {
+ case STACK_POINTER_REGNUM:
+ return offsets->outgoing_args - offsets->soft_frame;
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ case HARD_FRAME_POINTER_REGNUM:
+ return offsets->frame - offsets->soft_frame;
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+
+/* Generate the rest of a function's prologue. */
+void
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_expand_prologue (void)
+{
+ rtx insn, dwarf;
+
+ HOST_WIDE_INT amount;
+ arm_stack_offsets *offsets;
+ unsigned long func_type;
+ int regno;
+ unsigned long live_regs_mask;
+
+ func_type = arm_current_func_type ();
+
+ /* Naked functions don't have prologues. */
+ if (IS_NAKED (func_type))
+ return;
+
+ if (IS_INTERRUPT (func_type))
+ {
+ error ("interrupt Service Routines cannot be coded in Thumb mode");
+ return;
+ }
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ live_regs_mask = thumb1_compute_save_reg_mask ();
+ /* Load the pic register before setting the frame pointer,
+ so we can use r7 as a temporary work register. */
+ if (flag_pic && arm_pic_register != INVALID_REGNUM)
+ arm_load_pic_register (live_regs_mask);
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ offsets = arm_get_frame_offsets ();
+
+ if (frame_pointer_needed)
+ {
+ insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (offsets->saved_regs
+ - offsets->frame)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
+ stack_pointer_rtx);
+ }
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ amount = offsets->outgoing_args - offsets->saved_regs;
+ if (amount)
+ {
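+ /* Thumb-1 "sub sp, #imm" takes a 7-bit word-scaled immediate,
+ i.e. multiples of 4 up to 508, hence the 512 cutoff here.  */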
+ if (amount < 512)
+ {
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- amount)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ {
+ rtx reg;
+
+ /* The stack decrement is too big for an immediate value in a single
+ insn. In theory we could issue multiple subtracts, but after
+ three of them it becomes more space efficient to place the full
+ value in the constant pool and load into a register. (Also the
+ ARM debugger really likes to see only one stack decrement per
+ function). So instead we look for a scratch register into which
+ we can load the decrement, and then we subtract this from the
+ stack pointer. Unfortunately on the thumb the only available
+ scratch registers are the argument registers, and we cannot use
+ these as they may hold arguments to the function. Instead we
+ attempt to locate a call preserved register which is used by this
+ function. If we can find one, then we know that it will have
+ been pushed at the start of the prologue and so we can corrupt
+ it now. */
+ for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
+ if (live_regs_mask & (1 << regno)
+ && !(frame_pointer_needed
+ && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
+ break;
+
+ if (regno > LAST_LO_REGNUM) /* Very unlikely. */
+ {
+ rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
+
+ /* Choose an arbitrary, non-argument low register. */
+ /* APPLE LOCAL ARM custom frame layout */
+ reg = gen_rtx_REG (SImode, LAST_LO_REGNUM - 1);
+
+ /* Save it by copying it into a high, scratch register. */
+ emit_insn (gen_movsi (spare, reg));
+ /* Add a USE to stop propagate_one_insn() from barfing. */
+ emit_insn (gen_prologue_use (spare));
+
+ /* Decrement the stack. */
+ emit_insn (gen_movsi (reg, GEN_INT (- amount)));
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx, reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
+ -amount));
+ RTX_FRAME_RELATED_P (dwarf) = 1;
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (insn));
+
+ /* Restore the low register's original value. */
+ emit_insn (gen_movsi (reg, spare));
+
+ /* Emit a USE of the restored scratch register, so that flow
+ analysis will not consider the restore redundant. The
+ register won't be used again in this function and isn't
+ restored by the epilogue. */
+ emit_insn (gen_prologue_use (reg));
+ }
+ else
+ {
+ reg = gen_rtx_REG (SImode, regno);
+
+ emit_insn (gen_movsi (reg, GEN_INT (- amount)));
+
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx, reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
+ -amount));
+ RTX_FRAME_RELATED_P (dwarf) = 1;
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
+ REG_NOTES (insn));
+ }
+ }
+ }
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* Removed lines. */
+ /* APPLE LOCAL end ARM custom frame layout */
+
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly if the user has requested no
+ scheduling in the prologue.  Similarly if we want non-call exceptions
+ using the EABI unwinder, to prevent faulting instructions from being
+ swapped with a stack adjustment. */
+ if (current_function_profile || !TARGET_SCHED_PROLOG
+ || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
+ emit_insn (gen_blockage ());
+
+ cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
+ if (live_regs_mask & 0xff)
+ cfun->machine->lr_save_eliminated = 0;
+
+ /* If the link register is being kept alive, with the return address in it,
+ then make sure that it does not get reused by the ce2 pass. */
+ if (cfun->machine->lr_save_eliminated)
+ emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
+}
+
+
+void
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_expand_epilogue (void)
+{
+ HOST_WIDE_INT amount;
+ arm_stack_offsets *offsets;
+ int regno;
+
+ /* Naked functions don't have epilogues.  */
+ if (IS_NAKED (arm_current_func_type ()))
+ return;
+
+ offsets = arm_get_frame_offsets ();
+ amount = offsets->outgoing_args - offsets->saved_regs;
+
+ /* APPLE LOCAL begin ARM custom frame layout */
+ /* Because popping the stack frame using the frame pointer is so much
+ more expensive than just popping it from the SP, only use the FP
+ when we must -- i.e., when we don't know the SP offset because it
+ has changed since the beginning of the function. */
+ if (! current_function_sp_is_unchanging)
+ {
+ int fp_offset = offsets->frame - offsets->saved_regs;
+
+ if (fp_offset)
+ {
+ /* r3 is always free in the epilogue. */
+ rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
+
+ emit_insn (gen_movsi (reg, hard_frame_pointer_rtx));
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (fp_offset)));
+ emit_insn (gen_movsi (stack_pointer_rtx, reg));
+ }
+ else
+ {
+ emit_insn (gen_movsi (stack_pointer_rtx,
+ hard_frame_pointer_rtx));
+ }
+ }
+ else if (amount)
+ /* APPLE LOCAL end ARM custom frame layout */
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ /* r3 is always free in the epilogue. */
+ rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ }
+
+ /* Emit a USE (stack_pointer_rtx), so that
+ the stack adjustment will not be deleted. */
+ emit_insn (gen_prologue_use (stack_pointer_rtx));
+
+ if (current_function_profile || !TARGET_SCHED_PROLOG)
+ emit_insn (gen_blockage ());
+
+ /* Emit a clobber for each insn that will be restored in the epilogue,
+ so that flow2 will get register lifetimes correct. */
+ for (regno = 0; regno < 13; regno++)
+ if (regs_ever_live[regno] && !call_used_regs[regno])
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
+
+ if (! regs_ever_live[LR_REGNUM])
+ emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
+}
+
+/* APPLE LOCAL begin ARM 4790140 compact switch tables */
+/* This handles the part of the prologue that is not expressed as RTL.
+ It computes and returns the number of bytes in this part of the prologue.
+ When EMIT is true, it additionally outputs this part of the prologue.
+ When !EMIT, this function does not output anything; in this case
+ F need not be valid and should not be referenced.
+*/
+static int
+handle_thumb_unexpanded_prologue (FILE *f, bool emit)
+{
+ unsigned long live_regs_mask = 0;
+ unsigned long l_mask;
+ unsigned high_regs_pushed = 0;
+ int cfa_offset = 0;
+ int regno;
+ int bytes = 0;
+
+ if (IS_NAKED (arm_current_func_type ()))
+ return bytes;
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ const char * name;
+
+ gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
+ gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
+ == SYMBOL_REF);
+
+ bytes += 8;
+
+ if (emit)
+ {
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+ ASM_DECLARE_FUNCTION_NAME. */
+ asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
+ asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c. */
+
+#define STUB_NAME ".real_start_of"
+
+ fprintf (f, "\t.code\t16\n");
+#ifdef ARM_PE
+ if (arm_dllexport_name_p (name))
+ name = arm_strip_name_encoding (name);
+#endif
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ /* APPLE LOCAL begin ARM thumb_func <symbol_name> */
+ if (TARGET_MACHO)
+ asm_fprintf (f, "\t.thumb_func %s%U%s\n", STUB_NAME, name);
+ else
+ fprintf (f, "\t.thumb_func\n");
+ /* APPLE LOCAL end ARM thumb_func <symbol_name> */
+
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ /* Output unwind directive for the stack adjustment. */
+ if (emit && ARM_EABI_UNWIND_TABLES)
+ fprintf (f, "\t.pad #%d\n",
+ current_function_pretend_args_size);
+
+ if (emit)
+ {
+ if (cfun->machine->uses_anonymous_args)
+ {
+ int num_pushes;
+
+ fprintf (f, "\tpush\t{");
+
+ num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
+
+ for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
+ regno <= LAST_ARG_REGNUM;
+ regno++)
+ asm_fprintf (f, "%r%s", regno,
+ regno == LAST_ARG_REGNUM ? "" : ", ");
+
+ fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
+ SP_REGNUM, SP_REGNUM,
+ current_function_pretend_args_size);
+ }
+
+ /* We don't need to record the stores for unwinding (would it
+ help the debugger any if we did?), but record the change in
+ the stack pointer. */
+ if (emit && dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+
+ cfa_offset = cfa_offset + current_function_pretend_args_size;
+ dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
+ }
+ }
+
+ /* Get the registers we are going to push. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ live_regs_mask = thumb1_compute_save_reg_mask ();
+ /* Extract a mask of the ones we can give to the Thumb's push instruction. */
+ l_mask = live_regs_mask & 0x40ff;
+ /* Then count how many other high registers will need to be pushed. */
+ high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
+
+ if (TARGET_BACKTRACE)
+ {
+ unsigned offset;
+ unsigned work_register;
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Push low registers.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ work_register = thumb_find_work_register (live_regs_mask);
+
+ if (emit && ARM_EABI_UNWIND_TABLES)
+ asm_fprintf (f, "\t.pad #16\n");
+
+ bytes += 2;
+ if (emit)
+ asm_fprintf
+ (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
+ SP_REGNUM, SP_REGNUM);
+
+ if (emit && dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+
+ cfa_offset = cfa_offset + 16;
+ dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
+ }
+
+ if (l_mask)
+ {
+ bytes += handle_thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask, emit);
+ offset = bit_count (l_mask) * UNITS_PER_WORD;
+ }
+ else
+ offset = 0;
+
+ bytes += 4;
+ if (emit)
+ {
+ asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
+ offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset + 4);
+ }
+
+ bytes += 8;
+ if (emit)
+ {
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+ if (l_mask)
+ {
+ asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset + 12);
+ asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
+ ARM_HARD_FRAME_POINTER_REGNUM);
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
+ ARM_HARD_FRAME_POINTER_REGNUM);
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset);
+ asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset + 12);
+ }
+ }
+
+ bytes += 8;
+ if (emit)
+ {
+ asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
+ asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
+ offset + 8);
+ asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
+ offset + 12);
+ asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
+ ARM_HARD_FRAME_POINTER_REGNUM, work_register);
+ }
+ }
+ /* Optimization: If we are not pushing any low registers but we are going
+ to push some high registers then delay our first push. This will just
+ be a push of LR and we can combine it with the push of the first high
+ register. */
+ else if ((l_mask & 0xff) != 0
+ || (high_regs_pushed == 0 && l_mask))
+ bytes += handle_thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask, emit);
+
+ if (high_regs_pushed)
+ {
+ unsigned pushable_regs;
+ unsigned next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
+ if (live_regs_mask & (1 << next_hi_reg))
+ break;
+
+ /* APPLE LOCAL ARM thumb requires FP */
+ pushable_regs = l_mask & 0x7f;
+
+ if (pushable_regs == 0)
+ pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
+
+ while (high_regs_pushed > 0)
+ {
+ unsigned long real_regs_mask = 0;
+
+ for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
+ {
+ if (pushable_regs & (1 << regno))
+ {
+ bytes += 2;
+ if (emit)
+ asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
+
+ high_regs_pushed --;
+ real_regs_mask |= (1 << next_hi_reg);
+
+ if (high_regs_pushed)
+ {
+ for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
+ next_hi_reg --)
+ if (live_regs_mask & (1 << next_hi_reg))
+ break;
+ }
+ else
+ {
+ pushable_regs &= ~((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+
+ /* If we had to find a work register and we have not yet
+ saved the LR then add it to the list of regs to push. */
+ if (l_mask == (1 << LR_REGNUM))
+ {
+ bytes += handle_thumb_pushpop
+ (f, pushable_regs | (1 << LR_REGNUM),
+ 1, &cfa_offset,
+ real_regs_mask | (1 << LR_REGNUM), emit);
+ l_mask = 0;
+ }
+ else
+ bytes += handle_thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask, emit);
+ }
+ }
+ /* APPLE LOCAL begin 6465387 exception handling interworking VFP save */
+ if (current_function_has_nonlocal_label && arm_arch6)
+ {
+ bytes += 4;
+ if (emit)
+ {
+ asm_fprintf (f, "\tblx ___save_vfp_d8_d15_regs\n");
+ /* Let the debugger know about the additional space used.  */
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ /* The d8 - d15 save is 64 bytes of space.  */
+ cfa_offset = cfa_offset + 64;
+ dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
+ }
+ }
+ }
+ /* APPLE LOCAL end 6465387 exception handling interworking VFP save */
+ return bytes;
+}
+
+static void
+/* APPLE LOCAL v7 support. Merge from mainline */
+thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ (void) handle_thumb_unexpanded_prologue (f, true);
+}
+
+int
+count_thumb_unexpanded_prologue (void)
+{
+ return handle_thumb_unexpanded_prologue (NULL, false);
+}
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
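+/* The ordering rule used throughout: if the destination register also
+   forms part of the address, load the high word (at <address> + 4)
+   first so the address register survives for the second ldr;
+   otherwise load the low word first.  */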
+const char *
+thumb_load_double_from_address (rtx *operands)
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ gcc_assert (GET_CODE (operands[0]) == REG);
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ /* Get the memory address. */
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = adjust_address (operands[1], SImode, 4);
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ output_asm_insn ("ldr\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1", operands);
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+ operands[2] = adjust_address (operands[1], SImode, 4);
+
+ output_asm_insn ("ldr\t%0, %1", operands);
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ gcc_assert (GET_CODE (base) == REG);
+
+ /* Catch the case of <address> = <reg> + <reg> */
+ if (GET_CODE (offset) == REG)
+ {
+ /* APPLE LOCAL begin ARM compact switch tables */
+ /* thumb_legitimate_address_p won't allow this form,
+ and allowing a 3-instruction variant confuses
+ our instruction length counts, so remove it.
+ Details in rdar://5435967. */
+ gcc_unreachable ();
+ /* APPLE LOCAL end ARM compact switch tables */
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+ operands[2] = adjust_address (operands[1], SImode, 4);
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ output_asm_insn ("ldr\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1", operands);
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value
+ directly. */
+ operands[2] = adjust_address (operands[1], SImode, 4);
+
+ output_asm_insn ("ldr\t%H0, %2", operands);
+ output_asm_insn ("ldr\t%0, %1", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return "";
+}
+
+const char *
+thumb_output_move_mem_multiple (int n, rtx *operands)
+{
+ rtx tmp;
+
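+ /* ldmia/stmia expect their register lists in ascending order, so
+ sort the scratch registers first; the three-register case is a
+ small compare-and-swap network.  */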
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[4]) > REGNO (operands[5]))
+ {
+ tmp = operands[4];
+ operands[4] = operands[5];
+ operands[5] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
+ output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[4]) > REGNO (operands[5]))
+ {
+ tmp = operands[4];
+ operands[4] = operands[5];
+ operands[5] = tmp;
+ }
+ if (REGNO (operands[5]) > REGNO (operands[6]))
+ {
+ tmp = operands[5];
+ operands[5] = operands[6];
+ operands[6] = tmp;
+ }
+ if (REGNO (operands[4]) > REGNO (operands[5]))
+ {
+ tmp = operands[4];
+ operands[4] = operands[5];
+ operands[5] = tmp;
+ }
+
+ output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
+ output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return "";
+}
+
+/* Output a call-via instruction for thumb state. */
+const char *
+thumb_call_via_reg (rtx reg)
+{
+ int regno = REGNO (reg);
+ rtx *labelp;
+
+ gcc_assert (regno < LR_REGNUM);
+
+ /* If we are in the normal text section we can use a single instance
+ per compilation unit. If we are doing function sections, then we need
+ an entry per section, since we can't rely on reachability. */
+ if (in_section == text_section)
+ {
+ thumb_call_reg_needed = 1;
+
+ if (thumb_call_via_label[regno] == NULL)
+ thumb_call_via_label[regno] = gen_label_rtx ();
+ labelp = thumb_call_via_label + regno;
+ }
+ else
+ {
+ if (cfun->machine->call_via[regno] == NULL)
+ cfun->machine->call_via[regno] = gen_label_rtx ();
+ labelp = cfun->machine->call_via + regno;
+ }
+
+ output_asm_insn ("bl\t%a0", labelp);
+ return "";
+}
+
+/* Routines for generating rtl. */
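+/* Expand a constant-length block copy into ldmia/stmia pairs moving 12
+   or 8 bytes at a time, followed by word, halfword and byte moves for
+   the remainder; e.g. a 23-byte copy becomes 12 + 8 + 2 + 1.  */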
+void
+thumb_expand_movmemqi (rtx *operands)
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in, out, in));
+ len -= 12;
+ }
+
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in, out, in));
+ len -= 8;
+ }
+
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
+ emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
+
+void
+thumb_reload_out_hi (rtx *operands)
+{
+ emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
+}
+
+/* Handle reading a half-word from memory during reload. */
+void
+thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
+{
+ gcc_unreachable ();
+}
+
+/* Return the length of a function name prefix
+ that starts with the character 'c'. */
+static int
+arm_get_strip_length (int c)
+{
+ switch (c)
+ {
+ ARM_NAME_ENCODING_LENGTHS
+ default: return 0;
+ }
+}
+
+/* Return a pointer to a function's name with any
+ and all prefix encodings stripped from it. */
+const char *
+arm_strip_name_encoding (const char *name)
+{
+ int skip;
+
+ while ((skip = arm_get_strip_length (* name)))
+ name += skip;
+
+ return name;
+}
+
+/* If there is a '*' anywhere in the name's prefix, then
+ emit the stripped name verbatim, otherwise prepend an
+ underscore if leading underscores are being used. */
+void
+arm_asm_output_labelref (FILE *stream, const char *name)
+{
+ int skip;
+ int verbatim = 0;
+
+ while ((skip = arm_get_strip_length (* name)))
+ {
+ verbatim |= (*name == '*');
+ name += skip;
+ }
+
+ if (verbatim)
+ fputs (name, stream);
+ else
+ asm_fprintf (stream, "%U%s", name);
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+static void
+arm_file_start (void)
+{
+ int val;
+
+ if (TARGET_UNIFIED_ASM)
+ asm_fprintf (asm_out_file, "\t.syntax unified\n");
+
+ if (TARGET_BPABI)
+ {
+ const char *fpu_name;
+ if (arm_select[0].string)
+ asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
+ else if (arm_select[1].string)
+ asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
+ else
+ asm_fprintf (asm_out_file, "\t.cpu %s\n",
+ all_cores[arm_default_cpu].name);
+
+ if (TARGET_SOFT_FLOAT)
+ {
+ if (TARGET_VFP)
+ fpu_name = "softvfp";
+ else
+ fpu_name = "softfpa";
+ }
+ else
+ {
+ int set_float_abi_attributes = 0;
+ switch (arm_fpu_arch)
+ {
+ case FPUTYPE_FPA:
+ fpu_name = "fpa";
+ break;
+ case FPUTYPE_FPA_EMU2:
+ fpu_name = "fpe2";
+ break;
+ case FPUTYPE_FPA_EMU3:
+ fpu_name = "fpe3";
+ break;
+ case FPUTYPE_MAVERICK:
+ fpu_name = "maverick";
+ break;
+ case FPUTYPE_VFP:
+ fpu_name = "vfp";
+ set_float_abi_attributes = 1;
+ break;
+ case FPUTYPE_VFP3:
+ fpu_name = "vfp3";
+ set_float_abi_attributes = 1;
+ break;
+ case FPUTYPE_NEON:
+ fpu_name = "neon";
+ set_float_abi_attributes = 1;
+ break;
+ default:
+ abort ();
+ }
+ if (set_float_abi_attributes)
+ {
+ if (TARGET_HARD_FLOAT)
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
+ if (TARGET_HARD_FLOAT_ABI)
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
+ }
+ }
+ asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
+
+ /* Some of these attributes only apply when the corresponding features
+ are used. However we don't have any easy way of figuring this out.
+ Conservatively record the setting that would have been used. */
+
+ /* Tag_ABI_PCS_wchar_t. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
+ (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
+
+ /* Tag_ABI_FP_rounding. */
+ if (flag_rounding_math)
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
+ if (!flag_unsafe_math_optimizations)
+ {
+ /* Tag_ABI_FP_denormal.  */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
+ /* Tag_ABI_FP_exceptions. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
+ }
+ /* Tag_ABI_FP_user_exceptions. */
+ if (flag_signaling_nans)
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
+ /* Tag_ABI_FP_number_model. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
+ flag_finite_math_only ? 1 : 3);
+
+ /* Tag_ABI_align8_needed. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
+ /* Tag_ABI_align8_preserved. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
+ /* Tag_ABI_enum_size. */
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
+ flag_short_enums ? 1 : 2);
+
+ /* Tag_ABI_optimization_goals. */
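+ /* In the EABI encoding these values mean, roughly: 1 = optimize
+ for speed, 2 = optimize aggressively for speed, 4 = optimize
+ aggressively for size, 6 = best debugging experience.  */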
+ if (optimize_size)
+ val = 4;
+ else if (optimize >= 2)
+ val = 2;
+ else if (optimize)
+ val = 1;
+ else
+ val = 6;
+ asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
+ }
+ /* APPLE LOCAL 6345234 begin place text sections together */
+#if TARGET_MACHO
+ /* Emit declarations for all code sections at the beginning of the file;
+ this keeps them from being separated by data sections, which can
+ lead to out-of-range branches. */
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__text,regular\n");
+ fprintf (asm_out_file, "\t.section __TEXT,__textcoal_nt,coalesced\n");
+ fprintf (asm_out_file, "\t.section __TEXT,__const_coal,coalesced\n");
+ if (MACHO_DYNAMIC_NO_PIC_P )
+ fprintf (asm_out_file,
+ "\t.section __TEXT,__symbol_stub4,symbol_stubs,none,12\n");
+ else
+ fprintf (asm_out_file,
+ "\t.section __TEXT,__picsymbolstub4,symbol_stubs,none,16\n");
+ }
+#endif
+ /* APPLE LOCAL 6345234 end place text sections together */
+ default_file_start ();
+}
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
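+/* At the end of the text section, emit the per-register call-via stubs
+   used by thumb_call_via_reg: each recorded label is output followed
+   by a "bx rN".  */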
+static void
+arm_file_end (void)
+{
+ int regno;
+
+ if (! thumb_call_reg_needed)
+ return;
+
+ switch_to_section (text_section);
+ asm_fprintf (asm_out_file, "\t.code 16\n");
+ ASM_OUTPUT_ALIGN (asm_out_file, 1);
+
+ for (regno = 0; regno < LR_REGNUM; regno++)
+ {
+ rtx label = thumb_call_via_label[regno];
+
+ if (label != 0)
+ {
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (label));
+ asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
+ }
+ }
+}
+
+/* APPLE LOCAL begin ARM asm file hooks */
+#if TARGET_MACHO
+static void
+arm_darwin_file_start (void)
+{
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ arm_file_start ();
+ darwin_file_start ();
+}
+
+static void
+arm_darwin_file_end (void)
+{
+ darwin_file_end ();
+ arm_file_end ();
+}
+#endif
+/* APPLE LOCAL end ARM asm file hooks */
+
+rtx aof_pic_label;
+
+#ifdef AOF_ASSEMBLER
+/* Special functions only needed when producing AOF syntax assembler. */
+
+struct pic_chain
+{
+ struct pic_chain * next;
+ const char * symname;
+};
+
+static struct pic_chain * aof_pic_chain = NULL;
+
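+/* Return the address of the PIC constant-pool slot for symbol X, as
+   aof_pic_label plus a byte offset, appending a new 4-byte slot to the
+   chain if X has not been seen before.  */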
+rtx
+aof_pic_entry (rtx x)
+{
+ struct pic_chain ** chainp;
+ int offset;
+
+ if (aof_pic_label == NULL_RTX)
+ {
+ aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
+ }
+
+ for (offset = 0, chainp = &aof_pic_chain; *chainp;
+ offset += 4, chainp = &(*chainp)->next)
+ if ((*chainp)->symname == XSTR (x, 0))
+ return plus_constant (aof_pic_label, offset);
+
+ *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
+ (*chainp)->next = NULL;
+ (*chainp)->symname = XSTR (x, 0);
+ return plus_constant (aof_pic_label, offset);
+}
+
+void
+aof_dump_pic_table (FILE *f)
+{
+ struct pic_chain * chain;
+
+ if (aof_pic_chain == NULL)
+ return;
+
+ asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
+ PIC_OFFSET_TABLE_REGNUM,
+ PIC_OFFSET_TABLE_REGNUM);
+ fputs ("|x$adcons|\n", f);
+
+ for (chain = aof_pic_chain; chain; chain = chain->next)
+ {
+ fputs ("\tDCD\t", f);
+ assemble_name (f, chain->symname);
+ fputs ("\n", f);
+ }
+}
+
+int arm_text_section_count = 1;
+
+/* A get_unnamed_section callback for switching to the text section. */
+
+static void
+aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ if (flag_pic)
+ fprintf (asm_out_file, ", PIC, REENTRANT");
+ fprintf (asm_out_file, "\n");
+}
+
+static int arm_data_section_count = 1;
+
+/* A get_unnamed_section callback for switching to the data section. */
+
+static void
+aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
+ arm_data_section_count++);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS.
+
+ AOF Assembler syntax is a nightmare when it comes to areas, since once
+ we change from one area to another, we can't go back again. Instead,
+ we must create a new area with the same attributes and add the new output
+ to that. Unfortunately, there is nothing we can do here to guarantee that
+ two areas with the same attributes will be linked adjacently in the
+ resulting executable, so we have to be careful not to do pc-relative
+ addressing across such boundaries. */
+
+static void
+aof_asm_init_sections (void)
+{
+ text_section = get_unnamed_section (SECTION_CODE,
+ aof_output_text_section_asm_op, NULL);
+ data_section = get_unnamed_section (SECTION_WRITE,
+ aof_output_data_section_asm_op, NULL);
+ readonly_data_section = text_section;
+}
+
+void
+zero_init_section (void)
+{
+ static int zero_init_count = 1;
+
+ fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
+ in_section = NULL;
+}
+
+/* The AOF assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare
+ a function as imported near the beginning of the file, and then to
+ export it later on. It is, however, possible to delay the decision
+ until all the functions in the file have been compiled. To get
+ around this, we maintain a list of the imports and exports, and
+ delete from it any that are subsequently defined. At the end of
+ compilation we spit the remainder of the list out before the END
+ directive. */
+
+struct import
+{
+ struct import * next;
+ const char * name;
+};
+
+static struct import * imports_list = NULL;
+
+void
+aof_add_import (const char *name)
+{
+ struct import * new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+aof_delete_import (const char *name)
+{
+ struct import ** old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+int arm_main_function = 0;
+
+static void
+aof_dump_imports (FILE *f)
+{
+ /* The AOF assembler needs this to cause the startup code to be extracted
+ from the library.  Bringing in __main causes the whole thing to work
+ automagically. */
+ if (arm_main_function)
+ {
+ switch_to_section (text_section);
+ fputs ("\tIMPORT __main\n", f);
+ fputs ("\tDCD __main\n", f);
+ }
+
+ /* Now dump the remaining imports. */
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+
+static void
+aof_globalize_label (FILE *stream, const char *name)
+{
+ default_globalize_label (stream, name);
+ if (! strcmp (name, "main"))
+ arm_main_function = 1;
+}
+
+static void
+aof_file_start (void)
+{
+ fputs ("__r0\tRN\t0\n", asm_out_file);
+ fputs ("__a1\tRN\t0\n", asm_out_file);
+ fputs ("__a2\tRN\t1\n", asm_out_file);
+ fputs ("__a3\tRN\t2\n", asm_out_file);
+ fputs ("__a4\tRN\t3\n", asm_out_file);
+ fputs ("__v1\tRN\t4\n", asm_out_file);
+ fputs ("__v2\tRN\t5\n", asm_out_file);
+ fputs ("__v3\tRN\t6\n", asm_out_file);
+ fputs ("__v4\tRN\t7\n", asm_out_file);
+ fputs ("__v5\tRN\t8\n", asm_out_file);
+ fputs ("__v6\tRN\t9\n", asm_out_file);
+ fputs ("__sl\tRN\t10\n", asm_out_file);
+ fputs ("__fp\tRN\t11\n", asm_out_file);
+ fputs ("__ip\tRN\t12\n", asm_out_file);
+ fputs ("__sp\tRN\t13\n", asm_out_file);
+ fputs ("__lr\tRN\t14\n", asm_out_file);
+ fputs ("__pc\tRN\t15\n", asm_out_file);
+ fputs ("__f0\tFN\t0\n", asm_out_file);
+ fputs ("__f1\tFN\t1\n", asm_out_file);
+ fputs ("__f2\tFN\t2\n", asm_out_file);
+ fputs ("__f3\tFN\t3\n", asm_out_file);
+ fputs ("__f4\tFN\t4\n", asm_out_file);
+ fputs ("__f5\tFN\t5\n", asm_out_file);
+ fputs ("__f6\tFN\t6\n", asm_out_file);
+ fputs ("__f7\tFN\t7\n", asm_out_file);
+ switch_to_section (text_section);
+}
+
+static void
+aof_file_end (void)
+{
+ if (flag_pic)
+ aof_dump_pic_table (asm_out_file);
+ arm_file_end ();
+ aof_dump_imports (asm_out_file);
+ fputs ("\tEND\n", asm_out_file);
+}
+#endif /* AOF_ASSEMBLER */
+
+/* APPLE LOCAL ARM darwin section_info */
+#if !defined(ARM_PE) && !TARGET_MACHO
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+
+static void
+arm_encode_section_info (tree decl, rtx rtl, int first)
+{
+ /* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+ if (optimize > 0 && TREE_CONSTANT (decl))
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+#endif
+
+ /* If we are referencing a function that is weak then encode a long call
+ flag in the function name, otherwise if the function is static or
+ known to be defined in this file then encode a short call flag.  */
+ if (first && DECL_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
+ else if (! TREE_PUBLIC (decl))
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
+ }
+
+ default_encode_section_info (decl, rtl, first);
+}
+/* APPLE LOCAL begin ARM darwin section_info */
+#endif /* !ARM_PE && !TARGET_MACHO */
+
+#if TARGET_MACHO
+/* Encode the standard darwin attributes, plus the longcall flag. */
+
+static void
+arm_darwin_encode_section_info (tree decl, rtx rtl, int first)
+{
+ darwin_encode_section_info (decl, rtl, first);
+
+ if (optimize > 0 && TREE_CONSTANT (decl))
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+
+ /* If we are referencing a function with default visibility that is
+ weak then encode a long call flag in the function name, otherwise
+ if the function is static or known to be defined in this file
+ then encode a short call flag. */
+ if (DECL_P (decl))
+ {
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_WEAK (decl)
+ && DECL_VISIBILITY (decl) == VISIBILITY_DEFAULT)
+ arm_encode_call_attribute (decl, SYMBOL_LONG_CALL);
+ /* Should this be binds_local_p??? */
+ else if (! TREE_PUBLIC (decl))
+ arm_encode_call_attribute (decl, SYMBOL_SHORT_CALL);
+ }
+}
+#endif
+/* APPLE LOCAL end ARM darwin section_info */
+
+static void
+arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
+{
+ if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
+ && !strcmp (prefix, "L"))
+ {
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+ default_internal_label (stream, prefix, labelno);
+}
+
+/* Output code to add DELTA to the first argument, and then jump
+ to FUNCTION. Used for C++ multiple inheritance. */
+static void
+arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
+ tree function)
+{
+ /* APPLE LOCAL begin ARM 4620953 4745175 5920116 */
+ static int thunk_label = 0;
+ char label[256];
+ char labelpc[256];
+ int mi_delta = delta;
+ const char *const mi_op = mi_delta < 0 ? "sub" : "add";
+ int shift = 0;
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
+ ? 1 : 0);
+ rtx function_rtx = XEXP (DECL_RTL (function), 0);
+ const char *function_name;
+ bool is_longcall = arm_is_longcall_p (function_rtx,
+ SYMBOL_REF_FLAGS (function_rtx),
+ 1);
+ bool is_indirected = false;
+
+
+ /* Darwin/mach-o: use a stub for dynamic references. */
+#if TARGET_MACHO
+ if (TARGET_MACHO
+ && MACHOPIC_INDIRECT
+ && (! machopic_data_defined_p (function_rtx)))
+ {
+ function_name = machopic_indirection_name (function_rtx, !is_longcall);
+ /* APPLE LOCAL 6858124 don't indirect if it's just a stub */
+ is_indirected = is_longcall;
+ }
+ else
+#endif
+ function_name = XSTR (function_rtx, 0);
+
+ if (mi_delta < 0)
+ mi_delta = - mi_delta;
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* APPLE LOCAL 6361608 begin Thumb-2 longcall thunks */
+ /* When generating 16-bit thumb-1 code, thunks are entered in arm mode.
+ In thumb-2, thunks can be in thumb mode. */
+ /* APPLE LOCAL 6361608 end Thumb-2 longcall thunks */
+ if (TARGET_THUMB1 || is_longcall)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ {
+ int labelno = thunk_label++;
+ ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
+ fputs ("\tldr\tr12, ", file);
+ assemble_name (file, label);
+ fputc ('\n', file);
+ if (flag_pic)
+ {
+ /* If we are generating PIC, the ldr instruction below loads
+ "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
+ the address of the add + 8, so we have:
+
+ r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
+ = target + 1.
+
+ Note that we have "+ 1" because some versions of GNU ld
+ don't set the low bit of the result for R_ARM_REL32
+ relocations against thumb function symbols. */
+ ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
+ assemble_name (file, labelpc);
+ fputs (":\n", file);
+ fputs ("\tadd\tr12, pc, r12\n", file);
+ }
+ if (is_indirected)
+ fputs ("\tldr\tr12, [r12]\n", file);
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* TODO: Use movw/movt for large constants when available. */
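+  /* The delta is applied as a series of add/sub instructions, each
+     covering one aligned 8-bit field of the constant; a hypothetical
+     delta of 0x12345, say, would emit adds of #0x45, #0x2300 and
+     #0x10000.  */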
+ while (mi_delta != 0)
+ {
+ if ((mi_delta & (3 << shift)) == 0)
+ shift += 2;
+ else
+ {
+ asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
+ mi_op, this_regno, this_regno,
+ mi_delta & (0xff << shift));
+ mi_delta &= ~(0xff << shift);
+ shift += 8;
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1 || is_longcall)
+ {
+ fprintf (file, "\tbx\tr12\n");
+ ASM_OUTPUT_ALIGN (file, 2);
+ assemble_name (file, label);
+ fputs (":\n", file);
+ if (flag_pic)
+ {
+ /* APPLE LOCAL 6361608 begin Thumb-2 longcall thunks */
+ int pc_offset;
+ /* If we're branching to a local Thumb routine, output:
+ ".word .LTHUNKn-7-.LTHUNKPCn".
+ Otherwise, output:
+ ".word .LTHUNKn-8-.LTHUNKPCn".
+ (inter-module thumbness is fixed up by the linker).
+ If we're in a Thumb2 thunk, it's -4 and -3, respectively. */
+ rtx tem = gen_rtx_SYMBOL_REF (Pmode, function_name);
+
+	  /* Thumb2 add instructions with a PC source have a +4 bias.
+	     ARM mode has +8. */
+ pc_offset = TARGET_THUMB2 ? -4 : -8;
+ if (TARGET_MACHO && (TARGET_ARM || is_indirected))
+ tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (pc_offset));
+ else
+ tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (pc_offset+1));
+ /* APPLE LOCAL 6361608 end Thumb-2 longcall thunks */
+
+ tem = gen_rtx_MINUS (GET_MODE (tem),
+ tem,
+ gen_rtx_SYMBOL_REF (Pmode,
+ ggc_strdup (labelpc)));
+ assemble_integer (tem, 4, BITS_PER_WORD, 1);
+ }
+ else
+ /* Output ".word .LTHUNKn". */
+ assemble_integer (gen_rtx_SYMBOL_REF (Pmode, function_name),
+ 4, BITS_PER_WORD, 1);
+ }
+ else
+ {
+ /* APPLE LOCAL begin 6297258 */
+ if (TARGET_THUMB2)
+ fputs ("\tb.w\t", file);
+ else
+ fputs ("\tb\t", file);
+ /* APPLE LOCAL end 6297258 */
+
+ assemble_name (file, function_name);
+ if (NEED_PLT_RELOC)
+ fputs ("(PLT)", file);
+ fputc ('\n', file);
+ }
+ /* APPLE LOCAL end ARM 4620953 4745175 5920116 */
+}
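+
+/* For illustration only (label names and the exact constant depend on
+   ASM_GENERATE_INTERNAL_LABEL and the target): a PIC thumb-1 thunk
+   with a delta of 8 comes out roughly as
+
+	ldr	r12, LTHUMBFUNC0
+   LTHUNKPC0:
+	add	r12, pc, r12
+	add	r0, r0, #8
+	bx	r12
+	.align 2
+   LTHUMBFUNC0:
+	.long	_target-7-LTHUNKPC0
+
+   where r0 holds the incoming `this' pointer, and the +1 implied by
+   the -7 keeps the Thumb bit set in r12. */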
+
+int
+arm_emit_vector_const (FILE *file, rtx x)
+{
+ int i;
+ const char * pattern;
+
+ gcc_assert (GET_CODE (x) == CONST_VECTOR);
+
+ switch (GET_MODE (x))
+ {
+ case V2SImode: pattern = "%08x"; break;
+ case V4HImode: pattern = "%04x"; break;
+ case V8QImode: pattern = "%02x"; break;
+ default: gcc_unreachable ();
+ }
+
+ fprintf (file, "0x");
+ for (i = CONST_VECTOR_NUNITS (x); i--;)
+ {
+ rtx element;
+
+ element = CONST_VECTOR_ELT (x, i);
+ fprintf (file, pattern, INTVAL (element));
+ }
+
+ return 1;
+}
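+
+/* For example, a V4HImode constant vector {1, 2, 3, 4} is printed as
+   0x0004000300020001: elements are emitted from the highest index
+   down, each padded to the width implied by the mode. */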
+
+const char *
+arm_output_load_gr (rtx *operands)
+{
+ rtx reg;
+ rtx offset;
+ rtx wcgr;
+ rtx sum;
+
+ if (GET_CODE (operands [1]) != MEM
+ || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
+ || GET_CODE (reg = XEXP (sum, 0)) != REG
+ || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
+ || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
+ return "wldrw%?\t%0, %1";
+
+ /* Fix up an out-of-range load of a GR register. */
+ output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
+ wcgr = operands[0];
+ operands[0] = reg;
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+
+ operands[0] = wcgr;
+ operands[1] = reg;
+ output_asm_insn ("tmcr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
+
+ return "";
+}
+
+/* Worker function for TARGET_SETUP_INCOMING_VARARGS.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+
+static void
+arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED,
+ int *pretend_size,
+ int second_time ATTRIBUTE_UNUSED)
+{
+ cfun->machine->uses_anonymous_args = 1;
+ if (cum->nregs < NUM_ARG_REGS)
+ *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
+}
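+
+/* For example, given "int f (int a, ...)", one of the four argument
+   registers is consumed by the named argument, so *pretend_size is
+   set to 3 * UNITS_PER_WORD and the prologue pushes r1-r3. */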
+
+/* Return nonzero if the CONSUMER instruction (a store) does not need
+ PRODUCER's value to calculate the address. */
+
+int
+arm_no_early_store_addr_dep (rtx producer, rtx consumer)
+{
+ rtx value = PATTERN (producer);
+ rtx addr = PATTERN (consumer);
+
+ if (GET_CODE (value) == COND_EXEC)
+ value = COND_EXEC_CODE (value);
+ if (GET_CODE (value) == PARALLEL)
+ value = XVECEXP (value, 0, 0);
+ value = XEXP (value, 0);
+ if (GET_CODE (addr) == COND_EXEC)
+ addr = COND_EXEC_CODE (addr);
+ if (GET_CODE (addr) == PARALLEL)
+ addr = XVECEXP (addr, 0, 0);
+ addr = XEXP (addr, 0);
+
+ return !reg_overlap_mentioned_p (value, addr);
+}
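+
+/* For example, if PRODUCER is "(set (reg r1) ...)" and CONSUMER is
+   "(set (mem (reg r2)) (reg r1))", the store's data depends on r1 but
+   its address does not, so this returns nonzero. */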
+
+/* Return nonzero if the CONSUMER instruction (an ALU op) does not
+ have an early register shift value or amount dependency on the
+ result of PRODUCER. */
+
+int
+arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
+{
+ rtx value = PATTERN (producer);
+ rtx op = PATTERN (consumer);
+ rtx early_op;
+
+ if (GET_CODE (value) == COND_EXEC)
+ value = COND_EXEC_CODE (value);
+ if (GET_CODE (value) == PARALLEL)
+ value = XVECEXP (value, 0, 0);
+ value = XEXP (value, 0);
+ if (GET_CODE (op) == COND_EXEC)
+ op = COND_EXEC_CODE (op);
+ if (GET_CODE (op) == PARALLEL)
+ op = XVECEXP (op, 0, 0);
+ op = XEXP (op, 1);
+
+ early_op = XEXP (op, 0);
+ /* This is either an actual independent shift, or a shift applied to
+ the first operand of another operation. We want the whole shift
+ operation. */
+ if (GET_CODE (early_op) == REG)
+ early_op = op;
+
+ return !reg_overlap_mentioned_p (value, early_op);
+}
+
+/* Return nonzero if the CONSUMER instruction (an ALU op) does not
+ have an early register shift value dependency on the result of
+ PRODUCER. */
+
+int
+arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
+{
+ rtx value = PATTERN (producer);
+ rtx op = PATTERN (consumer);
+ rtx early_op;
+
+ if (GET_CODE (value) == COND_EXEC)
+ value = COND_EXEC_CODE (value);
+ if (GET_CODE (value) == PARALLEL)
+ value = XVECEXP (value, 0, 0);
+ value = XEXP (value, 0);
+ if (GET_CODE (op) == COND_EXEC)
+ op = COND_EXEC_CODE (op);
+ if (GET_CODE (op) == PARALLEL)
+ op = XVECEXP (op, 0, 0);
+ op = XEXP (op, 1);
+
+ early_op = XEXP (op, 0);
+
+ /* This is either an actual independent shift, or a shift applied to
+ the first operand of another operation. We want the value being
+ shifted, in either case. */
+ if (GET_CODE (early_op) != REG)
+ early_op = XEXP (early_op, 0);
+
+ return !reg_overlap_mentioned_p (value, early_op);
+}
+
+/* Return nonzero if the CONSUMER (a mul or mac op) does not
+ have an early register mult dependency on the result of
+ PRODUCER. */
+
+int
+arm_no_early_mul_dep (rtx producer, rtx consumer)
+{
+ rtx value = PATTERN (producer);
+ rtx op = PATTERN (consumer);
+
+ if (GET_CODE (value) == COND_EXEC)
+ value = COND_EXEC_CODE (value);
+ if (GET_CODE (value) == PARALLEL)
+ value = XVECEXP (value, 0, 0);
+ value = XEXP (value, 0);
+ if (GET_CODE (op) == COND_EXEC)
+ op = COND_EXEC_CODE (op);
+ if (GET_CODE (op) == PARALLEL)
+ op = XVECEXP (op, 0, 0);
+ op = XEXP (op, 1);
+
+ return (GET_CODE (op) == PLUS
+ && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
+}
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Return non-zero if the consumer (a multiply-accumulate instruction)
+ has an accumulator dependency on the result of the producer (a
+ multiplication instruction) and no other dependency on that result. */
+int
+arm_mac_accumulator_is_mul_result (rtx producer, rtx consumer)
+{
+ rtx mul = PATTERN (producer);
+ rtx mac = PATTERN (consumer);
+ rtx mul_result;
+ rtx mac_op0, mac_op1, mac_acc;
+
+ if (GET_CODE (mul) == COND_EXEC)
+ mul = COND_EXEC_CODE (mul);
+ if (GET_CODE (mac) == COND_EXEC)
+ mac = COND_EXEC_CODE (mac);
+
+ /* Check that mul is of the form (set (...) (mult ...))
+ and mla is of the form (set (...) (plus (mult ...) (...))). */
+ if ((GET_CODE (mul) != SET || GET_CODE (XEXP (mul, 1)) != MULT)
+ || (GET_CODE (mac) != SET || GET_CODE (XEXP (mac, 1)) != PLUS
+ || GET_CODE (XEXP (XEXP (mac, 1), 0)) != MULT))
+ return 0;
+
+ mul_result = XEXP (mul, 0);
+ mac_op0 = XEXP (XEXP (XEXP (mac, 1), 0), 0);
+ mac_op1 = XEXP (XEXP (XEXP (mac, 1), 0), 1);
+ mac_acc = XEXP (XEXP (mac, 1), 1);
+
+ return (reg_overlap_mentioned_p (mul_result, mac_acc)
+ && !reg_overlap_mentioned_p (mul_result, mac_op0)
+ && !reg_overlap_mentioned_p (mul_result, mac_op1));
+}
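+
+/* For example, "mul r1, r2, r3" followed by "mla r4, r5, r6, r1"
+   (r4 = r1 + r5 * r6) returns nonzero: r1 feeds only the accumulator.
+   With "mla r4, r1, r6, r1" it returns zero, since r1 is also a
+   multiply operand. */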
+
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* We can't rely on the caller doing the proper promotion when
+ using APCS or ATPCS. */
+
+static bool
+arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
+{
+ return !TARGET_AAPCS_BASED;
+}
+
+
+/* AAPCS based ABIs use short enums by default. */
+
+static bool
+arm_default_short_enums (void)
+{
+ return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
+}
+
+
+/* AAPCS requires that anonymous bitfields affect structure alignment. */
+
+static bool
+arm_align_anon_bitfield (void)
+{
+ return TARGET_AAPCS_BASED;
+}
+
+
+/* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
+
+static tree
+arm_cxx_guard_type (void)
+{
+ return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
+}
+
+
+/* The EABI says test the least significant bit of a guard variable. */
+
+static bool
+arm_cxx_guard_mask_bit (void)
+{
+ return TARGET_AAPCS_BASED;
+}
+
+
+/* The EABI specifies that all array cookies are 8 bytes long. */
+
+static tree
+arm_get_cookie_size (tree type)
+{
+ tree size;
+
+ if (!TARGET_AAPCS_BASED)
+ return default_cxx_get_cookie_size (type);
+
+ size = build_int_cst (sizetype, 8);
+ return size;
+}
+
+
+/* The EABI says that array cookies should also contain the element size. */
+
+static bool
+arm_cookie_has_size (void)
+{
+ return TARGET_AAPCS_BASED;
+}
+
+
+/* The EABI says constructors and destructors should return a pointer to
+ the object constructed/destroyed. */
+
+static bool
+arm_cxx_cdtor_returns_this (void)
+{
+ return TARGET_AAPCS_BASED;
+}
+
+/* The EABI says that an inline function may never be the key
+ method. */
+
+static bool
+arm_cxx_key_method_may_be_inline (void)
+{
+ return !TARGET_AAPCS_BASED;
+}
+
+static void
+arm_cxx_determine_class_data_visibility (tree decl)
+{
+ if (!TARGET_AAPCS_BASED)
+ return;
+
+ /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
+ is exported. However, on systems without dynamic vague linkage,
+ \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
+ if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
+ DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
+ else
+ DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
+ DECL_VISIBILITY_SPECIFIED (decl) = 1;
+}
+
+static bool
+arm_cxx_class_data_always_comdat (void)
+{
+/* APPLE LOCAL begin ARM follow Darwin semantics on Darwin */
+#if TARGET_MACHO
+ return false;
+#endif
+/* APPLE LOCAL end ARM follow Darwin semantics on Darwin */
+
+ /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
+ vague linkage if the class has no key function. */
+ return !TARGET_AAPCS_BASED;
+}
+
+
+/* The EABI says __aeabi_atexit should be used to register static
+ destructors. */
+
+static bool
+arm_cxx_use_aeabi_atexit (void)
+{
+ return TARGET_AAPCS_BASED;
+}
+
+
+void
+arm_set_return_address (rtx source, rtx scratch)
+{
+ arm_stack_offsets *offsets;
+ HOST_WIDE_INT delta;
+ rtx addr;
+ unsigned long saved_regs;
+
+ saved_regs = arm_compute_save_reg_mask ();
+
+ if ((saved_regs & (1 << LR_REGNUM)) == 0)
+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
+ else
+ {
+ if (frame_pointer_needed)
+ /* APPLE LOCAL ARM custom frame layout */
+ addr = plus_constant(hard_frame_pointer_rtx, 4);
+ else
+ {
+ /* LR will be the first saved register. */
+ offsets = arm_get_frame_offsets ();
+ /* APPLE LOCAL ARM custom frame layout */
+ delta = offsets->outgoing_args - (offsets->frame - 4);
+
+
+ if (delta >= 4096)
+ {
+ emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
+ GEN_INT (delta & ~4095)));
+ addr = scratch;
+ delta &= 4095;
+ }
+ else
+ addr = stack_pointer_rtx;
+
+ addr = plus_constant (addr, delta);
+ }
+ emit_move_insn (gen_frame_mem (Pmode, addr), source);
+ }
+}
+
+
+void
+thumb_set_return_address (rtx source, rtx scratch)
+{
+ arm_stack_offsets *offsets;
+ HOST_WIDE_INT delta;
+ int reg;
+ rtx addr;
+ unsigned long mask;
+
+ emit_insn (gen_rtx_USE (VOIDmode, source));
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ mask = thumb1_compute_save_reg_mask ();
+ if (mask & (1 << LR_REGNUM))
+ {
+ offsets = arm_get_frame_offsets ();
+
+ /* Find the saved regs. */
+ if (frame_pointer_needed)
+ {
+ /* APPLE LOCAL ARM custom frame layout */
+ delta = 4;
+ reg = THUMB_HARD_FRAME_POINTER_REGNUM;
+ }
+ else
+ {
+ /* APPLE LOCAL ARM custom frame layout */
+ delta = offsets->outgoing_args - (offsets->saved_args + 4);
+ reg = SP_REGNUM;
+ }
+ /* Allow for the stack frame. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1 && TARGET_BACKTRACE)
+ delta -= 16;
+ /* APPLE LOCAL ARM custom frame layout */
+ /* Removed lines. */
+
+ /* Construct the address. */
+ addr = gen_rtx_REG (SImode, reg);
+ if ((reg != SP_REGNUM && delta >= 128)
+ || delta >= 1024)
+ {
+ emit_insn (gen_movsi (scratch, GEN_INT (delta)));
+ emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
+ addr = scratch;
+ }
+ else
+ addr = plus_constant (addr, delta);
+
+ emit_move_insn (gen_frame_mem (Pmode, addr), source);
+ }
+ else
+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
+}
+
+/* Implements target hook vector_mode_supported_p. */
+bool
+arm_vector_mode_supported_p (enum machine_mode mode)
+{
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ /* Neon also supports V2SImode, etc. listed in the clause below. */
+ if (TARGET_NEON && (mode == V2SFmode || mode == V4SImode || mode == V8HImode
+ || mode == V16QImode || mode == V4SFmode || mode == V2DImode))
+ return true;
+
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ if ((mode == V2SImode)
+ || (mode == V4HImode)
+ || (mode == V8QImode))
+ return true;
+
+ return false;
+}
+
+/* APPLE LOCAL begin 7083296 Build without warnings. */
+/* Define a separate function to avoid build warnings about missing a
+ prototype for arm_vector_mode_supported_p. The MODE argument is an int
+ because arm.h is used in contexts where "enum machine_mode" is not
+ defined. The return type is "int" instead of "bool" for the same reason. */
+int
+valid_iwmmxt_reg_mode (int mode)
+{
+ return (arm_vector_mode_supported_p (mode) || mode == DImode);
+}
+/* APPLE LOCAL end 7083296 Build without warnings. */
+
+/* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
+ ARM insns and therefore guarantee that the shift count is modulo 256.
+ DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
+ guarantee no particular behavior for out-of-range counts. */
+
+static unsigned HOST_WIDE_INT
+arm_shift_truncation_mask (enum machine_mode mode)
+{
+ return mode == SImode ? 255 : 0;
+}
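+
+/* A consequence of returning 255 for SImode: the compiler may omit an
+   explicit AND of a variable shift count with 255, since the hardware
+   truncates the count to 8 bits anyway. */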
+
+
+/* Map internal gcc register numbers to DWARF2 register numbers. */
+
+unsigned int
+arm_dbx_register_number (unsigned int regno)
+{
+ if (regno < 16)
+ return regno;
+
+ /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
+ compatibility. The EABI defines them as registers 96-103. */
+ if (IS_FPA_REGNUM (regno))
+ return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
+
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ if (IS_VFP_REGNUM (regno))
+ {
+ /* See comment in arm_dwarf_register_span. */
+ if (VFP_REGNO_OK_FOR_SINGLE (regno))
+ /* APPLE LOCAL ARM 5757769 */
+ return 256 + regno - FIRST_VFP_REGNUM;
+ else
+ return 256 + (regno - FIRST_VFP_REGNUM) / 2;
+ }
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+ if (IS_IWMMXT_GR_REGNUM (regno))
+ return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
+
+ if (IS_IWMMXT_REGNUM (regno))
+ return 112 + regno - FIRST_IWMMXT_REGNUM;
+
+ gcc_unreachable ();
+}
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* DWARF models VFPv3 registers as 32 64-bit registers.
+   GCC models them as 64 32-bit registers, so we need to describe this to
+   the DWARF generation code.  Other registers can use the default.  */
+static rtx
+arm_dwarf_register_span (rtx rtl)
+{
+ unsigned regno;
+ int nregs;
+ int i;
+ rtx p;
+
+ regno = REGNO (rtl);
+ if (!IS_VFP_REGNUM (regno))
+ return NULL_RTX;
+
+ /* The EABI defines two VFP register ranges:
+ 64-95: Legacy VFPv2 numbering for S0-S31 (obsolescent)
+ 256-287: D0-D31
+     The recommended encoding for s0-s31 is a DW_OP_bit_piece of the
+ corresponding D register. However gdb6.6 does not support this, so
+ we use the legacy encodings. We also use these encodings for D0-D15
+ for compatibility with older debuggers. */
+ if (VFP_REGNO_OK_FOR_SINGLE (regno))
+ return NULL_RTX;
+
+ nregs = GET_MODE_SIZE (GET_MODE (rtl)) / 8;
+  p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
+ regno = (regno - FIRST_VFP_REGNUM) / 2;
+ for (i = 0; i < nregs; i++)
+ XVECEXP (p, 0, i) = gen_rtx_REG (DImode, 256 + regno + i);
+
+ return p;
+}
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
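+
+/* For example, a DImode value in d16 (a register with no
+   single-precision alias) is described as (parallel [(reg:DI 272)]),
+   i.e. DWARF register 256 + 16; a quad value in d16/d17 gets a
+   two-element parallel of registers 272 and 273. */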
+
+#ifdef TARGET_UNWIND_INFO
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Emit unwind directives for a store-multiple instruction or stack pointer
+ push during alignment.
+ These should only ever be generated by the function prologue code, so
+ expect them to have a particular form. */
+
+static void
+arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+{
+ int i;
+ HOST_WIDE_INT offset;
+ HOST_WIDE_INT nregs;
+ int reg_size;
+ unsigned reg;
+ unsigned lastreg;
+ rtx e;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ e = XVECEXP (p, 0, 0);
+ if (GET_CODE (e) != SET)
+ abort ();
+
+ /* First insn will adjust the stack pointer. */
+  /* APPLE LOCAL end v7 support. Merge from mainline */
+ if (GET_CODE (e) != SET
+ || GET_CODE (XEXP (e, 0)) != REG
+ || REGNO (XEXP (e, 0)) != SP_REGNUM
+ || GET_CODE (XEXP (e, 1)) != PLUS)
+ abort ();
+
+ offset = -INTVAL (XEXP (XEXP (e, 1), 1));
+ nregs = XVECLEN (p, 0) - 1;
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
+ if (reg < 16)
+ {
+      /* The function prologue may also push pc without annotating it,
+	 since pc is never restored.  We turn this into a stack pointer
+	 adjustment.  */
+ if (nregs * 4 == offset - 4)
+ {
+ fprintf (asm_out_file, "\t.pad #4\n");
+ offset -= 4;
+ }
+ reg_size = 4;
+ fprintf (asm_out_file, "\t.save {");
+ }
+ else if (IS_VFP_REGNUM (reg))
+ {
+ reg_size = 8;
+ fprintf (asm_out_file, "\t.vsave {");
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
+ {
+ /* FPA registers are done differently. */
+ asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
+ return;
+ }
+ else
+ /* Unknown register type. */
+ abort ();
+
+ /* If the stack increment doesn't match the size of the saved registers,
+ something has gone horribly wrong. */
+ if (offset != nregs * reg_size)
+ abort ();
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* Removed lines */
+ offset = 0;
+ lastreg = 0;
+ /* The remaining insns will describe the stores. */
+ for (i = 1; i <= nregs; i++)
+ {
+ /* Expect (set (mem <addr>) (reg)).
+ Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
+ e = XVECEXP (p, 0, i);
+ if (GET_CODE (e) != SET
+ || GET_CODE (XEXP (e, 0)) != MEM
+ || GET_CODE (XEXP (e, 1)) != REG)
+ abort ();
+
+ reg = REGNO (XEXP (e, 1));
+ if (reg < lastreg)
+ abort ();
+
+ if (i != 1)
+ fprintf (asm_out_file, ", ");
+ /* We can't use %r for vfp because we need to use the
+ double precision register names. */
+ if (IS_VFP_REGNUM (reg))
+ asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
+ else
+ asm_fprintf (asm_out_file, "%r", reg);
+
+#ifdef ENABLE_CHECKING
+ /* Check that the addresses are consecutive. */
+ e = XEXP (XEXP (e, 0), 0);
+ if (GET_CODE (e) == PLUS)
+ {
+ offset += reg_size;
+ if (GET_CODE (XEXP (e, 0)) != REG
+ || REGNO (XEXP (e, 0)) != SP_REGNUM
+ || GET_CODE (XEXP (e, 1)) != CONST_INT
+ || offset != INTVAL (XEXP (e, 1)))
+ abort ();
+ }
+ else if (i != 1
+ || GET_CODE (e) != REG
+ || REGNO (e) != SP_REGNUM)
+ abort ();
+#endif
+ }
+ fprintf (asm_out_file, "}\n");
+}
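+
+/* Roughly, a prologue "push {r4, r5, lr}" yields the directive
+   ".save {r4, r5, lr}", and a "vpush {d8, d9}" yields
+   ".vsave {d8, d9}". */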
+
+/* Emit unwind directives for a SET. */
+
+static void
+arm_unwind_emit_set (FILE * asm_out_file, rtx p)
+{
+ rtx e0;
+ rtx e1;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ unsigned reg;
+
+ e0 = XEXP (p, 0);
+ e1 = XEXP (p, 1);
+ switch (GET_CODE (e0))
+ {
+ case MEM:
+ /* Pushing a single register. */
+ if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
+ || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
+ || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
+ abort ();
+
+ asm_fprintf (asm_out_file, "\t.save ");
+ if (IS_VFP_REGNUM (REGNO (e1)))
+	asm_fprintf (asm_out_file, "{d%d}\n",
+		     (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
+      else
+	asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
+ break;
+
+ case REG:
+ if (REGNO (e0) == SP_REGNUM)
+ {
+ /* A stack increment. */
+ if (GET_CODE (e1) != PLUS
+ || GET_CODE (XEXP (e1, 0)) != REG
+ || REGNO (XEXP (e1, 0)) != SP_REGNUM
+ || GET_CODE (XEXP (e1, 1)) != CONST_INT)
+ abort ();
+
+ asm_fprintf (asm_out_file, "\t.pad #%wd\n",
+ -INTVAL (XEXP (e1, 1)));
+ }
+ else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
+ {
+ HOST_WIDE_INT offset;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ /* moved 'reg' to function level scope */
+
+ if (GET_CODE (e1) == PLUS)
+ {
+ if (GET_CODE (XEXP (e1, 0)) != REG
+ || GET_CODE (XEXP (e1, 1)) != CONST_INT)
+ abort ();
+ reg = REGNO (XEXP (e1, 0));
+ offset = INTVAL (XEXP (e1, 1));
+ asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
+ HARD_FRAME_POINTER_REGNUM, reg,
+ INTVAL (XEXP (e1, 1)));
+ }
+ else if (GET_CODE (e1) == REG)
+ {
+ reg = REGNO (e1);
+ asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
+ HARD_FRAME_POINTER_REGNUM, reg);
+ }
+ else
+ abort ();
+ }
+ else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
+ {
+ /* Move from sp to reg. */
+ asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
+ }
+ else if (GET_CODE (e1) == PLUS
+ && GET_CODE (XEXP (e1, 0)) == REG
+ && REGNO (XEXP (e1, 0)) == SP_REGNUM
+ && GET_CODE (XEXP (e1, 1)) == CONST_INT)
+ {
+ /* Set reg to offset from sp. */
+ asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
+		       REGNO (e0), (int) INTVAL (XEXP (e1, 1)));
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN)
+ {
+ /* Stack pointer save before alignment. */
+ reg = REGNO (e0);
+ asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
+ reg + 0x90, reg);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else
+ abort ();
+ break;
+
+ default:
+ abort ();
+ }
+}
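+
+/* For example, "(set (reg sp) (plus (reg sp) (const_int -16)))"
+   yields ".pad #16", and "(set (reg r4) (reg sp))" yields
+   ".movsp r4". */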
+
+
+/* Emit unwind directives for the given insn. */
+
+static void
+arm_unwind_emit (FILE * asm_out_file, rtx insn)
+{
+ rtx pat;
+
+ if (!ARM_EABI_UNWIND_TABLES)
+ return;
+
+ if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
+ return;
+
+ pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
+ if (pat)
+ pat = XEXP (pat, 0);
+ else
+ pat = PATTERN (insn);
+
+ switch (GET_CODE (pat))
+ {
+ case SET:
+ arm_unwind_emit_set (asm_out_file, pat);
+ break;
+
+ case SEQUENCE:
+ /* Store multiple. */
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ arm_unwind_emit_sequence (asm_out_file, pat);
+ break;
+
+ default:
+      abort ();
+ }
+}
+
+
+/* Output a reference from a function exception table to the type_info
+ object X. The EABI specifies that the symbol should be relocated by
+ an R_ARM_TARGET2 relocation. */
+
+static bool
+arm_output_ttype (rtx x)
+{
+ fputs ("\t.word\t", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ /* Use special relocations for symbol references. */
+ if (GET_CODE (x) != CONST_INT)
+ fputs ("(TARGET2)", asm_out_file);
+ fputc ('\n', asm_out_file);
+
+ return TRUE;
+}
+#endif /* TARGET_UNWIND_INFO */
+
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Handle UNSPEC DWARF call frame instructions. These are needed for dynamic
+ stack alignment. */
+
+static void
+arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
+{
+ rtx unspec = SET_SRC (pattern);
+ gcc_assert (GET_CODE (unspec) == UNSPEC);
+
+ switch (index)
+ {
+ case UNSPEC_STACK_ALIGN:
+ /* ??? We should set the CFA = (SP & ~7). At this point we haven't
+ put anything on the stack, so hopefully it won't matter.
+ CFA = SP will be correct after alignment. */
+ dwarf2out_reg_save_reg (label, stack_pointer_rtx,
+ SET_DEST (pattern));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Output unwind directives for the start/end of a function. */
+
+void
+arm_output_fn_unwind (FILE * f, bool prologue)
+{
+ if (!ARM_EABI_UNWIND_TABLES)
+ return;
+
+ if (prologue)
+ fputs ("\t.fnstart\n", f);
+ else
+ fputs ("\t.fnend\n", f);
+}
+
+static bool
+arm_emit_tls_decoration (FILE *fp, rtx x)
+{
+ enum tls_reloc reloc;
+ rtx val;
+
+ val = XVECEXP (x, 0, 0);
+ reloc = INTVAL (XVECEXP (x, 0, 1));
+
+ output_addr_const (fp, val);
+
+ switch (reloc)
+ {
+ case TLS_GD32:
+ fputs ("(tlsgd)", fp);
+ break;
+ case TLS_LDM32:
+ fputs ("(tlsldm)", fp);
+ break;
+ case TLS_LDO32:
+ fputs ("(tlsldo)", fp);
+ break;
+ case TLS_IE32:
+ fputs ("(gottpoff)", fp);
+ break;
+ case TLS_LE32:
+ fputs ("(tpoff)", fp);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ switch (reloc)
+ {
+ case TLS_GD32:
+ case TLS_LDM32:
+ case TLS_IE32:
+ fputs (" + (. - ", fp);
+ output_addr_const (fp, XVECEXP (x, 0, 2));
+ fputs (" - ", fp);
+ output_addr_const (fp, XVECEXP (x, 0, 3));
+ fputc (')', fp);
+ break;
+ default:
+ break;
+ }
+
+ return TRUE;
+}
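+
+/* For example, a TLS_LE32 reference to symbol "x" prints as
+   "x(tpoff)"; a TLS_GD32 reference additionally appends
+   " + (. - LABEL1 - LABEL2)" using the two labels stored in the
+   unspec vector (label names here are illustrative). */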
+
+bool
+arm_output_addr_const_extra (FILE *fp, rtx x)
+{
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+ return arm_emit_tls_decoration (fp, x);
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
+ {
+ char label[256];
+ int labelno = INTVAL (XVECEXP (x, 0, 0));
+
+ ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
+ assemble_name_raw (fp, label);
+
+ return TRUE;
+ }
+ else if (GET_CODE (x) == CONST_VECTOR)
+ return arm_emit_vector_const (fp, x);
+
+ return FALSE;
+}
+
+/* APPLE LOCAL begin ARM darwin local binding */
+#if TARGET_MACHO
+/* Cross-module name binding. Darwin does not support overriding
+ functions at dynamic-link time. */
+
+static bool
+arm_binds_local_p (tree decl)
+{
+ return default_binds_local_p_1 (decl,
+ flag_apple_kext && lang_hooks.vtable_p (decl));
+}
+#endif
+/* APPLE LOCAL end ARM darwin local binding */
+
+/* APPLE LOCAL begin ARM setjmp/longjmp interworking */
+static rtx
+arm_builtin_setjmp_frame_value (void)
+{
+ static rtx arm_hard_frame_pointer_rtx;
+ if (! arm_hard_frame_pointer_rtx)
+ arm_hard_frame_pointer_rtx =
+ gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM);
+ return arm_hard_frame_pointer_rtx;
+}
+/* APPLE LOCAL end ARM setjmp/longjmp interworking */
+
+/* APPLE LOCAL begin ARM optimization pragmas */
+/* Version of optimization_options (below) for use from #pragma
+   optimization_level.  Only per-function flags are reset.  */
+#if TARGET_MACHO
+void
+reset_optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
+{
+}
+#endif
+/* APPLE LOCAL end ARM optimization pragmas */
+
+/* APPLE LOCAL begin ARM pic support */
+#ifdef OBJECT_FORMAT_MACHO
+
+/* Generate PIC and indirect symbol stubs. */
+
+void
+machopic_output_stub (FILE *file, const char *symb, const char *stub)
+{
+ unsigned int length;
+ char *symbol_name, *lazy_ptr_name, *slp_label_name;
+ static int label = 0;
+
+ /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
+ symb = (*targetm.strip_name_encoding) (symb);
+
+ length = strlen (symb);
+ symbol_name = alloca (length + 32);
+ GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
+
+ lazy_ptr_name = alloca (length + 32);
+ GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
+
+ slp_label_name = alloca (length + 32);
+ GEN_SUFFIXED_NAME_FOR_SYMBOL (slp_label_name, symb, length, "$slp");
+
+ if (flag_pic == 2)
+ switch_to_section (darwin_sections[machopic_picsymbol_stub4_section]);
+ else
+ switch_to_section (darwin_sections[machopic_symbol_stub4_section]);
+
+ fprintf (file, "\t.align 2\n");
+
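+  /* The stub is ARM code, so switch the assembler out of Thumb mode. */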
+ if (TARGET_THUMB)
+ fprintf (file, "\t.code 32\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+ fprintf (file, "\tldr\tip, %s\n", slp_label_name);
+
+ label++;
+
+ if (flag_pic == 2)
+ fprintf (file, "L%d$scv:\tadd\tip, pc, ip\n", label);
+
+ fprintf (file, "\tldr\tpc, [ip, #0]\n");
+
+ if (flag_pic == 2)
+ fprintf (file, "%s:\n\t.long\t%s - (L%d$scv + 8)\n",
+ slp_label_name, lazy_ptr_name, label);
+ else
+ fprintf (file, "%s:\n\t.long\t%s\n",
+ slp_label_name, lazy_ptr_name);
+
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
+ fprintf (file, "%s:\n", lazy_ptr_name);
+ fprintf (file, "\t.indirect_symbol\t%s\n", symbol_name);
+ fprintf (file, "\t.long\tdyld_stub_binding_helper\n");
+}
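+
+/* With -fPIC (flag_pic == 2) the stub emitted for "_foo" looks
+   roughly like this (exact label spellings come from the
+   GEN_*_NAME_FOR_SYMBOL macros):
+
+   L_foo$stub:
+	.indirect_symbol _foo
+	ldr	ip, L_foo$slp
+   L1$scv:	add	ip, pc, ip
+	ldr	pc, [ip, #0]
+   L_foo$slp:
+	.long	L_foo$lazy_ptr - (L1$scv + 8)
+
+   followed by a lazy pointer that initially resolves to
+   dyld_stub_binding_helper. */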
+
+#endif
+/* APPLE LOCAL end ARM pic support */
+
+/* APPLE LOCAL begin ARM MACH assembler */
+extern bool iasm_memory_clobber (const char *);
+
+/* FIXME: we can be more specific here. */
+bool iasm_memory_clobber (const char *ARG_UNUSED (opcode))
+{
+ return true;
+}
+/* APPLE LOCAL end ARM MACH assembler */
+
+/* APPLE LOCAL begin ARM darwin optimization defaults */
+void
+optimization_options (int level, int size ATTRIBUTE_UNUSED)
+{
+  /* Disable strict aliasing; it breaks too much existing code.  */
+#if TARGET_MACHO
+ flag_strict_aliasing = 0;
+
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+
+ /* APPLE LOCAL conditionally disable local RA */
+ flag_local_alloc = 0;
+ /* APPLE LOCAL rerun cse after combine */
+ /* flag_rerun_cse_after_combine = 1; */
+
+  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends
+     to make register pressure even worse.  */
+#ifdef INSN_SCHEDULING
+ if (level > 1)
+ flag_schedule_insns = 0;
+#endif
+
+ /* radar 4094534. */
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+#endif
+
+#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
+ SUBTARGET_OPTIMIZATION_OPTIONS;
+#endif
+}
+/* APPLE LOCAL end ARM darwin optimization defaults */
+
+/* APPLE LOCAL begin ARM prefer SP to FP */
+/* Stabs is so much fun.  Stabs doesn't distinguish between an SP and an
+   FP offset -- if your function has a frame pointer, it is assumed
+   that offsets to locals are relative to it.  So, in the cases where we
+   have an FP but are using the SP anyway, we have to adjust the values
+   to be FP-based.  */
+HOST_WIDE_INT
+arm_local_debug_offset (rtx var)
+{
+ int offset;
+ int reg;
+
+ if (GET_CODE (var) == PLUS)
+ {
+ reg = REGNO (XEXP (var, 0));
+ offset = INTVAL (XEXP (var, 1));
+ }
+ else if (GET_CODE (var) == REG)
+ {
+ reg = REGNO (var);
+ offset = 0;
+ }
+ else
+ {
+ return 0;
+ }
+
+ if (frame_pointer_needed && reg == SP_REGNUM)
+ {
+      arm_stack_offsets *offsets = arm_get_frame_offsets ();
+
+ return offset + (offsets->frame - offsets->outgoing_args);
+ }
+ else
+ return offset;
+}
+/* APPLE LOCAL end ARM prefer SP to FP */
+
+/* APPLE LOCAL begin ARM compact switch tables */
+int arm_label_align (rtx label)
+{
+ rtx insn = NEXT_INSN (label);
+ if (insn
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE)
+ {
+ /* APPLE LOCAL 7083296 Build without warnings. */
+ if (XINT (PATTERN (insn), 1) == VUNSPEC_ALIGN)
+ return 2;
+ /* APPLE LOCAL 7083296 Build without warnings. */
+ if (XINT (PATTERN (insn), 1) == VUNSPEC_ALIGN8)
+ return 3;
+ }
+ return align_labels_log;
+}
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output assembly for a shift instruction.
+ SET_FLAGS determines how the instruction modifies the condition codes.
+   0 - Do not set condition codes.
+ 1 - Set condition codes.
+ 2 - Use smallest instruction. */
+const char *
+arm_output_shift (rtx *operands, int set_flags)
+{
+ char pattern[100];
+ static const char flag_chars[3] = {'?', '.', '!'};
+ const char *shift;
+ HOST_WIDE_INT val;
+ char c;
+
+ c = flag_chars[set_flags];
+ if (TARGET_UNIFIED_ASM)
+ {
+ shift = shift_op(operands[3], &val);
+ if (shift)
+ {
+ if (val != -1)
+ operands[2] = GEN_INT(val);
+ sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
+ }
+ else
+ sprintf (pattern, "mov%%%c\t%%0, %%1", c);
+ }
+ else
+ sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
+ output_asm_insn (pattern, operands);
+ return "";
+}
+
+/* Output a Thumb-2 casesi instruction. */
+const char *
+thumb2_output_casesi (rtx *operands)
+{
+ rtx diff_vec = PATTERN (next_real_insn (operands[2]));
+
+ gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
+
+ output_asm_insn ("cmp\t%0, %1", operands);
+ output_asm_insn ("bhi\t%l3", operands);
+  switch (GET_MODE (diff_vec))
+ {
+ case QImode:
+ return "tbb\t[%|pc, %0]";
+ case HImode:
+ return "tbh\t[%|pc, %0, lsl #1]";
+ case SImode:
+ /* APPLE LOCAL begin 6152801 SImode thumb2 switch table dispatch */
+ output_asm_insn ("adr\t%4, %l2", operands);
+ output_asm_insn ("add\t%4, %4, %0, lsl #2", operands);
+ return "mov\t%|pc, %4";
+ /* APPLE LOCAL end 6152801 SImode thumb2 switch table dispatch */
+ default:
+ gcc_unreachable ();
+ }
+}
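+
+/* For a QImode dispatch table this emits, with illustrative operands:
+	cmp	r0, #15
+	bhi	L4
+	tbb	[pc, r0]
+   HImode uses tbh with a halfword-scaled index instead. */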
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+/* A table and a function to perform ARM-specific name mangling for
+ NEON vector types in order to conform to the AAPCS (see "Procedure
+ Call Standard for the ARM Architecture", Appendix A). To qualify
+ for emission with the mangled names defined in that document, a
+ vector type must not only be of the correct mode but also be
+ composed of NEON vector element types (e.g. __builtin_neon_qi). */
+typedef struct
+{
+ enum machine_mode mode;
+ const char *element_type_name;
+ const char *aapcs_name;
+} arm_mangle_map_entry;
+
+static arm_mangle_map_entry arm_mangle_map[] = {
+ /* 64-bit containerized types. */
+ { V8QImode, "__builtin_neon_qi", "15__simd64_int8_t" },
+ { V8QImode, "__builtin_neon_uqi", "16__simd64_uint8_t" },
+ { V4HImode, "__builtin_neon_hi", "16__simd64_int16_t" },
+ { V4HImode, "__builtin_neon_uhi", "17__simd64_uint16_t" },
+ { V2SImode, "__builtin_neon_si", "16__simd64_int32_t" },
+ { V2SImode, "__builtin_neon_usi", "17__simd64_uint32_t" },
+ { V2SFmode, "__builtin_neon_sf", "18__simd64_float32_t" },
+ { V8QImode, "__builtin_neon_poly8", "16__simd64_poly8_t" },
+ { V4HImode, "__builtin_neon_poly16", "17__simd64_poly16_t" },
+ /* 128-bit containerized types. */
+ { V16QImode, "__builtin_neon_qi", "16__simd128_int8_t" },
+ { V16QImode, "__builtin_neon_uqi", "17__simd128_uint8_t" },
+ { V8HImode, "__builtin_neon_hi", "17__simd128_int16_t" },
+ { V8HImode, "__builtin_neon_uhi", "18__simd128_uint16_t" },
+ { V4SImode, "__builtin_neon_si", "17__simd128_int32_t" },
+ { V4SImode, "__builtin_neon_usi", "18__simd128_uint32_t" },
+ { V4SFmode, "__builtin_neon_sf", "19__simd128_float32_t" },
+ { V16QImode, "__builtin_neon_poly8", "17__simd128_poly8_t" },
+ { V8HImode, "__builtin_neon_poly16", "18__simd128_poly16_t" },
+ { VOIDmode, NULL, NULL }
+};
+
+const char *
+arm_mangle_type (tree type)
+{
+ arm_mangle_map_entry *pos = arm_mangle_map;
+
+ if (TREE_CODE (type) != VECTOR_TYPE)
+ return NULL;
+
+ /* Check the mode of the vector type, and the name of the vector
+ element type, against the table. */
+ while (pos->mode != VOIDmode)
+ {
+ tree elt_type = TREE_TYPE (type);
+
+ if (pos->mode == TYPE_MODE (type)
+ && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
+ pos->element_type_name))
+ return pos->aapcs_name;
+
+ pos++;
+ }
+
+ /* Use the default mangling for unrecognized (possibly user-defined)
+ vector types. */
+ return NULL;
+}
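+
+/* For example, int8x8_t is a V8QImode vector of __builtin_neon_qi
+   elements, so it is mangled as "15__simd64_int8_t"; a function
+   "void f (int8x8_t)" therefore mangles as "_Z1f15__simd64_int8_t". */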
+
+void
+arm_asm_output_addr_diff_vec (FILE *file, rtx label, rtx body)
+{
+ int idx, size = GET_MODE_SIZE (GET_MODE (body));
+ int pack = (TARGET_THUMB) ? 2 : 4;
+ /* APPLE LOCAL 5837498 assembler expr for (L1-L2)/2 */
+ /* removed unused variable "base_addr" */
+ int base_label_no = CODE_LABEL_NUMBER (label);
+  int vlen = XVECLEN (body, 1); /* includes trailing default */
+ const char* directive;
+ if (GET_MODE (body) == QImode)
+ directive = ".byte";
+ else if (GET_MODE (body) == HImode)
+ directive = ".short";
+ else
+ {
+ pack = 1;
+ directive = ".long";
+ }
+ /* Alignment of table was handled by aligning its label,
+ in final_scan_insn. */
+ targetm.asm_out.internal_label (file, "L", base_label_no);
+  /* The default case is not included in the output count.  */
+ if (TARGET_COMPACT_SWITCH_TABLES)
+ asm_fprintf (file, "\t%s\t%d @ size\n", directive, vlen - 1);
+ for (idx = 0; idx < vlen; idx++)
+ {
+ rtx target_label = XEXP (XVECEXP (body, 1, idx), 0);
+ /* APPLE LOCAL begin 5837498 assembler expr for (L1-L2)/2 */
+ if (GET_MODE (body) != SImode)
+ {
+	  /* ARM mode always uses SImode bodies.  */
+ asm_fprintf (file, "\t%s\t(L%d-L%d)/%d\n",
+ directive,
+ CODE_LABEL_NUMBER (target_label), base_label_no, pack);
+ }
+ /* APPLE LOCAL end 5837498 assembler expr for (L1-L2)/2 */
+ /* APPLE LOCAL begin 6152801 SImode thumb2 switch table dispatch */
+ else if (TARGET_ARM)
+ asm_fprintf (file, "\tb\tL%d\n",
+ CODE_LABEL_NUMBER (target_label));
+ else if (TARGET_THUMB2)
+ asm_fprintf (file, "\tb.w\tL%d\n",
+ CODE_LABEL_NUMBER (target_label));
+ /* APPLE LOCAL end 6152801 SImode thumb2 switch table dispatch */
+ else if (TARGET_COMPACT_SWITCH_TABLES || flag_pic)
+      /* Let the assembler do the computation here; one case where this
+	 is needed is when there are asm statements, which make
+	 compile-time computations unreliable.  */
+ asm_fprintf (file, "\t%s\tL%d-L%d\n",
+ directive,
+ CODE_LABEL_NUMBER (target_label), base_label_no);
+ else
+ asm_fprintf (file, "\t%s\tL%d\n", directive,
+ CODE_LABEL_NUMBER (target_label));
+ }
+ /* Pad to instruction boundary. */
+ if (TARGET_COMPACT_SWITCH_TABLES)
+    vlen = (vlen + 1 /* count */) * size;
+ else
+ vlen = vlen * size;
+ while (vlen % pack != 0)
+ {
+ asm_fprintf (file, "\t%s\t0 @ pad\n", directive);
+ vlen += size;
+ }
+}
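+
+/* For illustration, a two-case thumb-1 QImode table (plus the trailing
+   default) with base label L5 and compact switch tables enabled comes
+   out roughly as:
+
+   L5:
+	.byte	2 @ size
+	.byte	(L10-L5)/2
+	.byte	(L11-L5)/2
+	.byte	(L12-L5)/2
+
+   with no padding needed here, since the total is already even. */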
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* APPLE LOCAL begin ARM enhance conditional insn generation */
+/* A C expression to modify the code described by the conditional if
+ information CE_INFO, for the basic block BB, possibly updating the tests in
+ TRUE_EXPR, and FALSE_EXPR for converting the && and || parts of if-then or
+ if-then-else code to conditional instructions. Set either TRUE_EXPR or
+ FALSE_EXPR to a null pointer if the tests cannot be converted. */
+
+/* p_true and p_false are given expressions of the form:
+
+ (and (relop:CC (reg:CC) (const_int 0))
+ (relop:CC (reg:CC) (const_int 0)))
+
+ We try to simplify them to something that will work in a branch instruction.
+ If we can't do anything useful, return; the caller will try to substitute
+ the complex expression and will fail.
+ Currently the true and false cases are not handled.
+ It's surprising that there isn't already a routine somewhere that does this,
+ but I couldn't find one. */
+
+void
+arm_ifcvt_modify_multiple_tests (ce_if_block_t *ce_info ATTRIBUTE_UNUSED,
+ basic_block bb ATTRIBUTE_UNUSED,
+ rtx *p_true,
+ rtx *p_false)
+{
+ /* There is a dependency here on the order of codes in rtl.def,
+ also an assumption that none of the useful enum values will
+ collide with 0 or 1.
+ Order is: NE EQ GE GT LE LT GEU GTU LEU LTU */
+ static RTX_CODE and_codes[10][10] =
+ { { NE, 0, GT, GT, LT, LT, GTU, GTU, LTU, LTU },
+ { 0, EQ, EQ, 0, EQ, 0, EQ, 0, EQ, 0 },
+ { GT, EQ, GE, GT, EQ, 0, 0, 0, 0, 0 },
+ { GT, 0, GT, GT, 0, 0, 0, 0, 0, 0 },
+ { LT, EQ, EQ, 0, LE, LT, 0, 0, 0, 0 },
+ { LT, 0, 0, 0, LT, LT, 0, 0, 0, 0 },
+ { GTU, EQ, 0, 0, 0, 0, GEU, GTU, EQ, 0 },
+ { GTU, 0, 0, 0, 0, 0, GTU, GTU, 0, 0 },
+ { LTU, EQ, 0, 0, 0, 0, EQ, 0, LEU, LTU },
+ { LTU, 0, 0, 0, 0, 0, 0, 0, LTU, LTU } };
+
+ static RTX_CODE or_codes[10][10] =
+ { { NE, 1, 1, NE, 1, NE, 1, NE, 1, NE },
+ { 1, EQ, GE, GE, LE, LE, GEU, GEU, LEU, LEU },
+ { 1, GE, GE, GE, 1, 1, 0, 0, 0, 0 },
+ { NE, GE, GE, GT, 1, NE, 0, 0, 0, 0 },
+ { 1, LE, 1, 1, LE, LE, 0, 0, 0, 0 },
+ { NE, LE, 1, NE, LE, LT, 0, 0, 0, 0 },
+ { 1, GEU, 0, 0, 0, 0, GEU, GEU, 1, 1 },
+ { NE, GEU, 0, 0, 0, 0, GEU, GTU, 1, NE },
+ { 1, LEU, 0, 0, 0, 0, 1, 1, LEU, LEU },
+ { NE, LEU, 0, 0, 0, 0, 1, NE, LEU, LTU } };
+
+ rtx true_lhs = XEXP (*p_true, 0);
+ rtx false_lhs = XEXP (*p_false, 0);
+ rtx true_rhs = XEXP (*p_true, 1);
+ rtx false_rhs = XEXP (*p_false, 1);
+ int true_and_p, false_and_p;
+ RTX_CODE merged_code;
+
+ if (!TARGET_ARM)
+ return;
+
+ if (GET_CODE (*p_true) == AND)
+ true_and_p = true;
+ else if (GET_CODE (*p_true) == IOR)
+ true_and_p = false;
+ else
+ return;
+
+ if (GET_CODE (*p_false) == AND)
+ false_and_p = true;
+ else if (GET_CODE (*p_false) == IOR)
+ false_and_p = false;
+ else
+ return;
+
+  if (!cc_register (XEXP (true_lhs, 0), CCmode)
+      || !cc_register (XEXP (true_rhs, 0), CCmode)
+      || !cc_register (XEXP (false_lhs, 0), CCmode)
+      || !cc_register (XEXP (false_rhs, 0), CCmode))
+ return;
+
+ if (XEXP (true_lhs, 1) != const0_rtx
+ || XEXP (true_rhs, 1) != const0_rtx
+ || XEXP (false_lhs, 1) != const0_rtx
+ || XEXP (false_rhs, 1) != const0_rtx)
+ return;
+
+ if (GET_CODE (true_lhs) < NE || GET_CODE (true_lhs) > LTU
+ || GET_CODE (true_rhs) < NE || GET_CODE (true_rhs) > LTU)
+ *p_true = 0;
+ else
+ {
+ if (true_and_p)
+ merged_code = and_codes [GET_CODE (true_lhs) - NE][GET_CODE (true_rhs) - NE];
+ else
+ merged_code = or_codes [GET_CODE (true_lhs) - NE][GET_CODE (true_rhs) - NE];
+ if (merged_code == 0 || merged_code == 1)
+ *p_true = 0;
+ else
+ *p_true = gen_rtx_fmt_ee (merged_code, VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+ }
+
+ if (GET_CODE (false_lhs) < NE || GET_CODE (false_lhs) > LTU
+ || GET_CODE (false_rhs) < NE || GET_CODE (false_rhs) > LTU)
+ *p_false = 0;
+ else
+ {
+ if (false_and_p)
+ merged_code = and_codes [GET_CODE (false_lhs) - NE][GET_CODE (false_rhs) - NE];
+ else
+ merged_code = or_codes [GET_CODE (false_lhs) - NE][GET_CODE (false_rhs) - NE];
+ if (merged_code == 0 || merged_code == 1)
+ *p_false = 0;
+ else
+ *p_false = gen_rtx_fmt_ee (merged_code, VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+ }
+}
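+
+/* For example, "(GT && GE)" collapses to GT and "(LT || EQ)" collapses
+   to LE, while "(GE && LT)" hits a zero table entry (a contradiction)
+   and the corresponding test is nulled out. */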
+/* APPLE LOCAL end ARM enhance conditional insn generation */
+
+/* APPLE LOCAL begin 5946347 ms_struct support */
+/* Handle a "ms_struct" attribute; arguments as in struct
+ attribute_spec.handler. */
+static tree
+arm_handle_ms_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ else if (lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
+ {
+ warning (OPT_Wattributes, "%qs incompatible attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle a "gcc_struct" attribute; arguments as in struct
+ attribute_spec.handler. */
+static tree
+arm_handle_gcc_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ else if (lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))
+ {
+ /* ms_struct may be on the type by default (-mms-bitfields or
+ #pragma ms_struct), so gcc_struct simply means that if there
+ is an ms_struct attribute on type, remove it. */
+ remove_attribute ("ms_struct", TYPE_ATTRIBUTES (*type));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static bool
+arm_ms_bitfield_layout_p (tree record_type)
+{
+ return (lookup_attribute ("ms_struct",
+ TYPE_ATTRIBUTES (record_type)) != NULL);
+}
+
+/* Return the alignment necessary for the field when it's part of
+ an ms_struct attributed structure. */
+int
+arm_field_ms_struct_align (tree field)
+{
+ tree type = TREE_TYPE (field);
+ int desired_align;
+
+ if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE)
+ desired_align = TYPE_ALIGN (type);
+ else
+ {
+ enum machine_mode mode;
+ /* For non-aggregate types of BIGGEST_ALIGNMENT bits or greater,
+ the alignment should be the size of the type. For arrays, it
+	 should be the alignment of the members of the array.  */
+ mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
+ ? get_inner_array_type (type) : type);
+ desired_align = GET_MODE_BITSIZE (mode) > BIGGEST_ALIGNMENT ?
+ GET_MODE_BITSIZE (mode) : TYPE_ALIGN (type);
+ gcc_assert (desired_align <= BIGGEST_MS_STRUCT_ALIGNMENT);
+ }
+ return desired_align;
+}
+
+/* APPLE LOCAL end 5946347 ms_struct support */
+
+/* APPLE LOCAL begin ARM 6008578 */
+/* Minimum alignment of a function entry point, in bits. */
+int
+arm_function_boundary (void)
+{
+ /* APPLE LOCAL 6357106 thumb2 functions should be 4-byte aligned */
+ int min_align = TARGET_32BIT ? 32 : 16;
+
+ /* Even in Thumb mode, thunks are output as ARM functions. */
+ if (cfun && current_function_is_thunk)
+ min_align = MAX (min_align, 32);
+
+ /* e.g., Thumb functions with jump tables. */
+ if (cfun && cfun->needs_4byte_alignment)
+ min_align = MAX (min_align, 32);
+
+ /* If -falign-loops was specified, use that alignment. This is _not_
+ needed to guarantee that loop alignments within the function are
+ honored -- that's handled by the assembler and linker. However,
+ if we don't align the function, then our address calculations (in
+ arm_reorg) are incorrect, potentially wreaking havoc on the
+ constant pool calculations. */
+ min_align = MAX (min_align, align_loops * BITS_PER_UNIT);
+
+ return min_align;
+}
+/* APPLE LOCAL end ARM 6008578 */
+
+/* APPLE LOCAL begin 6160917 */
+/* Handle the cases where SECONDARY_INPUT_RELOAD_CLASS said that we
+ needed a scratch register. Currently, we only handle the case
+ where there was indexed literal addressing with an out-of-range
+ offset. */
+void
+neon_reload_in (rtx *operands, enum machine_mode mode)
+{
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ rtx mem_addr = XEXP (operands[1], 0);
+ if (GET_CODE (mem_addr) == PLUS
+ && GET_CODE (XEXP (mem_addr, 0)) == REG
+ && REG_MODE_OK_FOR_BASE_P (XEXP (mem_addr, 0), VOIDmode)
+ && ! arm_legitimate_index_p (mode, XEXP (mem_addr, 1), SET, 0))
+ {
+ rtx scratch;
+
+ /* Load the address into the scratch register provided,
+ and then indirect it. */
+ emit_move_insn (operands[2], mem_addr);
+ scratch = gen_rtx_MEM (mode, operands[2]);
+ emit_move_insn (operands[0], scratch);
+ return;
+ }
+ }
+ /* If you reach here, SECONDARY_INPUT_RELOAD_CLASS is indicating that
+ a scratch register is needed, but we don't have any code to
+ handle it. Add that code here. */
+ gcc_unreachable ();
+}
+
+/* Handle the cases where SECONDARY_OUTPUT_RELOAD_CLASS said that we
+ needed a scratch register. Currently, we only handle the case
+ where there was indexed literal addressing with an out-of-range
+ offset. */
+void
+neon_reload_out (rtx *operands, enum machine_mode mode)
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ rtx mem_addr = XEXP (operands[0], 0);
+ if (GET_CODE (mem_addr) == PLUS
+ && GET_CODE (XEXP (mem_addr, 0)) == REG
+ && REG_MODE_OK_FOR_BASE_P (XEXP (mem_addr, 0), VOIDmode)
+ && ! arm_legitimate_index_p (mode, XEXP (mem_addr, 1), SET, 0))
+ {
+ rtx scratch;
+
+ /* Load the address into the scratch register provided,
+ and then indirect it. */
+ emit_move_insn (operands[2], mem_addr);
+ scratch = gen_rtx_MEM (mode, operands[2]);
+ emit_move_insn (scratch, operands[1]);
+ return;
+ }
+ }
+ /* If you reach here, SECONDARY_OUTPUT_RELOAD_CLASS is indicating that
+ a scratch register is needed, but we don't have any code to
+ handle it. Add that code here. */
+ gcc_unreachable ();
+}
+/* APPLE LOCAL end 6160917 */
+
+/* APPLE LOCAL begin 5571707 Allow R9 as caller-saved register */
+/* For v4 and v5, we always reserve R9 for thread local data. For v6 and
+ v7, we can make it available when the target is iPhoneOS v3.0 or later. */
+void
+arm_darwin_subtarget_conditional_register_usage (void)
+{
+ if (!(arm_arch6 && !darwin_reserve_r9_on_v6) && !arm_arch7a)
+ fixed_regs[9] = 1;
+ call_used_regs[9] = 1;
+
+ if (TARGET_THUMB)
+ {
+ fixed_regs[THUMB_HARD_FRAME_POINTER_REGNUM] = 1;
+ call_used_regs[THUMB_HARD_FRAME_POINTER_REGNUM] = 1;
+ global_regs[THUMB_HARD_FRAME_POINTER_REGNUM] = 1;
+ }
+}
+/* APPLE LOCAL end 5571707 Allow R9 as caller-saved register */
+
+/* APPLE LOCAL begin 6902792 Q register clobbers in inline asm */
+/* Worker function for TARGET_MD_ASM_CLOBBERS.
+ We do this to translate references to Qn registers into the equivalent
+ D(2n)/D(2n+1) register pairs. */
+static tree
+arm_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
+ tree inputs ATTRIBUTE_UNUSED,
+ tree clobbers)
+{
+ tree tail;
+
+ for (tail = clobbers; tail; tail = TREE_CHAIN (tail))
+ {
+ const char *clobber_name;
+ clobber_name = TREE_STRING_POINTER (TREE_VALUE (tail));
+ if (tolower (clobber_name[0]) == 'q' && isdigit (clobber_name[1])
+ && (isdigit (clobber_name[2]) || clobber_name[2] == '\0'))
+ {
+ char regname[4] = "dXX";
+	  /* Found a Q register in the clobber list, so add the D reference
+	     to the upper dword of it.  The existing clobber for the Q
+	     register will automatically translate to the low dword.  */
+	  int regno = atoi (clobber_name + 1) * 2 + 1;
+	  snprintf (regname + 1, 3, "%d", regno);
+	  clobbers =
+	    tree_cons (NULL_TREE, build_string (strlen (regname), regname),
+ clobbers);
+ }
+ }
+ return clobbers;
+}
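+
+/* For example, an asm with "q0" in its clobber list gets "d1" added:
+   the original "q0" clobber resolves to d0, so together both halves
+   of the quad register are marked clobbered. */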
+/* APPLE LOCAL end 6902792 Q register clobbers in inline asm */
+
+#include "gt-arm.h"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm.h b/gcc-4.2.1-5666.3/gcc/config/arm/arm.h
new file mode 100644
index 000000000..5bc8fe82d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm.h
@@ -0,0 +1,3196 @@
+/* Definitions of target machine for GNU compiler, for ARM.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com)
+ Minor hacks by Nick Clifton (nickc@cygnus.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef GCC_ARM_H
+#define GCC_ARM_H
+
+/* APPLE LOCAL begin ARM darwin target */
+/* Overridden by arm/darwin.h, whether it is included first or not. */
+#ifndef TARGET_MACHO
+#define TARGET_MACHO 0
+#endif
+/* APPLE LOCAL end ARM darwin target */
+
+/* APPLE LOCAL begin 6150882 use thumb2 by default for v7 */
+/* thumb_option is initialized to -1, so we can tell whether the user
+ explicitly passed -mthumb or -mno-thumb. override_options will
+ set thumb_option = 1 if -mno-thumb was not seen. */
+#define TARGET_THUMB (thumb_option == 1)
+/* APPLE LOCAL end 6150882 use thumb2 by default for v7 */
+
+/* APPLE LOCAL ARM interworking */
+#define TARGET_INTERWORK (interwork_option == 1)
+
+/* The architecture define. */
+extern char arm_arch_name[];
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ /* Define __arm__ even when in thumb mode, for \
+ consistency with armcc. */ \
+ builtin_define ("__arm__"); \
+ builtin_define ("__APCS_32__"); \
+ if (TARGET_THUMB) \
+ builtin_define ("__thumb__"); \
+/* APPLE LOCAL begin v7 support. Merge from mainline */ \
+ if (TARGET_THUMB2) \
+ builtin_define ("__thumb2__"); \
+/* APPLE LOCAL end v7 support. Merge from mainline */ \
+ \
+ if (TARGET_BIG_END) \
+ { \
+ builtin_define ("__ARMEB__"); \
+ if (TARGET_THUMB) \
+ builtin_define ("__THUMBEB__"); \
+ if (TARGET_LITTLE_WORDS) \
+ builtin_define ("__ARMWEL__"); \
+ } \
+ else \
+ { \
+ builtin_define ("__ARMEL__"); \
+ if (TARGET_THUMB) \
+ builtin_define ("__THUMBEL__"); \
+ } \
+ \
+ if (TARGET_SOFT_FLOAT) \
+ builtin_define ("__SOFTFP__"); \
+ \
+ if (TARGET_VFP) \
+ builtin_define ("__VFP_FP__"); \
+ \
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */ \
+ if (TARGET_NEON) \
+ builtin_define ("__ARM_NEON__"); \
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */ \
+ /* Add a define for interworking. \
+ Needed when building libgcc.a. */ \
+ if (arm_cpp_interwork) \
+ builtin_define ("__THUMB_INTERWORK__"); \
+ \
+ builtin_assert ("cpu=arm"); \
+ builtin_assert ("machine=arm"); \
+ \
+ builtin_define (arm_arch_name); \
+ if (arm_arch_cirrus) \
+ builtin_define ("__MAVERICK__"); \
+ if (arm_arch_xscale) \
+ builtin_define ("__XSCALE__"); \
+ if (arm_arch_iwmmxt) \
+ builtin_define ("__IWMMXT__"); \
+ if (TARGET_AAPCS_BASED) \
+ builtin_define ("__ARM_EABI__"); \
+ } while (0)
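+
+/* For instance, a Thumb-2 compile defines __arm__, __APCS_32__,
+   __thumb__ and __thumb2__, plus the architecture macro held in
+   arm_arch_name; NEON targets additionally get __ARM_NEON__. */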
+
+/* The various ARM cores. */
+enum processor_type
+{
+#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ IDENT,
+#include "arm-cores.def"
+#undef ARM_CORE
+ /* Used to indicate that no processor has been specified. */
+ arm_none
+};
+
+enum target_cpus
+{
+#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+ TARGET_CPU_##IDENT,
+#include "arm-cores.def"
+#undef ARM_CORE
+ TARGET_CPU_generic
+};
+
+/* The processor for which instructions should be scheduled. */
+extern enum processor_type arm_tune;
+
+typedef enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+}
+arm_cc;
+
+extern arm_cc arm_current_cc;
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((arm_cc) (((int)X) ^ 1))
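+
+/* The condition codes are laid out in inverse pairs, so flipping the
+   low bit inverts a condition: e.g. ARM_INVERSE_CONDITION_CODE (ARM_GE)
+   is ARM_LT. */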
+
+extern int arm_target_label;
+extern int arm_ccfsm_state;
+extern GTY(()) rtx arm_target_insn;
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+extern GTY(()) rtx arm_compare_op0;
+extern GTY(()) rtx arm_compare_op1;
+/* The label of the current constant pool. */
+extern rtx pool_vector_label;
+/* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+extern int return_used_this_function;
+/* Used to produce AOF syntax assembler. */
+extern GTY(()) rtx aof_pic_label;
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+
+#undef CPP_SPEC
+#define CPP_SPEC "%(subtarget_cpp_spec) \
+%{msoft-float:%{mhard-float: \
+ %e-msoft-float and -mhard-float may not be used together}} \
+%{mbig-endian:%{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together}}"
+
+#ifndef CC1_SPEC
+#define CC1_SPEC ""
+#endif
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GCC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC ""
+#endif
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/generic)", stderr);
+#endif
+
+#define TARGET_SOFT_FLOAT (arm_float_abi == ARM_FLOAT_ABI_SOFT)
+/* Use hardware floating point instructions. */
+#define TARGET_HARD_FLOAT (arm_float_abi != ARM_FLOAT_ABI_SOFT)
+/* Use hardware floating point calling convention. */
+#define TARGET_HARD_FLOAT_ABI (arm_float_abi == ARM_FLOAT_ABI_HARD)
+#define TARGET_FPA (arm_fp_model == ARM_FP_MODEL_FPA)
+#define TARGET_MAVERICK (arm_fp_model == ARM_FP_MODEL_MAVERICK)
+#define TARGET_VFP (arm_fp_model == ARM_FP_MODEL_VFP)
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define TARGET_IWMMXT (arm_arch_iwmmxt)
+#define TARGET_REALLY_IWMMXT (TARGET_IWMMXT && TARGET_32BIT)
+#define TARGET_IWMMXT_ABI (TARGET_32BIT && arm_abi == ARM_ABI_IWMMXT)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#define TARGET_ARM (! TARGET_THUMB)
+#define TARGET_EITHER 1 /* (TARGET_ARM | TARGET_THUMB) */
+#define TARGET_BACKTRACE (leaf_function_p () \
+ ? TARGET_TPCS_LEAF_FRAME \
+ : TARGET_TPCS_FRAME)
+#define TARGET_LDRD (arm_arch5e && ARM_DOUBLEWORD_ALIGN)
+#define TARGET_AAPCS_BASED \
+ (arm_abi != ARM_ABI_APCS && arm_abi != ARM_ABI_ATPCS)
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* True if we should avoid generating conditional execution instructions. */
+#define TARGET_NO_COND_EXEC (arm_tune_marvell_f && !optimize_size)
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+#define TARGET_HARD_TP (target_thread_pointer == TP_CP15)
+#define TARGET_SOFT_TP (target_thread_pointer == TP_SOFT)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Only 16-bit thumb code. */
+#define TARGET_THUMB1 (TARGET_THUMB && !arm_arch_thumb2)
+/* Arm or Thumb-2 32-bit code. */
+#define TARGET_32BIT (TARGET_ARM || arm_arch_thumb2)
+/* 32-bit Thumb-2 code. */
+#define TARGET_THUMB2 (TARGET_THUMB && arm_arch_thumb2)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Thumb-1 only. */
+#define TARGET_THUMB1_ONLY (TARGET_THUMB1 && !arm_arch_notm)
+
+/* The following two macros concern the ability to execute coprocessor
+ instructions for VFPv3 or NEON. TARGET_VFP3 is currently only ever
+ tested when we know we are generating for VFP hardware; we need to
+ be more careful with TARGET_NEON as noted below. */
+
+/* FPU is VFPv3 (with twice the number of D registers). Setting the FPU to
+ Neon automatically enables VFPv3 too. */
+#define TARGET_VFP3 (arm_fp_model == ARM_FP_MODEL_VFP \
+ && (arm_fpu_arch == FPUTYPE_VFP3 \
+ || arm_fpu_arch == FPUTYPE_NEON))
+/* FPU supports Neon instructions. The setting of this macro gets
+ revealed via __ARM_NEON__ so we add extra guards upon TARGET_32BIT
+ and TARGET_HARD_FLOAT to ensure that NEON instructions are
+ available. */
+#define TARGET_NEON (TARGET_32BIT && TARGET_HARD_FLOAT \
+ && arm_fp_model == ARM_FP_MODEL_VFP \
+ && arm_fpu_arch == FPUTYPE_NEON)
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+
+/* "DSP" multiply instructions, eg. SMULxy. */
+#define TARGET_DSP_MULTIPLY \
+ (TARGET_32BIT && arm_arch5e && arm_arch_notm)
+/* Integer SIMD instructions, and extend-accumulate instructions. */
+#define TARGET_INT_SIMD \
+ (TARGET_32BIT && arm_arch6 && arm_arch_notm)
+
+/* We could use unified syntax for arm mode, but for now we just use it
+ for Thumb-2. */
+#define TARGET_UNIFIED_ASM TARGET_THUMB2
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* Use compact switch tables with libgcc handlers. */
+#define TARGET_COMPACT_SWITCH_TABLES \
+ (TARGET_THUMB1 && !TARGET_LONG_CALLS)
+/* APPLE LOCAL end ARM compact switch tables */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* True iff the full BPABI is being used. If TARGET_BPABI is true,
+ then TARGET_AAPCS_BASED must be true -- but the converse does not
+ hold. TARGET_BPABI implies the use of the BPABI runtime library,
+ etc., in addition to just the AAPCS calling conventions. */
+#ifndef TARGET_BPABI
+#define TARGET_BPABI false
+#endif
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-arch is ignored if -march or -mcpu are specified.
+ --with-cpu is ignored if -march or -mcpu are specified, and is overridden
+ by --with-arch.
+ --with-tune is ignored if -mtune or -mcpu are specified (but not affected
+ by -march).
+ --with-float is ignored if -mhard-float, -msoft-float or -mfloat-abi are
+ specified.
+ --with-fpu is ignored if -mfpu is specified.
+ --with-abi is ignored if -mabi is specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" }, \
+ {"cpu", "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" }, \
+ {"tune", "%{!mcpu=*:%{!mtune=*:-mtune=%(VALUE)}}" }, \
+ {"float", \
+ "%{!msoft-float:%{!mhard-float:%{!mfloat-abi=*:-mfloat-abi=%(VALUE)}}}" }, \
+ {"fpu", "%{!mfpu=*:-mfpu=%(VALUE)}"}, \
+ {"abi", "%{!mabi=*:-mabi=%(VALUE)}"}, \
+ {"mode", "%{!marm:%{!mthumb:-m%(VALUE)}}"},
+
+/* Which floating point model to use. */
+enum arm_fp_model
+{
+ ARM_FP_MODEL_UNKNOWN,
+ /* FPA model (Hardware or software). */
+ ARM_FP_MODEL_FPA,
+ /* Cirrus Maverick floating point model. */
+ ARM_FP_MODEL_MAVERICK,
+ /* VFP floating point model. */
+ ARM_FP_MODEL_VFP
+};
+
+extern enum arm_fp_model arm_fp_model;
+
+/* Which floating point hardware is available. Also update
+ fp_model_for_fpu in arm.c when adding entries to this list. */
+enum fputype
+{
+ /* No FP hardware. */
+ FPUTYPE_NONE,
+ /* Full FPA support. */
+ FPUTYPE_FPA,
+ /* Emulated FPA hardware, Issue 2 emulator (no LFM/SFM). */
+ FPUTYPE_FPA_EMU2,
+ /* Emulated FPA hardware, Issue 3 emulator. */
+ FPUTYPE_FPA_EMU3,
+ /* Cirrus Maverick floating point co-processor. */
+ FPUTYPE_MAVERICK,
+ /* VFP. */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ FPUTYPE_VFP,
+ /* VFPv3. */
+ FPUTYPE_VFP3,
+ /* Neon. */
+ FPUTYPE_NEON
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu_tune)
+
+/* What type of floating point to tune for. */
+extern enum fputype arm_fpu_tune;
+
+/* What type of floating point instructions are available. */
+extern enum fputype arm_fpu_arch;
+
+enum float_abi_type
+{
+ ARM_FLOAT_ABI_SOFT,
+ ARM_FLOAT_ABI_SOFTFP,
+ ARM_FLOAT_ABI_HARD
+};
+
+extern enum float_abi_type arm_float_abi;
+
+#ifndef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
+#endif
+
+/* Which ABI to use. */
+enum arm_abi_type
+{
+ ARM_ABI_APCS,
+ ARM_ABI_ATPCS,
+ ARM_ABI_AAPCS,
+ ARM_ABI_IWMMXT,
+ ARM_ABI_AAPCS_LINUX
+};
+
+extern enum arm_abi_type arm_abi;
+
+#ifndef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_APCS
+#endif
+
+/* Which thread pointer access sequence to use. */
+enum arm_tp_type {
+ TP_AUTO,
+ TP_SOFT,
+ TP_CP15
+};
+
+extern enum arm_tp_type target_thread_pointer;
+
+/* Nonzero if this chip supports the ARM Architecture 3M extensions. */
+extern int arm_arch3m;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions. */
+extern int arm_arch4;
+
+/* Nonzero if this chip supports the ARM Architecture 4T extensions. */
+extern int arm_arch4t;
+
+/* Nonzero if this chip supports the ARM Architecture 5 extensions. */
+extern int arm_arch5;
+
+/* Nonzero if this chip supports the ARM Architecture 5E extensions. */
+extern int arm_arch5e;
+
+/* Nonzero if this chip supports the ARM Architecture 6 extensions. */
+extern int arm_arch6;
+
+/* APPLE LOCAL begin 6258536 Atomic builtins */
+/* Nonzero if this chip supports the ARM Architecture 7a extensions. */
+extern int arm_arch7a;
+/* APPLE LOCAL end 6258536 Atomic builtins */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Nonzero if instructions not present in the 'M' profile can be used. */
+extern int arm_arch_notm;
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+
+/* Nonzero if generating thumb code. */
+extern int thumb_code;
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_tune_strongarm;
+
+/* Nonzero if this chip is a Cirrus variant. */
+extern int arm_arch_cirrus;
+
+/* Nonzero if this chip supports Intel XScale with Wireless MMX technology. */
+extern int arm_arch_iwmmxt;
+
+/* Nonzero if this chip is an XScale. */
+extern int arm_arch_xscale;
+
+/* Nonzero if tuning for XScale. */
+extern int arm_tune_xscale;
+
+/* Nonzero if tuning for stores via the write buffer. */
+extern int arm_tune_wbuf;
+
+/* Nonzero if we should define __THUMB_INTERWORK__ in the
+ preprocessor.
+   XXX This is a bit of a hack; it's intended to help work around
+   problems in GLD, which doesn't understand that armv5t code is
+   interworking clean.  */
+extern int arm_cpp_interwork;
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Nonzero if chip supports Thumb 2. */
+extern int arm_arch_thumb2;
+
+/* Nonzero if chip supports integer division instruction. */
+extern int arm_arch_hwdiv;
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+#define CAN_DEBUG_WITHOUT_FP
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Nonzero if PIC code requires explicit qualifiers to generate
+ PLT and GOT relocs rather than the assembler doing so implicitly.
+ Subtargets can override these if required. */
+#ifndef NEED_GOT_RELOC
+#define NEED_GOT_RELOC 0
+#endif
+#ifndef NEED_PLT_RELOC
+#define NEED_PLT_RELOC 0
+#endif
+
+/* Nonzero if we need to refer to the GOT with a PC-relative
+ offset. In other words, generate
+
+ .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
+
+ rather than
+
+ .word _GLOBAL_OFFSET_TABLE_ - (.Lxx + 8)
+
+ The default is true, which matches NetBSD. Subtargets can
+ override this if required. */
+#ifndef GOT_PCREL
+#define GOT_PCREL 1
+#endif
+
+/* Target machine storage layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = 1; \
+ (MODE) = SImode; \
+ }
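+
+/* For example, under PROMOTE_MODE a 'char' (QImode) scalar is widened
+   to SImode with UNSIGNEDP forced to 1, i.e. it is zero extended; a
+   32-bit 'int' is left alone since its size is already 4 bytes.  */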
+
+#define PROMOTE_FUNCTION_MODE(MODE, UNSIGNEDP, TYPE) \
+ if ((GET_MODE_CLASS (MODE) == MODE_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT) \
+ && GET_MODE_SIZE (MODE) < 4) \
+ (MODE) = SImode; \
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on processor pre-defines when compiling libgcc2.c.  */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ The rules are different based on whether or not we use FPA-format,
+ VFP-format or some other floating point co-processor's format doubles. */
+#define FLOAT_WORDS_BIG_ENDIAN (arm_float_words_big_endian ())
+
+#define UNITS_PER_WORD 4
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Use the option -mvectorize-with-neon-quad to override the use of doubleword
+ registers when autovectorizing for Neon, at least until multiple vector
+ widths are supported properly by the middle-end. */
+#define UNITS_PER_SIMD_WORD \
+ (TARGET_NEON ? (TARGET_NEON_VECTORIZE_QUAD ? 16 : 8) : UNITS_PER_WORD)
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* True if natural alignment is used for doubleword types. */
+#define ARM_DOUBLEWORD_ALIGN TARGET_AAPCS_BASED
+
+#define DOUBLEWORD_ALIGNMENT 64
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
+
+#define PREFERRED_STACK_BOUNDARY \
+ (arm_abi == ARM_ABI_ATPCS ? 64 : STACK_BOUNDARY)
+
+/* APPLE LOCAL begin ARM 6008578 */
+#define FUNCTION_BOUNDARY arm_function_boundary ()
+extern int arm_function_boundary (void);
+/* APPLE LOCAL end ARM 6008578 */
+
+/* The lowest bit is used to indicate Thumb-mode functions, so the
+ vbit must go into the delta field of pointers to member
+ functions. */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
+
+/* APPLE LOCAL begin 5946347 ms_struct support */
+#define TARGET_FIELD_MS_STRUCT_ALIGN(FIELD) arm_field_ms_struct_align (FIELD)
+#define BIGGEST_MS_STRUCT_ALIGNMENT 128
+/* APPLE LOCAL end 5946347 ms_struct support */
+
+/* XXX Blah -- this macro is used directly by libobjc. Since it
+ supports no vector modes, cut out the complexity and fall back
+ on BIGGEST_FIELD_ALIGNMENT. */
+#ifdef IN_TARGET_LIBS
+#define BIGGEST_FIELD_ALIGNMENT 64
+#endif
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_tune_xscale ? 1 : 2)
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
+ ? BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR : (ALIGN))
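+
+/* Consequently, when tuning for XScale in ARM mode the factor is 2 and
+   string constants are aligned to 2 * BITS_PER_WORD = 64 bits; in all
+   other cases they get the usual 32-bit word alignment.  */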
+
+/* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
+ value set in previous versions of this toolchain was 8, which produces more
+ compact structures. The command line option -mstructure_size_boundary=<n>
+ can be used to change this value. For compatibility with the ARM SDK
+ however the value should be left at 32. ARM SDT Reference Manual (ARM DUI
+ 0020D) page 2-20 says "Structures are aligned on word boundaries".
+ The AAPCS specifies a value of 8. */
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+extern int arm_structure_size_boundary;
+
+/* This is the value used to initialize arm_structure_size_boundary. If a
+ particular arm target wants to change the default value it should change
+ the definition of this macro, not STRUCTURE_SIZE_BOUNDARY. See netbsd.h
+ for an example of this. */
+#ifndef DEFAULT_STRUCTURE_SIZE_BOUNDARY
+#define DEFAULT_STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* wchar_t is unsigned under the AAPCS. */
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "int")
+
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "long unsigned int")
+#endif
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_AAPCS_BASED ? "int" : "long int")
+#endif
+
+/* AAPCS requires that structure alignment is affected by bitfields. */
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS TARGET_AAPCS_BASED
+#endif
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/*
+ mvf0 Cirrus floating point result
+ mvf1-mvf3 Cirrus floating point scratch
+ mvf4-mvf15 S Cirrus floating point variable. */
+
+/* s0-s15 VFP scratch (aka d0-d7).
+ s16-s31 S VFP variable (aka d8-d15).
+ vfpcc Not a real register. Represents the VFP condition
+ code flags. */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+/* APPLE LOCAL begin v7 support. Merge from mainline */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+/* APPLE LOCAL end v7 support. Merge from mainline */ \
+ 1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,0,0,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+/* APPLE LOCAL begin v7 support. Merge from mainline */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+/* APPLE LOCAL end v7 support. Merge from mainline */ \
+ 1 \
+}
+
+#ifndef SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#define SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#endif
+
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ int regno; \
+ \
+ /* APPLE LOCAL v7 support. Merge from mainline */ \
+ if (TARGET_SOFT_FLOAT || TARGET_THUMB1 || !TARGET_FPA) \
+ { \
+ for (regno = FIRST_FPA_REGNUM; \
+ regno <= LAST_FPA_REGNUM; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ \
+ if (TARGET_THUMB && optimize_size) \
+ { \
+ /* When optimizing for size, it's better not to use \
+ the HI regs, because of the overhead of stacking \
+ them. */ \
+ /* APPLE LOCAL v7 support. Merge from mainline */ \
+ /* ??? Is this still true for thumb2? */ \
+ for (regno = FIRST_HI_REGNUM; \
+ regno <= LAST_HI_REGNUM; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ \
+ /* The link register can be clobbered by any branch insn, \
+ but we have no way to track that at present, so mark \
+ it as unavailable. */ \
+ /* APPLE LOCAL v7 support. Merge from mainline */ \
+ if (TARGET_THUMB1) \
+ fixed_regs[LR_REGNUM] = call_used_regs[LR_REGNUM] = 1; \
+ \
+ /* APPLE LOCAL v7 support. Merge from mainline */ \
+ if (TARGET_32BIT && TARGET_HARD_FLOAT) \
+ { \
+ if (TARGET_MAVERICK) \
+ { \
+ for (regno = FIRST_FPA_REGNUM; \
+ regno <= LAST_FPA_REGNUM; ++ regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ for (regno = FIRST_CIRRUS_FP_REGNUM; \
+ regno <= LAST_CIRRUS_FP_REGNUM; ++ regno) \
+ { \
+ fixed_regs[regno] = 0; \
+ call_used_regs[regno] = regno < FIRST_CIRRUS_FP_REGNUM + 4; \
+ } \
+ } \
+ /* APPLE LOCAL begin v7 support. Merge from mainline */ \
+ if (TARGET_VFP) \
+ { \
+ /* VFPv3 registers are disabled when earlier VFP \
+ versions are selected due to the definition of \
+ LAST_VFP_REGNUM. */ \
+ for (regno = FIRST_VFP_REGNUM; \
+ regno <= LAST_VFP_REGNUM; ++ regno) \
+ { \
+ fixed_regs[regno] = 0; \
+ call_used_regs[regno] = regno < FIRST_VFP_REGNUM + 16 \
+ || regno >= FIRST_VFP_REGNUM + 32; \
+ } \
+ } \
+ /* APPLE LOCAL end v7 support. Merge from mainline */ \
+ } \
+ \
+ if (TARGET_REALLY_IWMMXT) \
+ { \
+ regno = FIRST_IWMMXT_GR_REGNUM; \
+ /* The 2002/10/09 revision of the XScale ABI has wCG0 \
+ and wCG1 as call-preserved registers. The 2002/11/21 \
+ revision changed this so that all wCG registers are \
+ scratch registers. */ \
+ for (regno = FIRST_IWMMXT_GR_REGNUM; \
+ regno <= LAST_IWMMXT_GR_REGNUM; ++ regno) \
+ fixed_regs[regno] = 0; \
+ /* The XScale ABI has wR0 - wR9 as scratch registers, \
+ the rest as call-preserved registers. */ \
+ for (regno = FIRST_IWMMXT_REGNUM; \
+ regno <= LAST_IWMMXT_REGNUM; ++ regno) \
+ { \
+ fixed_regs[regno] = 0; \
+ call_used_regs[regno] = regno < FIRST_IWMMXT_REGNUM + 10; \
+ } \
+ } \
+ \
+ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ } \
+ else if (TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 1; \
+ call_used_regs[10] = 1; \
+ } \
+ /* -mcaller-super-interworking reserves r11 for calls to \
+ _interwork_r11_call_via_rN(). Making the register global \
+ is an easy way of ensuring that it remains valid for all \
+ calls. */ \
+ if (TARGET_APCS_FRAME || TARGET_CALLER_INTERWORKING \
+ || TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) \
+ { \
+ fixed_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
+ call_used_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
+ if (TARGET_CALLER_INTERWORKING) \
+ global_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1; \
+ } \
+ SUBTARGET_CONDITIONAL_REGISTER_USAGE \
+}
+
+/* These are a couple of extensions to the formats accepted
+ by asm_fprintf:
+ %@ prints out ASM_COMMENT_START
+ %r prints out REGISTER_PREFIX reg_names[arg] */
+#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
+ case '@': \
+ fputs (ASM_COMMENT_START, FILE); \
+ break; \
+ \
+ case 'r': \
+ fputs (REGISTER_PREFIX, FILE); \
+ fputs (reg_names [va_arg (ARGS, int)], FILE); \
+ break;
+
+/* Round X up to the nearest word. */
+#define ROUND_UP_WORD(X) (((X) + 3) & ~3)
+
+/* Convert from bytes to ints. */
+#define ARM_NUM_INTS(X) (((X) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
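+
+/* For example, with UNITS_PER_WORD == 4, ROUND_UP_WORD (5) == 8 and
+   ARM_NUM_INTS (5) == 2: a five-byte quantity needs two core registers.  */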
+
+/* The number of (integer) registers required to hold a quantity of mode
+   MODE.  Also used for VFP registers.  */
+#define ARM_NUM_REGS(MODE) \
+ ARM_NUM_INTS (GET_MODE_SIZE (MODE))
+
+/* The number of (integer) registers required to hold a quantity of mode
+   MODE and type TYPE.  */
+#define ARM_NUM_REGS2(MODE, TYPE) \
+ ARM_NUM_INTS ((MODE) == BLKmode ? \
+ int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE))
+
+/* The number of (integer) argument registers available. */
+#define NUM_ARG_REGS 4
+
+/* Return the register number of the N'th (integer) argument. */
+#define ARG_REGISTER(N) (N - 1)
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* The number of the last argument register. */
+#define LAST_ARG_REGNUM ARG_REGISTER (NUM_ARG_REGS)
+
+/* The numbers of the Thumb register ranges. */
+#define FIRST_LO_REGNUM 0
+#define LAST_LO_REGNUM 7
+#define FIRST_HI_REGNUM 8
+#define LAST_HI_REGNUM 11
+
+#ifndef TARGET_UNWIND_INFO
+/* We use sjlj exceptions for backwards compatibility. */
+#define MUST_USE_SJLJ_EXCEPTIONS 1
+#endif
+
+/* We can generate DWARF2 Unwind info, even though we don't use it. */
+#define DWARF2_UNWIND_INFO 1
+
+/* Use r0 and r1 to pass exception handling information. */
+#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
+
+/* The register that holds the return address in exception handlers. */
+#define ARM_EH_STACKADJ_REGNUM 2
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+#define STATIC_CHAIN_REGNUM 12
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated.
+
+ For the Thumb we have another problem. The TPCS defines the frame pointer
+ as r11, and GCC believes that it is always possible to use the frame pointer
+ as base register for addressing purposes. (See comments in
+ find_reloads_address()). But - the Thumb does not allow high registers,
+ including r11, to be used as base address registers. Hence our problem.
+
+ The solution used here, and in the old thumb port is to use r7 instead of
+ r11 as the hard frame pointer and to have special code to generate
+ backtrace structures on the stack (if required to do so via a command line
+ option) using r11. This is the only 'user visible' use of r11 as a frame
+ pointer. */
+/* APPLE LOCAL ARM custom frame layout */
+#define ARM_HARD_FRAME_POINTER_REGNUM 7
+#define THUMB_HARD_FRAME_POINTER_REGNUM 7
+
+#define HARD_FRAME_POINTER_REGNUM \
+ (TARGET_ARM \
+ ? ARM_HARD_FRAME_POINTER_REGNUM \
+ : THUMB_HARD_FRAME_POINTER_REGNUM)
+
+#define FP_REGNUM HARD_FRAME_POINTER_REGNUM
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM SP_REGNUM
+
+/* ARM floating point registers. */
+#define FIRST_FPA_REGNUM 16
+#define LAST_FPA_REGNUM 23
+#define IS_FPA_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_FPA_REGNUM) && ((REGNUM) <= LAST_FPA_REGNUM))
+
+#define FIRST_IWMMXT_GR_REGNUM 43
+#define LAST_IWMMXT_GR_REGNUM 46
+#define FIRST_IWMMXT_REGNUM 47
+#define LAST_IWMMXT_REGNUM 62
+#define IS_IWMMXT_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_IWMMXT_REGNUM) && ((REGNUM) <= LAST_IWMMXT_REGNUM))
+#define IS_IWMMXT_GR_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_IWMMXT_GR_REGNUM) && ((REGNUM) <= LAST_IWMMXT_GR_REGNUM))
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+#define FIRST_CIRRUS_FP_REGNUM 27
+#define LAST_CIRRUS_FP_REGNUM 42
+#define IS_CIRRUS_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_CIRRUS_FP_REGNUM) && ((REGNUM) <= LAST_CIRRUS_FP_REGNUM))
+
+#define FIRST_VFP_REGNUM 63
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define D7_VFP_REGNUM 78 /* Registers 77 and 78 == VFP reg D7. */
+#define LAST_VFP_REGNUM \
+ (TARGET_VFP3 ? LAST_HI_VFP_REGNUM : LAST_LO_VFP_REGNUM)
+
+#define IS_VFP_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_VFP_REGNUM) && ((REGNUM) <= LAST_VFP_REGNUM))
+
+/* VFP registers are split into two types: those defined by VFP versions < 3
+ have D registers overlaid on consecutive pairs of S registers. VFP version 3
+ defines 16 new D registers (d16-d31) which, for simplicity and correctness
+ in various parts of the backend, we implement as "fake" single-precision
+ registers (which would be S32-S63, but cannot be used in that way). The
+ following macros define these ranges of registers. */
+#define LAST_LO_VFP_REGNUM 94
+#define FIRST_HI_VFP_REGNUM 95
+#define LAST_HI_VFP_REGNUM 126
+
+/* APPLE LOCAL 6150859 begin use NEON instructions for SF math */
+/* For NEON, SFmode values are only valid in even registers. */
+#define VFP_REGNO_OK_FOR_SINGLE(REGNUM) \
+ (((REGNUM) <= LAST_LO_VFP_REGNUM) \
+ && (TARGET_NEON ? ((((REGNUM) - FIRST_VFP_REGNUM) & 1) == 0): 1))
+/* APPLE LOCAL 6150859 end use NEON instructions for SF math */
+
+/* DFmode values are only valid in even register pairs. */
+#define VFP_REGNO_OK_FOR_DOUBLE(REGNUM) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 1) == 0)
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Neon Quad values must start at a multiple of four registers. */
+#define NEON_REGNO_OK_FOR_QUAD(REGNUM) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0)
+
+/* Neon structures of vectors must be in even register pairs and there
+ must be enough registers available. Because of various patterns
+ requiring quad registers, we require them to start at a multiple of
+ four. */
+#define NEON_REGNO_OK_FOR_NREGS(REGNUM, N) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0 \
+ && (LAST_VFP_REGNUM - (REGNUM) >= 2 * (N) - 1))
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
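+/* Working through the numbering: D7 starts at hard register 77 (see
+   D7_VFP_REGNUM above), so VFP_REGNO_OK_FOR_DOUBLE (77) holds because
+   (77 - 63) & 1 == 0, while NEON_REGNO_OK_FOR_QUAD (77) fails because
+   (77 - 63) & 3 == 2; quad values may start only at D0, D2, D4, ...  */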
+/* The number of hard registers is 16 ARM + 8 FPA + 1 CC + 1 SFP + 1 AFP. */
+/* + 16 Cirrus registers take us up to 43. */
+/* Intel Wireless MMX Technology registers add 16 + 4 more. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* VFP (VFP3) adds 32 (64) + 1 more. */
+#define FIRST_PSEUDO_REGISTER 128
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#define DBX_REGISTER_NUMBER(REGNO) arm_dbx_register_number (REGNO)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+
+#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
+#define SUBTARGET_FRAME_POINTER_REQUIRED 0
+#endif
+
+/* APPLE LOCAL begin ARM thumb requires FP */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || SUBTARGET_FRAME_POINTER_REQUIRED \
+ || current_function_calls_builtin_ret_addr \
+ || current_function_calls_builtin_frame_addr \
+ || ! flag_omit_frame_pointer \
+ || (TARGET_THUMB && ! leaf_function_p ()) \
+ || (TARGET_ARM && TARGET_APCS_FRAME && ! leaf_function_p ()) \
+ || (TARGET_ARM && regs_ever_live [LR_REGNUM]))
+/* APPLE LOCAL end ARM thumb requires FP */
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bits wide; FPA regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+/* APPLE LOCAL v7 support. Merge from mainline */ \
+ ((TARGET_32BIT \
+ && REGNO >= FIRST_FPA_REGNUM \
+ && REGNO != FRAME_POINTER_REGNUM \
+ && REGNO != ARG_POINTER_REGNUM) \
+ && !IS_VFP_REGNUM (REGNO) \
+ ? 1 : ARM_NUM_REGS (MODE))
+
+/* Return true if REGNO is suitable for holding a quantity of type MODE. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ arm_hard_regno_mode_ok ((REGNO), (MODE))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* APPLE LOCAL begin 7083296 Build without warnings. */
+/* The VALID_IWMMXT_REG_MODE macro is used in vec-common.md as a predicate so
+ that it is referenced from the generated insn-opinit.c file, which does
+ not include arm-protos.h. Define a separate function to avoid warnings. */
+#define VALID_IWMMXT_REG_MODE(MODE) \
+ (valid_iwmmxt_reg_mode (MODE))
+extern int valid_iwmmxt_reg_mode (int);
+/* APPLE LOCAL end 7083296 Build without warnings. */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Modes valid for Neon D registers. */
+#define VALID_NEON_DREG_MODE(MODE) \
+ ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
+ || (MODE) == V2SFmode || (MODE) == DImode)
+
+/* Modes valid for Neon Q registers. */
+#define VALID_NEON_QREG_MODE(MODE) \
+ ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+ || (MODE) == V4SFmode || (MODE) == V2DImode)
+
+/* Structure modes valid for Neon registers. */
+#define VALID_NEON_STRUCT_MODE(MODE) \
+ ((MODE) == TImode || (MODE) == EImode || (MODE) == OImode \
+ || (MODE) == CImode || (MODE) == XImode)
+
+/* The order in which registers should be allocated.  It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ For VFP/VFPv3, allocate caller-saved registers first (D0-D7), then D16-D31,
+ then D8-D15. The reason for doing this is to attempt to reduce register
+ pressure when both single- and double-precision registers are used in a
+ function, but hopefully not force double-precision registers to be
+ callee-saved when it's not necessary. */
+
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 27, 28, 29, 30, 31, 32, 33, 34, \
+ 35, 36, 37, 38, 39, 40, 41, 42, \
+ 43, 44, 45, 46, 47, 48, 49, 50, \
+ 51, 52, 53, 54, 55, 56, 57, 58, \
+ 59, 60, 61, 62, \
+ 24, 25, 26, \
+ 78, 77, 76, 75, 74, 73, 72, 71, \
+ 70, 69, 68, 67, 66, 65, 64, 63, \
+ 95, 96, 97, 98, 99, 100, 101, 102, \
+ 103, 104, 105, 106, 107, 108, 109, 110, \
+ 111, 112, 113, 114, 115, 116, 117, 118, \
+ 119, 120, 121, 122, 123, 124, 125, 126, \
+ 79, 80, 81, 82, 83, 84, 85, 86, \
+ 87, 88, 89, 90, 91, 92, 93, 94, \
+ 127 \
+}
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* APPLE LOCAL begin 5831562 add DIMODE_REG_ALLOC_ORDER */
+#define DIMODE_REG_ALLOC_ORDER \
+{ \
+ 2, 3, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 27, 28, 29, 30, 31, 32, 33, 34, \
+ 35, 36, 37, 38, 39, 40, 41, 42, \
+ 43, 44, 45, 46, 47, 48, 49, 50, \
+ 51, 52, 53, 54, 55, 56, 57, 58, \
+ 59, 60, 61, 62, \
+ 24, 25, 26, \
+ 78, 77, 76, 75, 74, 73, 72, 71, \
+ 70, 69, 68, 67, 66, 65, 64, 63, \
+ 79, 80, 81, 82, 83, 84, 85, 86, \
+ 87, 88, 89, 90, 91, 92, 93, 94, \
+ 95 \
+}
+/* APPLE LOCAL end 5831562 add DIMODE_REG_ALLOC_ORDER */
+
+/* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+#define HARD_REGNO_RENAME_OK(SRC, DST) \
+ (! IS_INTERRUPT (cfun->machine->func_type) || \
+ regs_ever_live[DST])
+
+/* Register and constant classes. */
+
+/* Register classes: used to be simple, just all ARM regs or all FPA regs.
+   Now that the Thumb is involved, it has become more complicated. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+enum reg_class
+{
+ NO_REGS,
+ FPA_REGS,
+ CIRRUS_REGS,
+ VFP_D0_D7_REGS,
+ VFP_LO_REGS,
+ VFP_HI_REGS,
+ VFP_REGS,
+ IWMMXT_GR_REGS,
+ IWMMXT_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ CC_REG,
+ VFPCC_REG,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPA_REGS", \
+ "CIRRUS_REGS", \
+ "VFP_D0_D7_REGS", \
+ "VFP_LO_REGS", \
+ "VFP_HI_REGS", \
+ "VFP_REGS", \
+ "IWMMXT_GR_REGS", \
+ "IWMMXT_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "CC_REG", \
+ "VFPCC_REG", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x00FF0000, 0x00000000, 0x00000000, 0x00000000 }, /* FPA_REGS */ \
+ { 0xF8000000, 0x000007FF, 0x00000000, 0x00000000 }, /* CIRRUS_REGS */ \
+ { 0x00000000, 0x80000000, 0x00007FFF, 0x00000000 }, /* VFP_D0_D7_REGS */ \
+ { 0x00000000, 0x80000000, 0x7FFFFFFF, 0x00000000 }, /* VFP_LO_REGS */ \
+ { 0x00000000, 0x00000000, 0x80000000, 0x7FFFFFFF }, /* VFP_HI_REGS */ \
+ { 0x00000000, 0x80000000, 0xFFFFFFFF, 0x7FFFFFFF }, /* VFP_REGS */ \
+ { 0x00000000, 0x00007800, 0x00000000, 0x00000000 }, /* IWMMXT_GR_REGS */ \
+ { 0x00000000, 0x7FFF8000, 0x00000000, 0x00000000 }, /* IWMMXT_REGS */ \
+ { 0x000000FF, 0x00000000, 0x00000000, 0x00000000 }, /* LO_REGS */ \
+ { 0x00002000, 0x00000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
+ { 0x000020FF, 0x00000000, 0x00000000, 0x00000000 }, /* BASE_REGS */ \
+ { 0x0000FF00, 0x00000000, 0x00000000, 0x00000000 }, /* HI_REGS */ \
+ { 0x01000000, 0x00000000, 0x00000000, 0x00000000 }, /* CC_REG */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, /* VFPCC_REG */ \
+ { 0x0200FFFF, 0x00000000, 0x00000000, 0x00000000 }, /* GENERAL_REGS */ \
+ { 0xFAFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF } /* ALL_REGS */ \
+}
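+
+/* Reading the masks above: each word of the HARD_REG_SET covers 32 hard
+   registers, least significant bit first.  E.g. LO_REGS is 0x000000FF
+   (r0-r7), STACK_REG is 0x00002000 (bit 13, i.e. sp), and BASE_REGS is
+   their union, 0x000020FF.  */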
+
+/* Any of the VFP register classes. */
+#define IS_VFP_CLASS(X) \
+ ((X) == VFP_D0_D7_REGS || (X) == VFP_LO_REGS \
+ || (X) == VFP_HI_REGS || (X) == VFP_REGS)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) arm_regno_class (REGNO)
+
+/* FPA registers can't do subreg as all values are reformatted to internal
+ precision. VFP registers may only be accessed in the mode they
+ were set. */
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? reg_classes_intersect_p (FPA_REGS, (CLASS)) \
+ || reg_classes_intersect_p (VFP_REGS, (CLASS)) \
+ : 0)
+
+/* We need to define this for LO_REGS on Thumb.  Otherwise we can end up
+   using r0-r4 for function arguments, r7 for the stack frame, and not
+   have enough left over to do doubleword arithmetic.  */
+#define CLASS_LIKELY_SPILLED_P(CLASS) \
+ ((TARGET_THUMB && (CLASS) == LO_REGS) \
+ || (CLASS) == CC_REG)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS)
+#define BASE_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS)
+
+/* For the Thumb the high registers cannot be used as base registers
+ when addressing quantities in QI or HI mode; if we don't know the
+ mode, then we must be conservative. */
+#define MODE_BASE_REG_CLASS(MODE) \
+ (TARGET_32BIT ? GENERAL_REGS : \
+ (((MODE) == SImode) ? BASE_REGS : LO_REGS))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* For Thumb we cannot support SP+reg addressing, so we return LO_REGS
+ instead of BASE_REGS. */
+#define MODE_BASE_REG_REG_CLASS(MODE) BASE_REG_CLASS
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+#define SMALL_REGISTER_CLASSES TARGET_THUMB1
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS, but for the Thumb core registers and
+ immediate constants we prefer a LO_REGS class or a subset. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) \
+ (TARGET_ARM ? (CLASS) : \
+ ((CLASS) == GENERAL_REGS || (CLASS) == HI_REGS \
+ || (CLASS) == NO_REGS ? LO_REGS : (CLASS)))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Must leave BASE_REGS reloads alone. */
+#define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
+ ((TARGET_VFP && TARGET_HARD_FLOAT \
+ && IS_VFP_CLASS (CLASS)) \
+ ? coproc_secondary_reload_class (MODE, X, FALSE) \
+ : (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) \
+ ? coproc_secondary_reload_class (MODE, X, TRUE) \
+ : TARGET_32BIT \
+ ? (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS) \
+ : THUMB_SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X))
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
+ ((TARGET_VFP && TARGET_HARD_FLOAT \
+ && IS_VFP_CLASS (CLASS)) \
+ ? coproc_secondary_reload_class (MODE, X, FALSE) : \
+ (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) ? \
+ coproc_secondary_reload_class (MODE, X, TRUE) : \
+ /* Cannot load constants into Cirrus registers. */ \
+ (TARGET_MAVERICK && TARGET_HARD_FLOAT \
+ && (CLASS) == CIRRUS_REGS \
+ && (CONSTANT_P (X) || GET_CODE (X) == SYMBOL_REF)) \
+ ? GENERAL_REGS : \
+ (TARGET_32BIT ? \
+ (((CLASS) == IWMMXT_REGS || (CLASS) == IWMMXT_GR_REGS) \
+ && CONSTANT_P (X)) \
+ ? GENERAL_REGS : \
+ (((MODE) == HImode && ! arm_arch4 \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS) \
+ : THUMB_SECONDARY_INPUT_RELOAD_CLASS (CLASS, MODE, X)))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define ARM_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND, WIN) \
+ do \
+ { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (MODE == DFmode && TARGET_SOFT_FLOAT)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (TARGET_MAVERICK && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -256 is not a valid offset. */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (MODE == SImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || ((MODE == HImode || MODE == QImode) && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset. */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if ((MODE == HImode || MODE == QImode) && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset. */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT && TARGET_FPA) \
+ /* Need to be careful, -1024 is not a valid offset. */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & (unsigned HOST_WIDE_INT) 0xffffffff) \
+ ^ (unsigned HOST_WIDE_INT) 0x80000000) \
+ - (unsigned HOST_WIDE_INT) 0x80000000); \
+	  /* Check for overflow or zero.  */				\
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL, \
+ MODE_BASE_REG_CLASS (MODE), GET_MODE (X), \
+ VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ } \
+ while (0)
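+
+/* A worked example of the split above: reloading an SImode access at
+   base + 0x1234 gives low = 0x234 and high = 0x1000, so the address is
+   rewritten as (base + 0x1000) + 0x234; the high part is reloaded into
+   a base register and the low part stays in the mem.  */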
+
+/* XXX If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* ??? This should be safe for thumb2, but we may be able to do better. */
+#define THUMB_LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \
+do { \
+ rtx new_x = thumb_legitimize_reload_address (&X, MODE, OPNUM, TYPE, IND_L); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+} while (0)
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_LEVELS, WIN) \
+ if (TARGET_ARM) \
+ ARM_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN); \
+ else \
+ THUMB_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+   ARM regs are UNITS_PER_WORD bits while FPA regs can hold any FP mode. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (((CLASS) == FPA_REGS || (CLASS) == CIRRUS_REGS) ? 1 : ARM_NUM_REGS (MODE))
+
+/* If defined, gives a class of registers that cannot be used as the
+ operand of a SUBREG that changes the mode of the object illegally. */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Moves between FPA_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(MODE, FROM, TO) \
+ (TARGET_32BIT ? \
+ ((FROM) == FPA_REGS && (TO) != FPA_REGS ? 20 : \
+ (FROM) != FPA_REGS && (TO) == FPA_REGS ? 20 : \
+ IS_VFP_CLASS (FROM) && !IS_VFP_CLASS (TO) ? 10 : \
+ !IS_VFP_CLASS (FROM) && IS_VFP_CLASS (TO) ? 10 : \
+ (FROM) == IWMMXT_REGS && (TO) != IWMMXT_REGS ? 4 : \
+ (FROM) != IWMMXT_REGS && (TO) == IWMMXT_REGS ? 4 : \
+ (FROM) == IWMMXT_GR_REGS || (TO) == IWMMXT_GR_REGS ? 20 : \
+ (FROM) == CIRRUS_REGS && (TO) != CIRRUS_REGS ? 20 : \
+ (FROM) != CIRRUS_REGS && (TO) == CIRRUS_REGS ? 20 : \
+ 2) \
+ : \
+ ((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* The amount of scratch space needed by _interwork_{r7,r11}_call_via_rN().
+ When present, it is one word in size, and sits at the top of the frame,
+ between the soft frame pointer and either r7 or r11.
+
+ We only need _interwork_rM_call_via_rN() for -mcaller-super-interworking,
+ and only then if some outgoing arguments are passed on the stack. It would
+ be tempting to also check whether the stack arguments are passed by indirect
+ calls, but there seems to be no reason in principle why a post-reload pass
+ couldn't convert a direct call into an indirect one. */
+#define CALLER_INTERWORKING_SLOT_SIZE \
+ (TARGET_CALLER_INTERWORKING \
+ && current_function_outgoing_args_size != 0 \
+ ? UNITS_PER_WORD : 0)
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly.
+ So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) ROUND_UP_WORD (NPUSHED) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) (TARGET_ARM ? 4 : 0)
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) 0
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_FPA \
+ && GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ ? gen_rtx_REG (MODE, FIRST_FPA_REGNUM) \
+ : TARGET_32BIT && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK \
+ && GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ ? gen_rtx_REG (MODE, FIRST_CIRRUS_FP_REGNUM) \
+ : TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (MODE) \
+ ? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \
+ : gen_rtx_REG (MODE, ARG_REGISTER (1)))
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ arm_function_value (VALTYPE, FUNC);
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+/* On a Cirrus chip, mvf0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == ARG_REGISTER (1) \
+ || (TARGET_32BIT && ((REGNO) == FIRST_CIRRUS_FP_REGNUM) \
+ && TARGET_HARD_FLOAT_ABI && TARGET_MAVERICK) \
+ || ((REGNO) == FIRST_IWMMXT_REGNUM && TARGET_IWMMXT_ABI) \
+ || (TARGET_32BIT && ((REGNO) == FIRST_FPA_REGNUM) \
+ && TARGET_HARD_FLOAT_ABI && TARGET_FPA))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Amount of memory needed for an untyped call to save all possible return
+ registers. */
+#define APPLY_RESULT_SIZE arm_apply_result_size()
+
+/* How large values are returned. */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+/* These bits describe the different types of function supported
+   by the ARM backend.  They are exclusive, i.e. a function cannot be both a
+ normal function and an interworked function, for example. Knowing the
+ type of a function is important for determining its prologue and
+ epilogue sequences.
+ Note value 7 is currently unassigned. Also note that the interrupt
+ function types all have bit 2 set, so that they can be tested for easily.
+ Note that 0 is deliberately chosen for ARM_FT_UNKNOWN so that when the
+ machine_function structure is initialized (to zero) func_type will
+ default to unknown. This will force the first use of arm_current_func_type
+ to call arm_compute_func_type. */
+#define ARM_FT_UNKNOWN 0 /* Type has not yet been determined. */
+#define ARM_FT_NORMAL 1 /* Your normal, straightforward function. */
+#define ARM_FT_INTERWORKED 2 /* A function that supports interworking. */
+#define ARM_FT_ISR 4 /* An interrupt service routine. */
+#define ARM_FT_FIQ 5 /* A fast interrupt service routine. */
+#define ARM_FT_EXCEPTION 6 /* An ARM exception handler (subcase of ISR). */
+
+#define ARM_FT_TYPE_MASK ((1 << 3) - 1)
+
+/* In addition functions can have several type modifiers,
+ outlined by these bit masks: */
+#define ARM_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
+#define ARM_FT_NAKED (1 << 3) /* No prologue or epilogue. */
+#define ARM_FT_VOLATILE (1 << 4) /* Does not return. */
+#define ARM_FT_NESTED (1 << 5) /* Embedded inside another func. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+#define ARM_FT_STACKALIGN (1 << 6) /* Called with misaligned stack. */
+
+/* Some macros to test these flags. */
+#define ARM_FUNC_TYPE(t) (t & ARM_FT_TYPE_MASK)
+#define IS_INTERRUPT(t) (t & ARM_FT_INTERRUPT)
+#define IS_VOLATILE(t) (t & ARM_FT_VOLATILE)
+#define IS_NAKED(t) (t & ARM_FT_NAKED)
+#define IS_NESTED(t) (t & ARM_FT_NESTED)
+/* APPLE LOCAL v7 support. Merge from mainline */
+#define IS_STACKALIGN(t) (t & ARM_FT_STACKALIGN)
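+
+/* For example, a handler declared with __attribute__ ((interrupt ("FIQ"),
+   naked)) would have func_type == (ARM_FT_FIQ | ARM_FT_NAKED): here
+   ARM_FUNC_TYPE yields ARM_FT_FIQ, and IS_INTERRUPT and IS_NAKED are
+   both nonzero.  */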
+
+
+/* Structure used to hold the function stack frame layout. Offsets are
+ relative to the stack pointer on function entry. Positive offsets are
+ in the direction of stack growth.
+ Only soft_frame is used in thumb mode. */
+
+typedef struct arm_stack_offsets GTY(())
+{
+ int saved_args; /* ARG_POINTER_REGNUM. */
+ int frame; /* ARM_HARD_FRAME_POINTER_REGNUM. */
+ int saved_regs;
+ int soft_frame; /* FRAME_POINTER_REGNUM. */
+ int locals_base; /* THUMB_HARD_FRAME_POINTER_REGNUM. */
+ int outgoing_args; /* STACK_POINTER_REGNUM. */
+}
+arm_stack_offsets;
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct machine_function GTY(())
+{
+ /* Additional stack adjustment in __builtin_eh_throw. */
+ rtx eh_epilogue_sp_ofs;
+ /* Records if LR has to be saved for far jumps. */
+ int far_jump_used;
+ /* Records if ARG_POINTER was ever live. */
+ int arg_pointer_live;
+ /* Records if the save of LR has been eliminated. */
+ int lr_save_eliminated;
+ /* The size of the stack frame. Only valid after reload. */
+ arm_stack_offsets stack_offsets;
+ /* Records the type of the current function. */
+ unsigned long func_type;
+ /* Record if the function has a variable argument list. */
+ int uses_anonymous_args;
+ /* Records if sibcalls are blocked because an argument
+ register is needed to preserve stack alignment. */
+ int sibcall_blocked;
+ /* The PIC register for this function. This might be a pseudo. */
+ rtx pic_reg;
+ /* Labels for per-function Thumb call-via stubs. One per potential calling
+ register. We can never call via LR or PC. We can call via SP if a
+ trampoline happens to be on the top of the stack. */
+ rtx call_via[14];
+}
+machine_function;
+
+/* As in the machine_function, a global set of call-via labels, for code
+ that is in text_section. */
+extern GTY(()) rtx thumb_call_via_label[14];
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far. */
+typedef struct
+{
+ /* This is the number of registers of arguments scanned so far. */
+ int nregs;
+ /* This is the number of iWMMXt register arguments scanned so far. */
+ int iwmmxt_nregs;
+ int named_count;
+ int nargs;
+ /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+ int can_split;
+} CUMULATIVE_ARGS;
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since TARGET_SETUP_INCOMING_VARARGS is
+ defined), say it is passed on the stack (function_prologue will
+ indeed make it be passed on the stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ arm_function_arg (&(CUM), (MODE), (TYPE), (NAMED))
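+
+/* Illustrative only: for f (int a, int b, int c, int d, int e), the four
+   ints a..d occupy r0-r3 (the first 16 bytes) and e goes on the stack.
+   A hypothetical sketch of the accounting, never compiled: */
+#if 0
+static rtx
+first_arg_reg_sketch (tree fntype)
+{
+  CUMULATIVE_ARGS cum;
+  INIT_CUMULATIVE_ARGS (cum, fntype, NULL_RTX, NULL_TREE, -1);
+  /* Yields (reg:SI 0) for the first int argument; once r0-r3 are
+     exhausted, FUNCTION_ARG returns 0 and the argument is pushed.  */
+  return FUNCTION_ARG (cum, SImode, integer_type_node, 1);
+}
+#endif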
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (arm_pad_arg_upward (MODE, TYPE) ? upward : downward)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (arm_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
+
+/* For AAPCS, padding should never be below the argument. For other ABIs,
+ mimic the default. */
+#define PAD_VARARGS_DOWN \
+ ((TARGET_AAPCS_BASED) ? 0 : BYTES_BIG_ENDIAN)
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ arm_init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nargs += 1; \
+ if (arm_vector_mode_supported_p (MODE) \
+ && (CUM).named_count > (CUM).nargs \
+ && TARGET_IWMMXT_ABI) \
+ (CUM).iwmmxt_nregs += 1; \
+ else \
+ (CUM).nregs += ARM_NUM_REGS2 (MODE, TYPE)
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* If defined, a C expression that gives the alignment boundary, in bits, of an
+ argument with the specified mode and type. If it is not defined,
+ `PARM_BOUNDARY' is used for all arguments. */
+#define FUNCTION_ARG_BOUNDARY(MODE,TYPE) \
+ ((ARM_DOUBLEWORD_ALIGN && arm_needs_doubleword_align (MODE, TYPE)) \
+ ? DOUBLEWORD_ALIGNMENT \
+ : PARM_BOUNDARY )
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ (IN_RANGE ((REGNO), 0, 3) \
+ || (TARGET_IWMMXT_ABI \
+ && IN_RANGE ((REGNO), FIRST_IWMMXT_REGNUM, FIRST_IWMMXT_REGNUM + 9)))
+
+
+/* If your target environment doesn't prefix user functions with an
+ underscore, you may wish to re-define this to prevent any conflicts.
+ E.g., AOF may prefix mcount with an underscore. */
+#ifndef ARM_MCOUNT_NAME
+#define ARM_MCOUNT_NAME "*mcount"
+#endif
+
+/* Call the function profiler with a given profile label. The Acorn
+ compiler puts this BEFORE the prologue, but gcc puts it afterwards.
+ On the ARM the full profile code will look like:
+ .data
+ LP1
+ .word 0
+ .text
+ mov ip, lr
+ bl mcount
+ .word LP1
+
+ profile_function() in final.c outputs the .data section, FUNCTION_PROFILER
+ will output the .text section.
+
+ The ``mov ip, lr'' seems like a good idea to stick with the cc convention.
+ ``prof'' doesn't seem to mind this!
+
+ Note - this version of the code is designed to work in both ARM and
+ Thumb modes. */
+#ifndef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \
+{ \
+ char temp[20]; \
+ rtx sym; \
+ \
+ asm_fprintf (STREAM, "\tmov\t%r, %r\n\tbl\t", \
+ IP_REGNUM, LR_REGNUM); \
+ assemble_name (STREAM, ARM_MCOUNT_NAME); \
+ fputc ('\n', STREAM); \
+ ASM_GENERATE_INTERNAL_LABEL (temp, "LP", LABELNO); \
+ sym = gen_rtx_SYMBOL_REF (Pmode, temp); \
+ assemble_aligned_integer (UNITS_PER_WORD, sym); \
+}
+#endif
+
+#ifdef THUMB_FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+ if (TARGET_ARM) \
+ ARM_FUNCTION_PROFILER (STREAM, LABELNO) \
+ else \
+ THUMB_FUNCTION_PROFILER (STREAM, LABELNO)
+#else
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+ ARM_FUNCTION_PROFILER (STREAM, LABELNO)
+#endif
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+#define EPILOGUE_USES(REGNO) (reload_completed && (REGNO) == LR_REGNUM)
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* This is disabled for Thumb-2 because it will confuse the
+ conditional insn counter. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#define USE_RETURN_INSN(ISCOND) \
+ (TARGET_ARM ? use_return_insn (ISCOND, NULL) : 0)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. Note we have to use {ARM|THUMB}_HARD_FRAME_POINTER_REGNUM
+ because the definition of HARD_FRAME_POINTER_REGNUM is not a constant. */
+
+/* APPLE LOCAL begin ARM custom frame layout */
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },\
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM },\
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },\
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }}
+/* APPLE LOCAL end ARM custom frame layout */
+
+/* Given FROM and TO register numbers, say whether this elimination is
+ allowed. Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM or
+ ARG_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == FRAME_POINTER_REGNUM && (FROM) == ARG_POINTER_REGNUM) ? 0 : \
+ /* APPLE LOCAL begin ARM prefer SP to FP */ \
+ ((TO) == STACK_POINTER_REGNUM \
+ && !current_function_sp_is_unchanging) ? 0 : \
+ /* APPLE LOCAL end ARM prefer SP to FP */ \
+ /* APPLE LOCAL ARM custom frame layout */ \
+ /* Removed lines. */ \
+ 1)
+
+/* Define the offset between two registers, one to be eliminated, and the
+ other its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ if (TARGET_ARM) \
+ (OFFSET) = arm_compute_initial_elimination_offset (FROM, TO); \
+ else \
+ (OFFSET) = thumb_compute_initial_elimination_offset (FROM, TO)
+
+/* Special case handling of the location of arguments passed on the stack. */
+/* APPLE LOCAL ARM prefer SP to FP */
+#define DEBUGGER_ARG_OFFSET(value, addr) arm_debugger_arg_offset (value, addr)
+
+/* Initialize data used by insn expanders. This is called from init_emit,
+ once for every function before code is generated. */
+#define INIT_EXPANDERS arm_init_expanders ()
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ XXX FIXME: When the trampoline returns, r8 will be clobbered. */
+#define ARM_TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ asm_fprintf (FILE, "\tldr\t%r, [%r, #0]\n", \
+ STATIC_CHAIN_REGNUM, PC_REGNUM); \
+ asm_fprintf (FILE, "\tldr\t%r, [%r, #0]\n", \
+ PC_REGNUM, PC_REGNUM); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+}
+
+/* APPLE LOCAL begin ARM MACH assembler */
+#define DOT_WORD ".word"
+/* APPLE LOCAL end ARM MACH assembler */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* The Thumb-2 trampoline is similar to the ARM implementation.
+ Unlike 16-bit Thumb, we enter the stub in Thumb mode. */
+#define THUMB2_TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ asm_fprintf (FILE, "\tldr.w\t%r, [%r, #4]\n", \
+ STATIC_CHAIN_REGNUM, PC_REGNUM); \
+ asm_fprintf (FILE, "\tldr.w\t%r, [%r, #4]\n", \
+ PC_REGNUM, PC_REGNUM); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+}
+
+#define THUMB1_TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ ASM_OUTPUT_ALIGN(FILE, 2); \
+ fprintf (FILE, "\t.code\t16\n"); \
+ fprintf (FILE, ".Ltrampoline_start:\n"); \
+ asm_fprintf (FILE, "\tpush\t{r0, r1}\n"); \
+ asm_fprintf (FILE, "\tldr\tr0, [%r, #8]\n", \
+ PC_REGNUM); \
+ asm_fprintf (FILE, "\tmov\t%r, r0\n", \
+ STATIC_CHAIN_REGNUM); \
+ asm_fprintf (FILE, "\tldr\tr0, [%r, #8]\n", \
+ PC_REGNUM); \
+ asm_fprintf (FILE, "\tstr\tr0, [%r, #4]\n", \
+ SP_REGNUM); \
+ asm_fprintf (FILE, "\tpop\t{r0, %r}\n", \
+ PC_REGNUM); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+ assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
+}
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+ if (TARGET_ARM) \
+ ARM_TRAMPOLINE_TEMPLATE (FILE) \
+ else if (TARGET_THUMB2) \
+ THUMB2_TRAMPOLINE_TEMPLATE (FILE) \
+ else \
+ THUMB1_TRAMPOLINE_TEMPLATE (FILE)
+
+/* Thumb trampolines should be entered in thumb mode, so set the bottom bit
+ of the address. */
+#define TRAMPOLINE_ADJUST_ADDRESS(ADDR) do \
+{ \
+ if (TARGET_THUMB) \
+ (ADDR) = expand_simple_binop (Pmode, IOR, (ADDR), GEN_INT(1), \
+ gen_reg_rtx (Pmode), 0, OPTAB_LIB_WIDEN); \
+} while(0)
+
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Length in units of the trampoline for entering a nested function. */
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+#define TRAMPOLINE_SIZE (TARGET_32BIT ? 16 : 20)
+
+/* Alignment required for a trampoline in bits. */
+#define TRAMPOLINE_ALIGNMENT 32
+
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#ifndef INITIALIZE_TRAMPOLINE
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx_MEM (SImode, \
+ plus_constant (TRAMP, \
+ TARGET_32BIT ? 8 : 12)), \
+ CXT); \
+ emit_move_insn (gen_rtx_MEM (SImode, \
+ plus_constant (TRAMP, \
+ TARGET_32BIT ? 12 : 16)), \
+ FNADDR); \
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), \
+ 0, VOIDmode, 2, TRAMP, Pmode, \
+ plus_constant (TRAMP, TRAMPOLINE_SIZE), Pmode); \
+}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#endif
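+
+/* Putting the two macros together (illustrative, and assuming r8 is the
+   static chain register as the ARM_TRAMPOLINE_TEMPLATE comment does): on
+   a 32-bit (ARM) target the initialized 16-byte trampoline block reads
+     offset  0: ldr r8, [pc, #0]    ; pc reads as offset 8, loads word 8
+     offset  4: ldr pc, [pc, #0]    ; pc reads as offset 12, loads word 12
+     offset  8: <static chain value>   (CXT, stored at TRAMP+8)
+     offset 12: <function address>     (FNADDR, stored at TRAMP+12)
+   after which __clear_cache is run over the whole block. */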
+
+
+/* Addressing modes, and classification of registers for them. */
+#define HAVE_POST_INCREMENT 1
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define HAVE_PRE_INCREMENT TARGET_32BIT
+#define HAVE_POST_DECREMENT TARGET_32BIT
+#define HAVE_PRE_DECREMENT TARGET_32BIT
+#define HAVE_PRE_MODIFY_DISP TARGET_32BIT
+#define HAVE_POST_MODIFY_DISP TARGET_32BIT
+#define HAVE_PRE_MODIFY_REG TARGET_32BIT
+#define HAVE_POST_MODIFY_REG TARGET_32BIT
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+#define TEST_REGNO(R, TEST, VALUE) \
+ ((R TEST VALUE) || ((unsigned) reg_renumber[R] TEST VALUE))
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Don't allow the pc to be used. */
+#define ARM_REGNO_OK_FOR_BASE_P(REGNO) \
+ (TEST_REGNO (REGNO, <, PC_REGNUM) \
+ || TEST_REGNO (REGNO, ==, FRAME_POINTER_REGNUM) \
+ || TEST_REGNO (REGNO, ==, ARG_POINTER_REGNUM))
+
+#define THUMB1_REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ (TEST_REGNO (REGNO, <=, LAST_LO_REGNUM) \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM)))
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO, MODE) \
+ : ARM_REGNO_OK_FOR_BASE_P (REGNO))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Nonzero if X can be the base register in a reg+reg addressing mode.
+ For Thumb, we cannot use SP + reg, so reject SP. */
+#define REGNO_MODE_OK_FOR_REG_BASE_P(X, MODE) \
+ REGNO_OK_FOR_INDEX_P (X)
+
+/* For ARM code, we don't care about the mode, but for Thumb, the index
+ must be suitable for use in a QImode load. */
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO, QImode)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* ??? Should the TARGET_ARM here also apply to thumb2? */
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (TARGET_ARM && optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX.
+
+ When generating PIC, allow anything. */
+#define ARM_LEGITIMATE_CONSTANT_P(X) (flag_pic || ! label_mentioned_p (X))
+
+#define THUMB_LEGITIMATE_CONSTANT_P(X) \
+ ( GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X) \
+ || flag_pic)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define LEGITIMATE_CONSTANT_P(X) \
+ (!arm_tls_referenced_p (X) \
+ && (TARGET_32BIT ? ARM_LEGITIMATE_CONSTANT_P (X) \
+ : THUMB_LEGITIMATE_CONSTANT_P (X)))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* APPLE LOCAL begin ARM longcall */
+#define SYMBOL_SHORT_CALL ((SYMBOL_FLAG_MACH_DEP) << 3)
+#define SYMBOL_LONG_CALL ((SYMBOL_FLAG_MACH_DEP) << 4)
+
+/* Special characters prefixed to function names
+ in order to encode attribute-like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define SYMBOL_SHORT_CALL_ATTR_P(SYMBOL) \
+ (SYMBOL_REF_FLAGS (SYMBOL) & SYMBOL_SHORT_CALL)
+
+#define SYMBOL_LONG_CALL_ATTR_P(SYMBOL) \
+ (SYMBOL_REF_FLAGS (SYMBOL) & SYMBOL_LONG_CALL)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+/* APPLE LOCAL end ARM longcall */
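+
+/* A hypothetical sketch (never compiled) of how the fragment above is
+   consumed: generic code drops it into a switch over the first character
+   of a symbol name, so each case must return a strip count. */
+#if 0
+static int
+name_encoding_length (const char *name)
+{
+  switch (name[0])
+    {
+      ARM_NAME_ENCODING_LENGTHS
+      default: return 0;
+    }
+}
+#endif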
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ arm_asm_output_labelref (FILE, NAME)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Output IT instructions for conditionally executed Thumb-2 instructions. */
+#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
+ if (TARGET_THUMB2) \
+ thumb2_asm_output_opcode (STREAM);
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* The EABI specifies that constructors should go in .init_array.
+ Other targets use .ctors for compatibility. */
+#ifndef ARM_EABI_CTORS_SECTION_OP
+#define ARM_EABI_CTORS_SECTION_OP \
+ "\t.section\t.init_array,\"aw\",%init_array"
+#endif
+#ifndef ARM_EABI_DTORS_SECTION_OP
+#define ARM_EABI_DTORS_SECTION_OP \
+ "\t.section\t.fini_array,\"aw\",%fini_array"
+#endif
+#define ARM_CTORS_SECTION_OP \
+ "\t.section\t.ctors,\"aw\",%progbits"
+#define ARM_DTORS_SECTION_OP \
+ "\t.section\t.dtors,\"aw\",%progbits"
+
+/* Define CTORS_SECTION_ASM_OP. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
+#ifndef IN_LIBGCC2
+# define CTORS_SECTION_ASM_OP \
+ (TARGET_AAPCS_BASED ? ARM_EABI_CTORS_SECTION_OP : ARM_CTORS_SECTION_OP)
+# define DTORS_SECTION_ASM_OP \
+ (TARGET_AAPCS_BASED ? ARM_EABI_DTORS_SECTION_OP : ARM_DTORS_SECTION_OP)
+#else /* !defined (IN_LIBGCC2) */
+/* In libgcc, CTORS_SECTION_ASM_OP must be a compile-time constant,
+ so we cannot use the definition above. */
+# ifdef __ARM_EABI__
+/* The .ctors section is not part of the EABI, so we do not define
+ CTORS_SECTION_ASM_OP when in libgcc; that prevents crtstuff
+ from trying to use it. We do define it when doing normal
+ compilation, as .init_array can be used instead of .ctors. */
+/* There is no need to emit begin or end markers when using
+ init_array; the dynamic linker will compute the size of the
+ array itself based on special symbols created by the static
+ linker. However, we do need to arrange to set up
+ exception-handling here. */
+# define CTOR_LIST_BEGIN asm (ARM_EABI_CTORS_SECTION_OP)
+# define CTOR_LIST_END /* empty */
+# define DTOR_LIST_BEGIN asm (ARM_EABI_DTORS_SECTION_OP)
+# define DTOR_LIST_END /* empty */
+# else /* !defined (__ARM_EABI__) */
+# define CTORS_SECTION_ASM_OP ARM_CTORS_SECTION_OP
+# define DTORS_SECTION_ASM_OP ARM_DTORS_SECTION_OP
+# endif /* !defined (__ARM_EABI__) */
+#endif /* !defined (IN_LIBGCC2) */
+
+/* True if the operating system can merge entities with vague linkage
+ (e.g., symbols in COMDAT group) during dynamic linking. */
+#ifndef TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P
+#define TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P true
+#endif
+
+/* Set the short-call flag for any function compiled in the current
+ compilation unit. We skip this for functions with the section
+ attribute when long-calls are in effect as this tells the compiler
+ that the section might be placed a long way from the caller.
+ See arm_is_longcall_p() for more information. */
+/* APPLE LOCAL begin ARM longcall */
+#define ARM_DECLARE_FUNCTION_SIZE(STREAM, NAME, DECL) \
+ if (!TARGET_LONG_CALLS || ! DECL_SECTION_NAME (DECL)) \
+ arm_encode_call_attribute (DECL, SYMBOL_SHORT_CALL)
+/* APPLE LOCAL end ARM longcall */
+
+#define ARM_OUTPUT_FN_UNWIND(F, PROLOGUE) arm_output_fn_unwind (F, PROLOGUE)
+
+#ifdef TARGET_UNWIND_INFO
+#define ARM_EABI_UNWIND_TABLES \
+ ((!USING_SJLJ_EXCEPTIONS && flag_exceptions) || flag_unwind_tables)
+#else
+#define ARM_EABI_UNWIND_TABLES 0
+#endif
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+ Thumb-2 has the same restrictions as ARM. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#ifndef REG_OK_STRICT
+
+#define ARM_REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) <= LAST_ARM_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM \
+ || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+#define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ (REGNO (X) <= LAST_LO_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == hard_frame_pointer_rtx \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_STRICT_P 0
+
+#else /* REG_OK_STRICT */
+
+#define ARM_REG_OK_FOR_BASE_P(X) \
+ ARM_REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#define REG_STRICT_P 1
+
+#endif /* REG_OK_STRICT */
+
+/* Now define some helpers in terms of the above. */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REG_MODE_OK_FOR_BASE_P (X, MODE) \
+ : ARM_REG_OK_FOR_BASE_P (X))
+
+#define ARM_REG_OK_FOR_INDEX_P(X) ARM_REG_OK_FOR_BASE_P (X)
+
+/* For 16-bit Thumb, a valid index register is anything that can be used in
+ a byte load instruction. */
+#define THUMB1_REG_OK_FOR_INDEX_P(X) \
+ THUMB1_REG_MODE_OK_FOR_BASE_P (X, QImode)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. On the Thumb, the stack pointer
+ is not suitable. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REG_OK_FOR_INDEX_P (X) \
+ : ARM_REG_OK_FOR_INDEX_P (X))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Nonzero if X can be the base register in a reg+reg addressing mode.
+ For Thumb, we cannot use SP + reg, so reject SP. */
+#define REG_MODE_OK_FOR_REG_BASE_P(X, MODE) \
+ REG_OK_FOR_INDEX_P (X)
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address. */
+
+#define ARM_BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && ARM_REG_OK_FOR_BASE_P (X))
+
+#define ARM_INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && ARM_REG_OK_FOR_INDEX_P (X))
+
+#define ARM_GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+ { \
+ if (arm_legitimate_address_p (MODE, X, SET, REG_STRICT_P)) \
+ goto WIN; \
+ }
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define THUMB2_GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+ { \
+ if (thumb2_legitimate_address_p (MODE, X, REG_STRICT_P)) \
+ goto WIN; \
+ }
+
+#define THUMB1_GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+ { \
+ if (thumb1_legitimate_address_p (MODE, X, REG_STRICT_P)) \
+ goto WIN; \
+ }
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, WIN) \
+ if (TARGET_ARM) \
+ ARM_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN) \
+ else if (TARGET_THUMB2) \
+ THUMB2_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN) \
+ else /* if (TARGET_THUMB1) */ \
+ THUMB1_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address. */
+#define ARM_LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+do { \
+ X = arm_legitimize_address (X, OLDX, MODE); \
+} while (0)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* ??? Implement LEGITIMIZE_ADDRESS for thumb2. */
+#define THUMB2_LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+do { \
+} while (0)
+
+#define THUMB1_LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+do { \
+ X = thumb_legitimize_address (X, OLDX, MODE); \
+} while (0)
+
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+do { \
+ if (TARGET_ARM) \
+ ARM_LEGITIMIZE_ADDRESS (X, OLDX, MODE, WIN); \
+ else if (TARGET_THUMB2) \
+ THUMB2_LEGITIMIZE_ADDRESS (X, OLDX, MODE, WIN); \
+ else \
+ THUMB1_LEGITIMIZE_ADDRESS (X, OLDX, MODE, WIN); \
+ \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+} while (0)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define ARM_GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
+{ \
+ if ( GET_CODE (ADDR) == PRE_DEC || GET_CODE (ADDR) == POST_DEC \
+ || GET_CODE (ADDR) == PRE_INC || GET_CODE (ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Nothing helpful to do for the Thumb. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
+/* APPLE LOCAL v7 support. Merge from mainline */ \
+ if (TARGET_32BIT) \
+ ARM_GO_IF_MODE_DEPENDENT_ADDRESS (ADDR, LABEL)
+
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* APPLE LOCAL begin ARM compact switch tables */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define CASE_VECTOR_PC_RELATIVE (TARGET_THUMB || TARGET_THUMB2)
+
+#define CASE_VECTOR_SHORTEN_MODE(MIN_OFFSET, MAX_OFFSET, BODY) \
+((TARGET_ARM || \
+ (TARGET_THUMB2 && (MIN_OFFSET < 0 || MAX_OFFSET >= 0x2000))) ? SImode \
+ : TARGET_THUMB2 ? ((MAX_OFFSET >= 0x200) ? HImode : QImode) \
+ : !TARGET_COMPACT_SWITCH_TABLES ? SImode \
+ /* TARGET_THUMB1 */ \
+ : (MIN_OFFSET) >= -256 && (MAX_OFFSET) <= 254 \
+ ? (ADDR_DIFF_VEC_FLAGS (BODY).offset_unsigned = 0, QImode) \
+ : (MIN_OFFSET) >= 0 && (MAX_OFFSET) <= 510 \
+ ? (ADDR_DIFF_VEC_FLAGS (BODY).offset_unsigned = 1, QImode) \
+ : (MIN_OFFSET) >= -65536 && (MAX_OFFSET) <= 65534 \
+ ? (ADDR_DIFF_VEC_FLAGS (BODY).offset_unsigned = 0, HImode) \
+ : SImode)
+/* APPLE LOCAL end v7 support. Merge from mainline */
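+
+/* Worked examples for the TARGET_THUMB1 compact-table arms above
+   (illustrative): offsets in [-10, 200] fit signed QImode entries;
+   [0, 400] needs unsigned QImode; [-4000, 4000] needs signed HImode;
+   anything wider falls back to SImode. */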
+
+
+/* This macro uses the variable "file" that exists at
+ the single place it is invoked, in final.c. INSN_ADDRESSES
+ and INSN_UID also expand to variables visible at that point,
+ but not everywhere. Ewww.
+ The table in RTL includes the default target as the last element (via
+ a local change in stmt.c). The table in the .s file additionally includes
+ the count as its first element; the count does not include the last element.
+ All that is dealt with here. */
+
+#define ASM_OUTPUT_ADDR_DIFF_VEC(LABEL, BODY) \
+ arm_asm_output_addr_diff_vec (file, LABEL, BODY)
+
+/* This is identical to the default code when ASM_OUTPUT_ADDR_VEC is
+ not defined; however, final_scan_insn() will not invoke that
+ code when ASM_OUTPUT_ADDR_DIFF_VEC is defined. In other words,
+ if one of these is defined, the other must be also, assuming you
+ want to use both kinds of tables in different circumstances.
+ Grr. This requirement is undocumented. */
+
+#define ASM_OUTPUT_ADDR_VEC(LABEL, BODY) \
+do \
+ { \
+ int vlen = XVECLEN (BODY, 0); \
+ int idx; \
+ if (GET_CODE (BODY) != ADDR_VEC) \
+ gcc_unreachable (); \
+ for (idx = 0; idx < vlen; idx++) \
+ { \
+ ASM_OUTPUT_ADDR_VEC_ELT \
+ (file, CODE_LABEL_NUMBER (XEXP \
+ (XVECEXP (BODY, 0, idx), 0))); \
+ } \
+ } \
+while (0)
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned.
+ Unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+#undef MOVE_RATIO
+#define MOVE_RATIO (arm_tune_xscale ? 4 : 2)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ (TARGET_THUMB ? ZERO_EXTEND : \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)))
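+
+/* Concretely (restating the macro above): a QImode load zero-extends
+   everywhere; in Thumb mode every narrow load zero-extends; for ARM,
+   an HImode load zero-extends on ARMv4 and later (ldrh), sign-extends
+   on pre-v4 big-endian targets, and is UNKNOWN otherwise. */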
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the ARM, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* APPLE LOCAL begin DImode multiply enhancement */
+/* Enable a new optimization in combine.c, see there. */
+#define COMBINE_TRY_RETAIN 1
+/* APPLE LOCAL end DImode multiply enhancement */
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* APPLE LOCAL begin ARM enhance conditional insn generation */
+/* A C expression to modify the code described by the conditional if
+ information CE_INFO, for the basic block BB, possibly updating the tests in
+ TRUE_EXPR, and FALSE_EXPR for converting the && and || parts of if-then or
+ if-then-else code to conditional instructions. OLD_TRUE and OLD_FALSE are
+ the previous tests. Set either TRUE_EXPR or FALSE_EXPR to a null pointer if
+ the tests cannot be converted. */
+#define IFCVT_MODIFY_MULTIPLE_TESTS(CE_INFO, BB, TRUE_EXPR, FALSE_EXPR) \
+arm_ifcvt_modify_multiple_tests (CE_INFO, BB, &TRUE_EXPR, &FALSE_EXPR)
+/* APPLE LOCAL end ARM enhance conditional insn generation */
+
+#define ARM_FRAME_RTX(X) \
+ ( (X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+/* Moves to and from memory are quite expensive. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define MEMORY_MOVE_COST(M, CLASS, IN) \
+ (TARGET_32BIT ? 10 : \
+ ((GET_MODE_SIZE (M) < 4 ? 8 : 2 * GET_MODE_SIZE (M)) \
+ * (CLASS == LO_REGS ? 1 : 2)))
+
+/* Try to generate sequences that don't involve branches; we can then use
+ conditional instructions. */
+#define BRANCH_COST \
+ (TARGET_32BIT ? 4 : (optimize > 0 ? 2 : 0))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern unsigned arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+/* We can't directly access anything that contains a symbol,
+ nor can we indirect via the constant pool. One exception is
+ UNSPEC_TLS, which is always PIC. */
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (!(symbol_mentioned_p (X) \
+ || label_mentioned_p (X) \
+ || (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X) \
+ && (symbol_mentioned_p (get_pool_constant (X)) \
+ || label_mentioned_p (get_pool_constant (X))))) \
+ || tls_mentioned_p (X))
+
+/* APPLE LOCAL begin ARM -mdynamic-no-pic support */
+#define LEGITIMATE_DYNAMIC_NO_PIC_OPERAND_P(X) \
+ (! non_local_symbol_mentioned_p (X))
+
+/* Unfortunately, the places where LEGITIMATE_PIC_OPERAND_P appear in
+ the source code are potential hazards for -mdynamic-no-pic, too.
+ This macro is similar in usage to LEGITIMATE_PIC_OPERAND_P, but it
+ doesn't assume flag_pic is set. */
+#define LEGITIMATE_INDIRECT_OPERAND_P(X) \
+ ((! flag_pic || LEGITIMATE_PIC_OPERAND_P(X)) \
+ && (! MACHO_DYNAMIC_NO_PIC_P \
+ || LEGITIMATE_DYNAMIC_NO_PIC_OPERAND_P(X)))
+/* APPLE LOCAL end ARM -mdynamic-no-pic support */
+
+/* We need to know when we are making a constant pool; this determines
+ whether data needs to be in the GOT or can be referenced via a GOT
+ offset. */
+extern int making_const_table;
+
+/* Handle pragmas for compatibility with Intel's compilers. */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_pragma (0, "long_calls", arm_pr_long_calls); \
+ c_register_pragma (0, "no_long_calls", arm_pr_no_long_calls); \
+ c_register_pragma (0, "long_calls_off", arm_pr_long_calls_off); \
+} while (0)
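+
+/* Usage from C source, as documented for GCC's ARM pragmas:
+     #pragma long_calls       -- subsequent functions get the long_call
+                                 attribute
+     #pragma no_long_calls    -- subsequent functions get the short_call
+                                 attribute
+     #pragma long_calls_off   -- restore the command-line default */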
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+
+#define SELECT_CC_MODE(OP, X, Y) arm_select_cc_mode (OP, X, Y)
+
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+#define REVERSE_CONDITION(CODE,MODE) \
+ (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
+ ? reverse_condition_maybe_unordered (CODE) \
+ : reverse_condition (CODE))
+
+#define CANONICALIZE_COMPARISON(CODE, OP0, OP1) \
+ do \
+ { \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), GET_MODE (OP0), \
+ &const_op); \
+ OP1 = const_op; \
+ } \
+ } \
+ while (0)
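+
+/* A minimal sketch (not GCC's implementation, never compiled) of the
+   test that const_ok_for_arm performs: an ARM data-processing immediate
+   is an 8-bit value rotated right by an even amount, so a constant is
+   encodable iff some even left-rotation of it fits in 8 bits. */
+#if 0
+static int
+arm_immediate_ok_sketch (unsigned int v)
+{
+  int r;
+  for (r = 0; r < 32; r += 2)
+    {
+      /* Rotate V left by R bits (R == 0 handled to avoid a shift by 32).  */
+      unsigned int rot = r ? ((v << r) | (v >> (32 - r))) : v;
+      if ((rot & 0xFF) == rot)
+        return 1;
+    }
+  return 0;
+}
+#endif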
+
+/* The ARMv5 clz instruction returns 32 when its input is zero. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
+
+#undef ASM_APP_OFF
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define ASM_APP_OFF (TARGET_THUMB1 ? "\t.code\t16\n" : \
+ TARGET_THUMB2 ? "\t.thumb\n" : "")
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ asm_fprintf (STREAM,"\tstmfd\t%r!,{%r}\n", \
+ STACK_POINTER_REGNUM, REGNO); \
+ else \
+ asm_fprintf (STREAM, "\tpush {%r}\n", REGNO); \
+ } while (0)
+
+
+#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ asm_fprintf (STREAM, "\tldmfd\t%r!,{%r}\n", \
+ STACK_POINTER_REGNUM, REGNO); \
+ else \
+ asm_fprintf (STREAM, "\tpop {%r}\n", REGNO); \
+ } while (0)
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
+#define ADDR_VEC_ALIGN(JUMPTABLE) 0
+
+/* This is how to output a label which precedes a jumptable. Since
+ Thumb instructions are 2 bytes, we may need explicit alignment here. */
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+ do \
+ { \
+ if (TARGET_THUMB && GET_MODE (PATTERN (JUMPTABLE)) == SImode) \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
+ } \
+ while (0)
+
+/* Make sure subsequent insns are aligned after a TBB. */
+#define ASM_OUTPUT_CASE_END(FILE, NUM, JUMPTABLE) \
+ do \
+ { \
+ if (GET_MODE (PATTERN (JUMPTABLE)) == QImode) \
+ ASM_OUTPUT_ALIGN (FILE, 1); \
+ } \
+ while (0)
+
+#define ARM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ if (TARGET_THUMB) \
+ { \
+ if (is_called_in_ARM_mode (DECL) \
+ || (TARGET_THUMB1 && !TARGET_THUMB1_ONLY \
+ && current_function_is_thunk)) \
+ { \
+ fprintf (STREAM, "\t.align 2\n") ; \
+ fprintf (STREAM, "\t.code 32\n") ; \
+ } \
+ else \
+/* APPLE LOCAL begin ARM thumb_func <symbol_name> */ \
+ { \
+ if (TARGET_THUMB1) \
+ fputs ("\t.code 16\n", STREAM); \
+ else \
+ fputs ("\t.thumb\n", STREAM); \
+ fputs ("\t.thumb_func ", STREAM); \
+ if (TARGET_MACHO) \
+ assemble_name (STREAM, (char *) NAME); \
+ putc ('\n', STREAM); \
+ } \
+/* APPLE LOCAL end ARM thumb_func <symbol_name> */ \
+ } \
+ if (TARGET_POKE_FUNCTION_NAME) \
+ arm_poke_function_name (STREAM, (char *) NAME); \
+ } \
+ while (0)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL1, DECL2) \
+ do \
+ { \
+ const char *const LABEL1 = XSTR (XEXP (DECL_RTL (DECL1), 0), 0); \
+ const char *const LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TARGET_THUMB && TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* To support -falign-* switches we need to use .p2align so
+ that alignment directives in code sections will be padded
+ with no-op instructions, rather than zeroes. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", \
+ (int) (LOG), (int) (MAX_SKIP)); \
+ }
+#endif
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimizing. For Thumb-2, check whether any IT instructions need
+ outputting. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (TARGET_ARM && optimize) \
+ arm_final_prescan_insn (INSN); \
+ else if (TARGET_THUMB2) \
+ thumb2_final_prescan_insn (INSN); \
+ else if (TARGET_THUMB1) \
+ thumb1_final_prescan_insn (INSN)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ (CODE == '@' || CODE == '|' || CODE == '.' \
+ || CODE == '~' || CODE == '#' \
+ || CODE == '(' || CODE == ')' \
+ || (TARGET_32BIT && (CODE == '?')) \
+ || (TARGET_THUMB2 && (CODE == '!')) \
+ || (TARGET_THUMB && (CODE == '_')))
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (unsigned HOST_WIDE_INT) (x) \
+ : ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0xffffffff) |\
+ ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (unsigned HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
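+
+/* Illustrative values: on a 64-bit host, ARM_SIGN_EXTEND (0x80000000)
+   yields (HOST_WIDE_INT) 0xffffffff80000000, while
+   ARM_SIGN_EXTEND (0x7fffffff) stays 0x7fffffff; on a 32-bit host the
+   value is returned unchanged. */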
+
+/* Output the address of an operand. */
+#define ARM_PRINT_OPERAND_ADDRESS(STREAM, X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ /* APPLE LOCAL 6258536 Atomic builtins */ \
+ asm_fprintf (STREAM, "[%r]", REGNO (X)); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register. */ \
+ /* (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ asm_fprintf (STREAM, "[%r, #%wd]", \
+ REGNO (base), offset); \
+ break; \
+ \
+ case REG: \
+ asm_fprintf (STREAM, "[%r, %s%r]", \
+ REGNO (base), is_minus ? "-" : "", \
+ REGNO (index)); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ asm_fprintf (STREAM, "[%r, %s%r", \
+ REGNO (base), is_minus ? "-" : "", \
+ REGNO (XEXP (index, 0))); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ gcc_unreachable (); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern enum machine_mode output_memory_reference_mode; \
+ \
+ gcc_assert (GET_CODE (XEXP (X, 0)) == REG); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ asm_fprintf (STREAM, "[%r, #%s%d]!", \
+ REGNO (XEXP (X, 0)), \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ asm_fprintf (STREAM, "[%r], #%s%d", \
+ REGNO (XEXP (X, 0)), \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else if (GET_CODE (X) == PRE_MODIFY) \
+ { \
+ asm_fprintf (STREAM, "[%r, ", REGNO (XEXP (X, 0))); \
+ if (GET_CODE (XEXP (XEXP (X, 1), 1)) == CONST_INT) \
+ asm_fprintf (STREAM, "#%wd]!", \
+ INTVAL (XEXP (XEXP (X, 1), 1))); \
+ else \
+ asm_fprintf (STREAM, "%r]!", \
+ REGNO (XEXP (XEXP (X, 1), 1))); \
+ } \
+ else if (GET_CODE (X) == POST_MODIFY) \
+ { \
+ asm_fprintf (STREAM, "[%r], ", REGNO (XEXP (X, 0))); \
+ if (GET_CODE (XEXP (XEXP (X, 1), 1)) == CONST_INT) \
+ asm_fprintf (STREAM, "#%wd", \
+ INTVAL (XEXP (XEXP (X, 1), 1))); \
+ else \
+ asm_fprintf (STREAM, "%r", \
+ REGNO (XEXP (XEXP (X, 1), 1))); \
+ } \
+ else output_addr_const (STREAM, X); \
+}
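+
+/* Example renderings from the macro above (ARM mode, illustrative):
+     (reg r0)                      -> [r0]
+     (plus (reg r0) (const_int 4)) -> [r0, #4]
+     (plus (reg r0) (reg r1))      -> [r0, r1]
+     (post_inc (reg r0)), SImode   -> [r0], #4
+     (pre_dec (reg r0)),  SImode   -> [r0, #-4]!  */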
+
+#define THUMB_PRINT_OPERAND_ADDRESS(STREAM, X) \
+{ \
+ if (GET_CODE (X) == REG) \
+ asm_fprintf (STREAM, "[%r]", REGNO (X)); \
+ else if (GET_CODE (X) == POST_INC) \
+ asm_fprintf (STREAM, "%r!", REGNO (XEXP (X, 0))); \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ gcc_assert (GET_CODE (XEXP (X, 0)) == REG); \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ asm_fprintf (STREAM, "[%r, #%wd]", \
+ REGNO (XEXP (X, 0)), \
+ INTVAL (XEXP (X, 1))); \
+ else \
+ asm_fprintf (STREAM, "[%r, %r]", \
+ REGNO (XEXP (X, 0)), \
+ REGNO (XEXP (X, 1))); \
+ } \
+ else \
+ output_addr_const (STREAM, X); \
+}
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define PRINT_OPERAND_ADDRESS(STREAM, X) \
+ if (TARGET_32BIT) \
+ ARM_PRINT_OPERAND_ADDRESS (STREAM, X) \
+ else \
+ THUMB_PRINT_OPERAND_ADDRESS (STREAM, X)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#define OUTPUT_ADDR_CONST_EXTRA(file, x, fail) \
+ if (arm_output_addr_const_extra (file, x) == FALSE) \
+ goto fail
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ arm_return_addr (COUNT, FRAME)
+
+/* Mask of the bits in the PC that contain the real return address
+ when running in 26-bit mode. */
+#define RETURN_ADDR_MASK26 (0x03fffffc)
+
+/* Pick up the return address upon entry to a procedure. Used for
+ dwarf2 unwind information. This also enables the table driven
+ mechanism. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26-bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32-bit mode. */ \
+ ((arm_arch4 || TARGET_THUMB) \
+ ? (gen_int_mode ((unsigned long)0xffffffff, Pmode)) \
+ : arm_gen_return_addr_mask ())
+
+/* APPLE LOCAL begin ARM darwin optimization defaults */
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
+ optimization_options ((LEVEL), (SIZE))
+/* APPLE LOCAL end ARM darwin optimization defaults */
+
+/* APPLE LOCAL begin 5831562 ARM pseudo-pseudo tying */
+#define TIE_PSEUDOS 1
+/* APPLE LOCAL end 5831562 ARM pseudo-pseudo tying */
+
+/* APPLE LOCAL begin ARM strings in code */
+/* APPLE LOCAL begin ARM compact switch tables */
+/* The length for consttable_string needs to be computed in code. */
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+ arm_adjust_insn_length ((INSN), &(LENGTH))
+/* APPLE LOCAL end ARM compact switch tables */
+/* APPLE LOCAL end ARM strings in code */
+
+/* APPLE LOCAL begin ARM prefer SP to FP */
+#define DEBUGGER_AUTO_OFFSET(X) arm_local_debug_offset (X)
+#define ALLOW_ELIMINATION_TO_SP
+/* APPLE LOCAL end ARM prefer SP to FP */
+
+/* APPLE LOCAL begin ARM compact switch tables */
+#define LABEL_ALIGN(LABEL) arm_label_align(LABEL)
+#define TARGET_EXTRA_CASES (TARGET_THUMB ? 1 : 0)
+
+/* Don't take shortcuts which may compromise the precision of
+ address/alignment calculations. */
+#define TARGET_EXACT_SIZE_CALCULATIONS
+
+/* Count the size of the unexpanded prologue. */
+#define TARGET_UNEXPANDED_PROLOGUE_SIZE \
+ (TARGET_THUMB ? count_thumb_unexpanded_prologue () : 0)
+
+/* Align labels in ADDR_DIFF_VECs with the same alignment as
+ the table they are a part of. */
+#define TARGET_ALIGN_ADDR_DIFF_VEC_LABEL
+/* APPLE LOCAL end ARM compact switch tables */
+
+/* APPLE LOCAL begin ARM 4-byte align stack objects */
+/* In Thumb mode align stack objects on 4 bytes, so we can use
+ the %sp+N form of ADD to compute their addresses rather than
+ having to break this into 2 insns. */
+#if TARGET_MACHO
+#define LOCAL_ALIGNMENT(TYPE, BASIC_ALIGN) \
+ (TARGET_THUMB ? (MAX (BASIC_ALIGN, 4 * BITS_PER_UNIT)) : BASIC_ALIGN)
+#endif
+/* APPLE LOCAL end ARM 4-byte align stack objects */
+
+/* APPLE LOCAL begin ARM 6148015 */
+/* Tells us how to find the CFA == dwarf frame_base == address of the stack
+ on entry to the function, given the (virtual) arg-pointer. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) \
+ ((FIRST_PARM_OFFSET (FNDECL)) \
+ + (DECL_STRUCT_FUNCTION (FNDECL))->pretend_args_size)
+/* APPLE LOCAL end ARM 6148015 */
+
+/* APPLE LOCAL begin 6186914 */
+/* As per the ARM ABI, for double-width VFP regs:
+ Dx = DW_OP_regx(256+x)
+ For single-width VFP regs:
+ S[2x] = DW_OP_regx(256 + (x >> 1)) DW_OP_bit_piece (32, 0)
+ S[2x+1] = DW_OP_regx(256 + (x >> 1)) DW_OP_bit_piece (32, 32)
+ It's unfortunate that we have to put this into inline code, but the
+ interfaces we need from dwarf2out.c aren't exposed. */
+#define TARGET_DWARF2_REG_HANDLER(reg) \
+ do { \
+ if (IS_VFP_REGNUM (REGNO (reg)) \
+ && (GET_MODE (reg) == SFmode || GET_MODE (reg) == DFmode)) \
+ { \
+ dw_loc_descr_ref loc_result = NULL; \
+ dw_loc_descr_ref temp; \
+ unsigned int relative_regno = REGNO (reg) - FIRST_VFP_REGNUM; \
+ unsigned int base_reg = 256 + (relative_regno >> 1); \
+ temp = one_reg_loc_descriptor (base_reg, initialized); \
+ add_loc_descr (&loc_result, temp); \
+ if (GET_MODE (reg) == SFmode) \
+ { \
+ int offset = relative_regno & 0x1 ? 32 : 0; \
+ temp = new_loc_descr (DW_OP_bit_piece, 32, offset); \
+ add_loc_descr (&loc_result, temp); \
+ } \
+ return loc_result; \
+ } \
+ } while (0)
+/* APPLE LOCAL end 6186914 */
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* Neon defines builtins from ARM_BUILTIN_MAX upwards, though they don't have
+ symbolic names defined here (which would require too much duplication).
+ FIXME? */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+enum arm_builtins
+{
+ ARM_BUILTIN_GETWCX,
+ ARM_BUILTIN_SETWCX,
+
+ ARM_BUILTIN_WZERO,
+
+ ARM_BUILTIN_WAVG2BR,
+ ARM_BUILTIN_WAVG2HR,
+ ARM_BUILTIN_WAVG2B,
+ ARM_BUILTIN_WAVG2H,
+
+ ARM_BUILTIN_WACCB,
+ ARM_BUILTIN_WACCH,
+ ARM_BUILTIN_WACCW,
+
+ ARM_BUILTIN_WMACS,
+ ARM_BUILTIN_WMACSZ,
+ ARM_BUILTIN_WMACU,
+ ARM_BUILTIN_WMACUZ,
+
+ ARM_BUILTIN_WSADB,
+ ARM_BUILTIN_WSADBZ,
+ ARM_BUILTIN_WSADH,
+ ARM_BUILTIN_WSADHZ,
+
+ ARM_BUILTIN_WALIGN,
+
+ ARM_BUILTIN_TMIA,
+ ARM_BUILTIN_TMIAPH,
+ ARM_BUILTIN_TMIABB,
+ ARM_BUILTIN_TMIABT,
+ ARM_BUILTIN_TMIATB,
+ ARM_BUILTIN_TMIATT,
+
+ ARM_BUILTIN_TMOVMSKB,
+ ARM_BUILTIN_TMOVMSKH,
+ ARM_BUILTIN_TMOVMSKW,
+
+ ARM_BUILTIN_TBCSTB,
+ ARM_BUILTIN_TBCSTH,
+ ARM_BUILTIN_TBCSTW,
+
+ ARM_BUILTIN_WMADDS,
+ ARM_BUILTIN_WMADDU,
+
+ ARM_BUILTIN_WPACKHSS,
+ ARM_BUILTIN_WPACKWSS,
+ ARM_BUILTIN_WPACKDSS,
+ ARM_BUILTIN_WPACKHUS,
+ ARM_BUILTIN_WPACKWUS,
+ ARM_BUILTIN_WPACKDUS,
+
+ ARM_BUILTIN_WADDB,
+ ARM_BUILTIN_WADDH,
+ ARM_BUILTIN_WADDW,
+ ARM_BUILTIN_WADDSSB,
+ ARM_BUILTIN_WADDSSH,
+ ARM_BUILTIN_WADDSSW,
+ ARM_BUILTIN_WADDUSB,
+ ARM_BUILTIN_WADDUSH,
+ ARM_BUILTIN_WADDUSW,
+ ARM_BUILTIN_WSUBB,
+ ARM_BUILTIN_WSUBH,
+ ARM_BUILTIN_WSUBW,
+ ARM_BUILTIN_WSUBSSB,
+ ARM_BUILTIN_WSUBSSH,
+ ARM_BUILTIN_WSUBSSW,
+ ARM_BUILTIN_WSUBUSB,
+ ARM_BUILTIN_WSUBUSH,
+ ARM_BUILTIN_WSUBUSW,
+
+ ARM_BUILTIN_WAND,
+ ARM_BUILTIN_WANDN,
+ ARM_BUILTIN_WOR,
+ ARM_BUILTIN_WXOR,
+
+ ARM_BUILTIN_WCMPEQB,
+ ARM_BUILTIN_WCMPEQH,
+ ARM_BUILTIN_WCMPEQW,
+ ARM_BUILTIN_WCMPGTUB,
+ ARM_BUILTIN_WCMPGTUH,
+ ARM_BUILTIN_WCMPGTUW,
+ ARM_BUILTIN_WCMPGTSB,
+ ARM_BUILTIN_WCMPGTSH,
+ ARM_BUILTIN_WCMPGTSW,
+
+ ARM_BUILTIN_TEXTRMSB,
+ ARM_BUILTIN_TEXTRMSH,
+ ARM_BUILTIN_TEXTRMSW,
+ ARM_BUILTIN_TEXTRMUB,
+ ARM_BUILTIN_TEXTRMUH,
+ ARM_BUILTIN_TEXTRMUW,
+ ARM_BUILTIN_TINSRB,
+ ARM_BUILTIN_TINSRH,
+ ARM_BUILTIN_TINSRW,
+
+ ARM_BUILTIN_WMAXSW,
+ ARM_BUILTIN_WMAXSH,
+ ARM_BUILTIN_WMAXSB,
+ ARM_BUILTIN_WMAXUW,
+ ARM_BUILTIN_WMAXUH,
+ ARM_BUILTIN_WMAXUB,
+ ARM_BUILTIN_WMINSW,
+ ARM_BUILTIN_WMINSH,
+ ARM_BUILTIN_WMINSB,
+ ARM_BUILTIN_WMINUW,
+ ARM_BUILTIN_WMINUH,
+ ARM_BUILTIN_WMINUB,
+
+ ARM_BUILTIN_WMULUM,
+ ARM_BUILTIN_WMULSM,
+ ARM_BUILTIN_WMULUL,
+
+ ARM_BUILTIN_PSADBH,
+ ARM_BUILTIN_WSHUFH,
+
+ ARM_BUILTIN_WSLLH,
+ ARM_BUILTIN_WSLLW,
+ ARM_BUILTIN_WSLLD,
+ ARM_BUILTIN_WSRAH,
+ ARM_BUILTIN_WSRAW,
+ ARM_BUILTIN_WSRAD,
+ ARM_BUILTIN_WSRLH,
+ ARM_BUILTIN_WSRLW,
+ ARM_BUILTIN_WSRLD,
+ ARM_BUILTIN_WRORH,
+ ARM_BUILTIN_WRORW,
+ ARM_BUILTIN_WRORD,
+ ARM_BUILTIN_WSLLHI,
+ ARM_BUILTIN_WSLLWI,
+ ARM_BUILTIN_WSLLDI,
+ ARM_BUILTIN_WSRAHI,
+ ARM_BUILTIN_WSRAWI,
+ ARM_BUILTIN_WSRADI,
+ ARM_BUILTIN_WSRLHI,
+ ARM_BUILTIN_WSRLWI,
+ ARM_BUILTIN_WSRLDI,
+ ARM_BUILTIN_WRORHI,
+ ARM_BUILTIN_WRORWI,
+ ARM_BUILTIN_WRORDI,
+
+ ARM_BUILTIN_WUNPCKIHB,
+ ARM_BUILTIN_WUNPCKIHH,
+ ARM_BUILTIN_WUNPCKIHW,
+ ARM_BUILTIN_WUNPCKILB,
+ ARM_BUILTIN_WUNPCKILH,
+ ARM_BUILTIN_WUNPCKILW,
+
+ ARM_BUILTIN_WUNPCKEHSB,
+ ARM_BUILTIN_WUNPCKEHSH,
+ ARM_BUILTIN_WUNPCKEHSW,
+ ARM_BUILTIN_WUNPCKEHUB,
+ ARM_BUILTIN_WUNPCKEHUH,
+ ARM_BUILTIN_WUNPCKEHUW,
+ ARM_BUILTIN_WUNPCKELSB,
+ ARM_BUILTIN_WUNPCKELSH,
+ ARM_BUILTIN_WUNPCKELSW,
+ ARM_BUILTIN_WUNPCKELUB,
+ ARM_BUILTIN_WUNPCKELUH,
+ ARM_BUILTIN_WUNPCKELUW,
+
+ ARM_BUILTIN_THREAD_POINTER,
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ ARM_BUILTIN_NEON_BASE,
+
+ ARM_BUILTIN_MAX = ARM_BUILTIN_NEON_BASE /* FIXME: Wrong! */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+};
+#endif /* ! GCC_ARM_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm.md
new file mode 100644
index 000000000..ab80d96d3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm.md
@@ -0,0 +1,11975 @@
+;;- Machine description for ARM for GNU compiler
+;; Copyright 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+;; 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+
+;;---------------------------------------------------------------------------
+;; Constants
+
+;; Register numbers
+(define_constants
+ [(R0_REGNUM 0) ; First CORE register
+ (IP_REGNUM 12) ; Scratch register
+ (SP_REGNUM 13) ; Stack pointer
+ (LR_REGNUM 14) ; Return address register
+ (PC_REGNUM 15) ; Program counter
+ (CC_REGNUM 24) ; Condition code pseudo register
+ (LAST_ARM_REGNUM 15) ;
+ (FPA_F0_REGNUM 16) ; FIRST_FPA_REGNUM
+ (FPA_F7_REGNUM 23) ; LAST_FPA_REGNUM
+ ]
+)
+;; 3rd operand to select_dominance_cc_mode
+(define_constants
+ [(DOM_CC_X_AND_Y 0)
+ (DOM_CC_NX_OR_Y 1)
+ (DOM_CC_X_OR_Y 2)
+ ]
+)
+
+;; UNSPEC Usage:
+;; Note: sin and cos are no longer used.
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+;; Unspec constants for Neon are defined in neon.md.
+
+(define_constants
+ [(UNSPEC_SIN 0) ; `sin' operation (MODE_FLOAT):
+ ; operand 0 is the result,
+ ; operand 1 the parameter.
+ (UNPSEC_COS 1) ; `cos' operation (MODE_FLOAT):
+ ; operand 0 is the result,
+ ; operand 1 the parameter.
+ (UNSPEC_PUSH_MULT 2) ; `push multiple' operation:
+ ; operand 0 is the first register,
+ ; subsequent registers are in parallel (use ...)
+ ; expressions.
+ (UNSPEC_PIC_SYM 3) ; A symbol that has been treated properly for pic
+ ; usage, that is, we will add the pic_register
+ ; value to it before trying to dereference it.
+ (UNSPEC_PIC_BASE 4) ; Adding the PC value to the offset to the
+ ; GLOBAL_OFFSET_TABLE. The operation is fully
+ ; described by the RTL but must be wrapped to
+ ; prevent combine from trying to rip it apart.
+ (UNSPEC_PRLG_STK 5) ; A special barrier that prevents frame accesses
+ ; being scheduled before the stack adjustment insn.
+ (UNSPEC_PROLOGUE_USE 6) ; As USE insns are not meaningful after reload,
+ ; this unspec is used to prevent the deletion of
+ ; instructions setting registers for EH handling
+ ; and stack frame generation. Operand 0 is the
+ ; register to "use".
+ (UNSPEC_CHECK_ARCH 7) ; Set CCs to indicate 26-bit or 32-bit mode.
+ (UNSPEC_WSHUFH 8) ; Used by the intrinsic form of the iWMMXt WSHUFH instruction.
+ (UNSPEC_WACC 9) ; Used by the intrinsic form of the iWMMXt WACC instruction.
+ (UNSPEC_TMOVMSK 10) ; Used by the intrinsic form of the iWMMXt TMOVMSK instruction.
+ (UNSPEC_WSAD 11) ; Used by the intrinsic form of the iWMMXt WSAD instruction.
+ (UNSPEC_WSADZ 12) ; Used by the intrinsic form of the iWMMXt WSADZ instruction.
+ (UNSPEC_WMACS 13) ; Used by the intrinsic form of the iWMMXt WMACS instruction.
+ (UNSPEC_WMACU 14) ; Used by the intrinsic form of the iWMMXt WMACU instruction.
+ (UNSPEC_WMACSZ 15) ; Used by the intrinsic form of the iWMMXt WMACSZ instruction.
+ (UNSPEC_WMACUZ 16) ; Used by the intrinsic form of the iWMMXt WMACUZ instruction.
+ (UNSPEC_CLRDI 17) ; Used by the intrinsic form of the iWMMXt CLRDI instruction.
+ (UNSPEC_WMADDS 18) ; Used by the intrinsic form of the iWMMXt WMADDS instruction.
+ (UNSPEC_WMADDU 19) ; Used by the intrinsic form of the iWMMXt WMADDU instruction.
+ (UNSPEC_TLS 20) ; A symbol that has been treated properly for TLS usage.
+ (UNSPEC_PIC_LABEL 21) ; A label used for PIC access that does not appear in the
+ ; instruction stream.
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ (UNSPEC_STACK_ALIGN 22) ; Doubleword aligned stack pointer. Used to
+ ; generate correct unwind information.
+ ; APPLE LOCAL ARM setjmp/longjmp interworking
+ (UNSPEC_JMP_XCHG 23) ; Indirect jump with possible change in ARM/Thumb state.
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+ ; APPLE LOCAL ARM UXTB support
+ (UNSPEC_UXTB16 27) ; The UXTB16 instruction (ARM only)
+;; APPLE LOCAL begin 6258536 atomic builtins
+ (UNSPEC_CMPXCHG 28) ; Atomic compare and swap operations
+ (UNSPEC_BARRIER 29) ; memory barrier
+ (UNSPEC_SYNC 30) ; memory sync
+;; APPLE LOCAL end 6258536 atomic builtins
+ ]
+)
+
+;; UNSPEC_VOLATILE Usage:
+
+(define_constants
+ [(VUNSPEC_BLOCKAGE 0) ; `blockage' insn to prevent scheduling across an
+ ; insn in the code.
+ (VUNSPEC_EPILOGUE 1) ; `epilogue' insn, used to represent any part of the
+ ; instruction epilogue sequence that isn't expanded
+ ; into normal RTL. Used for both normal and sibcall
+ ; epilogues.
+ (VUNSPEC_ALIGN 2) ; `align' insn. Used at the head of a minipool table
+ ; for inlined constants.
+ (VUNSPEC_POOL_END 3) ; `end-of-table'. Used to mark the end of a minipool
+ ; table.
+ (VUNSPEC_POOL_1 4) ; `pool-entry(1)'. An entry in the constant pool for
+ ; an 8-bit object.
+ (VUNSPEC_POOL_2 5) ; `pool-entry(2)'. An entry in the constant pool for
+ ; a 16-bit object.
+ (VUNSPEC_POOL_4 6) ; `pool-entry(4)'. An entry in the constant pool for
+ ; a 32-bit object.
+ (VUNSPEC_POOL_8 7) ; `pool-entry(8)'. An entry in the constant pool for
+ ; a 64-bit object.
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ (VUNSPEC_POOL_16 8) ; `pool-entry(16)'. An entry in the constant pool for
+ ; a 128-bit object.
+ (VUNSPEC_TMRC 9) ; Used by the iWMMXt TMRC instruction.
+ (VUNSPEC_TMCR 10) ; Used by the iWMMXt TMCR instruction.
+ (VUNSPEC_ALIGN8 11) ; 8-byte alignment version of VUNSPEC_ALIGN
+ (VUNSPEC_WCMP_EQ 12) ; Used by the iWMMXt WCMPEQ instructions
+ (VUNSPEC_WCMP_GTU 13) ; Used by the iWMMXt WCMPGTU instructions
+ (VUNSPEC_WCMP_GT 14) ; Used by the iWMMXt WCMPGT instructions
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+ (VUNSPEC_EH_RETURN 20); Use to override the return address for exception
+ ; handling.
+ ; APPLE LOCAL begin ARM strings in code
+ (VUNSPEC_POOL_STRING 21) ; `pool-entry(string)'. An entry in the constant
+ ; pool for a string.
+ ; APPLE LOCAL end ARM strings in code
+;; APPLE LOCAL begin 6258536 atomic builtins
+ (VUNSPEC_LL 22) ; Load locked (ldrex)
+ (VUNSPEC_SC 23) ; Store conditional (strex)
+;; APPLE LOCAL end 6258536 atomic builtins
+ ]
+)
+
+;;---------------------------------------------------------------------------
+;; Attributes
+
+; IS_THUMB is set to 'yes' when we are generating Thumb code, and 'no' when
+; generating ARM code. This is used to control the length of some insn
+; patterns that share the same RTL in both ARM and Thumb code.
+(define_attr "is_thumb" "no,yes" (const (symbol_ref "thumb_code")))
+
+; IS_STRONGARM is set to 'yes' when compiling for StrongARM; it affects
+; scheduling decisions for the load unit and the multiplier.
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_tune_strongarm")))
+
+; IS_XSCALE is set to 'yes' when compiling for XScale.
+(define_attr "is_xscale" "no,yes" (const (symbol_ref "arm_tune_xscale")))
+
+;; Operand number of an input operand that is shifted. Zero if the
+;; given instruction does not shift one of its input operands.
+(define_attr "shift" "" (const_int 0))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try to group them together.)
+(define_attr "fpu" "none,fpa,fpe2,fpe3,maverick,vfp"
+ (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; POOL_RANGE is how far away from a constant pool entry this insn
+; can be placed. If the distance is zero, then this insn will never
+; reference the pool.
+; NEG_POOL_RANGE is nonzero for insns that can reference a constant pool entry
+; before its address.
+(define_attr "pool_range" "" (const_int 0))
+(define_attr "neg_pool_range" "" (const_int 0))
+
+; An assembler sequence may clobber the condition codes without us knowing.
+; If such an insn references the pool, then we have no way of knowing how,
+; so use the most conservative value for pool_range.
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")
+ (set_attr "pool_range" "250")])
+
+;; The instruction used to implement a particular pattern. This
+;; information is used by pipeline descriptions to provide accurate
+;; scheduling information.
+
+(define_attr "insn"
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ "mov,mvn,smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,smmls,umaal,smlald,smlsld,clz,mrs,msr,xtab,sdiv,udiv,other"
+ (const_string "other"))
+
+; The TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor, can run in parallel with other, basic
+; instructions. If write-buffer scheduling is enabled then it can also be
+; used in the scheduling of writes.
+
+; Classification of each insn
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+; Note: vfp.md has different meanings for some of these, and some further
+; types as well. See that file for details.
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+; alu any alu instruction that doesn't hit memory or fp
+; regs or have a shifted source operand
+; alu_shift any data instruction that doesn't hit memory or fp
+; regs, but has a source operand shifted by a constant
+; alu_shift_reg any data instruction that doesn't hit memory or fp
+; regs, but has a source operand shifted by a register value
+; mult a multiply instruction
+; block blockage insn; this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_load[sd] single/double load from memory
+; f_store[sd] single/double store to memory
+; f_flag a transfer of co-processor flags to the CPSR
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; f_cvt convert floating<->integral
+; branch a branch
+; call a subroutine call
+; load_byte load byte(s) from memory to arm registers
+; load1 load 1 word from memory to arm registers
+; load2 load 2 words from memory to arm registers
+; load3 load 3 words from memory to arm registers
+; load4 load 4 words from memory to arm registers
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 (or more) words
+; Additions for Cirrus Maverick co-processor:
+; mav_farith Floating point arithmetic (4 cycle)
+; mav_dmult Double multiplies (7 cycle)
+;
+(define_attr "type"
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ "alu,alu_shift,alu_shift_reg,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,f_flag,float_em,f_load,f_store,f_loads,f_loadd,f_stores,f_stored,f_mem_r,r_mem_f,f_2_r,r_2_f,f_cvt,branch,call,load_byte,load1,load2,load3,load4,store1,store2,store3,store4,mav_farith,mav_dmult,fmuls,fmuld,fmacs,fmacd"
+ (if_then_else
+ (eq_attr "insn" "smulxy,smlaxy,smlalxy,smulwy,smlawx,mul,muls,mla,mlas,umull,umulls,umlal,umlals,smull,smulls,smlal,smlals")
+ (const_string "mult")
+ (const_string "alu")))
+
+; Load scheduling, set from the arm_ld_sched variable
+; initialized by arm_override_options()
+(define_attr "ldsched" "no,yes" (const (symbol_ref "arm_ld_sched")))
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+;
+; USE means that the condition codes are used by the insn in the process of
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches.
+;
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+;
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+;
+; JUMP_CLOB is used when the condition cannot be represented by a single
+; instruction (UNEQ and LTGT). These cannot be predicated.
+;
+; NOCOND means that this insn neither alters the condition codes nor is its
+; output affected by them.
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (const_string "clob")
+ (const_string "nocond")))
+
+; Predicable means that the insn can be conditionally executed based on
+; an automatically added predicate (additional patterns are generated by
+; gen...). We default to 'no' because no Thumb patterns match this rule
+; and not all ARM patterns do.
+(define_attr "predicable" "no,yes" (const_string "no"))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_tune_wbuf")))
+
+; WRITE_CONFLICT implies that a read following an unrelated write is likely
+; to stall the processor. Used with model_wbuf above.
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load1")
+ (const_string "yes")
+ (const_string "no")))
+
+; Classify the insns into those that take one cycle and those that take more
+; than one on the main cpu execution unit.
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "alu,alu_shift,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label. Only applicable to Thumb code.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; The number of machine instructions this pattern expands to.
+;; Used for Thumb-2 conditional execution.
+(define_attr "ce_count" "" (const_int 1))
+
+;; APPLE LOCAL end v7 support. Merge from mainline
+;;---------------------------------------------------------------------------
+;; Mode macros
+
+; A list of modes that are exactly 64 bits in size. We use this to expand
+; some splits that are the same for all modes when operating on ARM
+; registers.
+(define_mode_macro ANY64 [DI DF V8QI V4HI V2SI V2SF])
+
+;;---------------------------------------------------------------------------
+;; Predicates
+
+(include "predicates.md")
+(include "constraints.md")
+
+;;---------------------------------------------------------------------------
+;; Pipeline descriptions
+
+;; Processor type. This is created automatically from arm-cores.def.
+(include "arm-tune.md")
+
+;; True if the generic scheduling description should be used.
+
+(define_attr "generic_sched" "yes,no"
+ (const (if_then_else
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa8,cortexr4")
+ (const_string "no")
+ (const_string "yes"))))
+
+(define_attr "generic_vfp" "yes,no"
+ (const (if_then_else
+ (and (eq_attr "fpu" "vfp")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (eq_attr "tune" "!arm1020e,arm1022e,cortexa8"))
+ (const_string "yes")
+ (const_string "no"))))
+
+(include "arm-generic.md")
+(include "arm926ejs.md")
+(include "arm1020e.md")
+(include "arm1026ejs.md")
+(include "arm1136jfs.md")
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(include "cortex-a8.md")
+(include "cortex-r4.md")
+(include "vfp11.md")
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+
+;;---------------------------------------------------------------------------
+;; Insn patterns
+;;
+;; Addition insns.
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register; what we don't want is for something being
+;; written to partially overlap something that is an input.
+;; Cirrus 64-bit additions should not be split, because we have native
+;; 64-bit addition instructions.
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_expand "adddi3"
+ [(parallel
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:DI 2 "arm_add64_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_EITHER"
+ "
+ if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[0], DImode))
+ operands[0] = force_reg (DImode, operands[0]);
+ if (!cirrus_fp_register (operands[1], DImode))
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_insn (gen_cirrus_adddi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ if (GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ if (GET_CODE (operands[2]) != REG)
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (TARGET_ARM
+ && (GET_CODE (operands[2]) == CONST_INT
+ || GET_CODE (operands[2]) == CONST_DOUBLE)
+ && !const64_ok_for_arm_immediate (operands[2]))
+ {
+ emit_insn (gen_subdi3 (operands[0], operands[1],
+ negate_rtx (DImode, operands[2])));
+ DONE;
+ }
+ "
+)
+;; APPLE LOCAL end 5831562 long long constants
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "l")))
+ (clobber (reg:CC CC_REGNUM))
+ ]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+ [(set_attr "length" "4")]
+)
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_insn_and_split "*arm_adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0, 0, r, 0")
+ (match_operand:DI 2 "arm_rhs64_operand" "r, 0, Dd,Dd")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && !(TARGET_HARD_FLOAT && TARGET_MAVERICK)"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(parallel [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (plus:SI (match_dup 4) (match_dup 5))))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart_mode (SImode, DImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+;; APPLE LOCAL end 5831562 long long constants
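+;; For illustration (not part of the original sources): on a typical
+;; little-endian ABI where a DImode value occupies a register pair
+;; (low word first), the split above lowers a 64-bit add such as
+;;   unsigned long long f (unsigned long long a, unsigned long long b)
+;;     { return a + b; }
+;; into an add that sets the carry flag followed by an add-with-carry:
+;;   adds  r0, r0, r2    @ low words; sets C on unsigned overflow
+;;   adc   r1, r1, r3    @ high words plus carry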
+
+(define_insn_and_split "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && !(TARGET_HARD_FLOAT && TARGET_MAVERICK)"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(parallel [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (plus:SI (ashiftrt:SI (match_dup 2)
+ (const_int 31))
+ (match_dup 4))))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn_and_split "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && !(TARGET_HARD_FLOAT && TARGET_MAVERICK)"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(parallel [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (plus:SI (match_dup 4) (const_int 0))))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT && GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, NULL_RTX,
+ INTVAL (operands[2]), operands[0], operands[1],
+ optimize && !no_new_pseudos);
+ DONE;
+ }
+ "
+)
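+;; For illustration: when the constant cannot be encoded as a single ARM
+;; immediate (an 8-bit value rotated right by an even amount),
+;; arm_split_constant emits a short sequence instead; e.g. (hypothetical
+;; registers) r0 = r1 + 0x10001 becomes:
+;;   add   r0, r1, #65536
+;;   add   r0, r0, #1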
+
+; If there is a scratch available, this will be faster than synthesizing the
+; addition.
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (set (match_operand:SI 0 "arm_general_register_operand" "")
+ (plus:SI (match_operand:SI 1 "arm_general_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT &&
+ !(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))
+ && const_ok_for_arm (~INTVAL (operands[2]))"
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))]
+ ""
+)
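+;; For illustration: with c = 0xffff00ff, neither c nor -c is a valid ARM
+;; immediate, but ~c (0x0000ff00) is, so the constant can be loaded into
+;; the scratch with a single MVN and then added (hypothetical registers):
+;;   mvn   r3, #0x0000ff00   @ r3 = 0xffff00ff
+;;   add   r0, r1, r3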
+
+(define_insn_and_split "*arm_addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+ "TARGET_32BIT &&
+ GET_CODE (operands[2]) == CONST_INT
+ && !(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, curr_insn,
+ INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+ "
+ [(set_attr "length" "4,4,16")
+ (set_attr "predicable" "yes")]
+)
+
+;; Register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ static const char * const asms[] =
+ {
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+ };
+ if ((which_alternative == 2 || which_alternative == 6)
+ && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+ "
+ [(set_attr "length" "2")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; Reloading and elimination of the frame pointer can
+;; sometimes cause this optimization to be missed.
+(define_peephole2
+ [(set (match_operand:SI 0 "arm_general_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (reg:SI SP_REGNUM)))]
+ "TARGET_THUMB1
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ [(set (match_dup 0) (plus:SI (reg:SI SP_REGNUM) (match_dup 1)))]
+ ""
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
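+;; For illustration: the peephole above turns the two-instruction
+;; Thumb sequence (hypothetical register)
+;;   mov   r3, #100
+;;   add   r3, r3, sp
+;; back into the single SP-relative form
+;;   add   r3, sp, #100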
+
+;; APPLE LOCAL begin ARM peephole
+;; And sometimes greg will generate the same thing this way...
+
+(define_peephole2
+ [(set (match_operand:SI 0 "arm_general_register_operand" "")
+ (reg:SI SP_REGNUM))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "const_int_operand" "")))]
+ "TARGET_THUMB
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ [(set (match_dup 0) (plus:SI (reg:SI SP_REGNUM) (match_dup 1)))]
+ ""
+)
+;; APPLE LOCAL end ARM peephole
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? Make Thumb-2 variants which prefer low regs
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r, r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ add%.\\t%0, %1, %2
+ sub%.\\t%0, %1, #%n2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r, r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*compare_negsi_si"
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z
+ (neg:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "cmn%?\\t%1, %0"
+ [(set_attr "conds" "set")]
+)
+
+;; This is the canonicalization of addsi3_compare0_for_combiner when the
+;; addend is a constant.
+(define_insn "*cmpsi2_addneg"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC
+ (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_addimm_operand" "I,L")))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 3 "arm_addimm_operand" "L,I")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && INTVAL (operands[2]) == -INTVAL (operands[3])"
+ "@
+ sub%.\\t%0, %1, %2
+ add%.\\t%0, %1, #%n2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+;; Convert the sequence
+;; sub rd, rn, #1
+;; cmn rd, #1 (equivalent to cmp rd, #-1)
+;; bne dest
+;; into
+;; subs rd, rn, #1
+;; bcs dest ((unsigned)rn >= 1)
+;; similarly for the beq variant using bcc.
+;; This is a common looping idiom (while (n--))
+(define_peephole2
+ [(set (match_operand:SI 0 "arm_general_register_operand" "")
+ (plus:SI (match_operand:SI 1 "arm_general_register_operand" "")
+ (const_int -1)))
+ (set (match_operand 2 "cc_register" "")
+ (compare (match_dup 0) (const_int -1)))
+ (set (pc)
+ (if_then_else (match_operator 3 "equality_operator"
+ [(match_dup 2) (const_int 0)])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && peep2_reg_dead_p (3, operands[2])"
+ [(parallel[
+ (set (match_dup 2)
+ (compare:CC
+ (match_dup 1) (const_int 1)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (const_int -1)))])
+ (set (pc)
+ (if_then_else (match_op_dup 3 [(match_dup 2) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "operands[2] = gen_rtx_REG (CCmode, CC_REGNUM);
+ operands[3] = gen_rtx_fmt_ee ((GET_CODE (operands[3]) == NE
+ ? GEU : LTU),
+ VOIDmode,
+ operands[2], const0_rtx);"
+)
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
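+;; For illustration: this is what lets an unsigned overflow check compile
+;; without a separate compare; e.g. (hypothetical registers)
+;;   if (a + b < a)   /* unsigned */
+;; becomes
+;;   adds  r0, r0, r1    @ carry set iff the 32-bit sum wrapped
+;;   bcs   <overflow>    @ i.e. iff (a + b) <u a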
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ add%.\\t%0, %1, %2
+ sub%.\\t%0, %1, #%n2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ add%.\\t%0, %1, %2
+ sub%.\\t%0, %1, #%n2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C CC_REGNUM)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "adc%?\\t%0, %1, %2"
+ [(set_attr "conds" "use")]
+)
+
+(define_insn "*addsi3_carryin_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (plus:SI
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])
+ (match_operand:SI 1 "s_register_operand" "r"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "adc%?\\t%0, %1, %3%S2"
+ [(set_attr "conds" "use")
+ (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "adc%?\\t%0, %1, %2"
+ [(set_attr "conds" "use")]
+)
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "adc%?\\t%0, %1, %2"
+ [(set_attr "conds" "use")]
+)
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C CC_REGNUM) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "adc%?\\t%0, %1, %2"
+ [(set_attr "conds" "use")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_ARM"
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
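+;; For illustration: together with the cmp that sets the flags, incscc lets
+;; a conditional increment such as  r0 = r0 + (x == y)  be emitted without
+;; a branch (hypothetical registers):
+;;   cmp   r1, r2
+;;   addeq r0, r0, #1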
+
+; Transform ((x << y) - 1) into ~(~(x - 1) << y), where x is a constant.
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (ashift:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" ""))
+ (const_int -1)))
+ (clobber (match_operand:SI 3 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (not:SI (ashift:SI (match_dup 3) (match_dup 2))))]
+ "
+ operands[1] = GEN_INT (~(INTVAL (operands[1]) - 1));
+")
+
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "")
+ (match_operand:SF 2 "arm_float_add_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK
+ && !cirrus_fp_register (operands[2], SFmode))
+ operands[2] = force_reg (SFmode, operands[2]);
+")
+
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "")
+ (match_operand:DF 2 "arm_float_add_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK
+ && !cirrus_fp_register (operands[2], DFmode))
+ operands[2] = force_reg (DFmode, operands[2]);
+")
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_expand "subdi3"
+ [(parallel
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:DI 2 "arm_add64_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && TARGET_32BIT
+ && cirrus_fp_register (operands[0], DImode)
+ && cirrus_fp_register (operands[1], DImode))
+ {
+ emit_insn (gen_cirrus_subdi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+
+ if (TARGET_THUMB1)
+ {
+ if (GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ if (GET_CODE (operands[2]) != REG)
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (TARGET_32BIT
+ && (GET_CODE (operands[2]) == CONST_INT
+ || GET_CODE (operands[2]) == CONST_DOUBLE)
+ && !const64_ok_for_arm_immediate (operands[2]))
+ {
+ emit_insn (gen_adddi3 (operands[0], operands[1],
+ negate_rtx (DImode, operands[2])));
+ DONE;
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ "
+)
+
+(define_insn "*arm_subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0,r,0")
+ (match_operand:DI 2 "arm_rhs64_operand" "r,0,0,Dd,Dd")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ if (which_alternative <= 2)
+ return \"subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2\";
+ else
+ {
+ operands[3] = gen_lowpart (SImode, operands[2]);
+ operands[2] = gen_highpart_mode (SImode, DImode, operands[2]);
+ return \"subs\\t%Q0, %Q1, %3\;sbc\\t%R0, %R1, %2\";
+ }"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_insn "*thumb_subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "l")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+ [(set_attr "length" "4")]
+)
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "subs\\t%Q0, %1, %2\;sbc\\t%R0, %1, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ arm_split_constant (MINUS, SImode, NULL_RTX,
+ INTVAL (operands[1]), operands[0],
+ operands[2], optimize && !no_new_pseudos);
+ DONE;
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_subsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "register_operand" "l")
+ (match_operand:SI 2 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "sub\\t%0, %1, %2"
+ [(set_attr "length" "2")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+; ??? Check Thumb-2 split length
+(define_insn_and_split "*arm_subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ "TARGET_32BIT"
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+ "TARGET_32BIT
+ && GET_CODE (operands[1]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, curr_insn,
+ INTVAL (operands[1]), operands[0], operands[2], 0);
+ DONE;
+ "
+ [(set_attr "length" "4,16")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (set (match_operand:SI 0 "arm_general_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "arm_general_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && !const_ok_for_arm (INTVAL (operands[1]))
+ && const_ok_for_arm (~INTVAL (operands[1]))"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (minus:SI (match_dup 3) (match_dup 2)))]
+ ""
+)
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ sub%.\\t%0, %1, %2
+ rsb%.\\t%0, %2, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ "TARGET_ARM"
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "*,8")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (minus:SF (match_operand:SF 1 "arm_float_rhs_operand" "")
+ (match_operand:SF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[1], SFmode))
+ operands[1] = force_reg (SFmode, operands[1]);
+ if (!cirrus_fp_register (operands[2], SFmode))
+ operands[2] = force_reg (SFmode, operands[2]);
+ }
+")
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (minus:DF (match_operand:DF 1 "arm_float_rhs_operand" "")
+ (match_operand:DF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[1], DFmode))
+ operands[1] = force_reg (DFmode, operands[1]);
+ if (!cirrus_fp_register (operands[2], DFmode))
+ operands[2] = force_reg (DFmode, operands[2]);
+ }
+")
+
+
+;; Multiplication insns
+
+(define_expand "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_EITHER"
+ ""
+)
+
+;; Use `&' and then `0' to prevent operands 0 and 1 from being the same.
+(define_insn "*arm_mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && !arm_arch6"
+ "mul%?\\t%0, %2, %1"
+ [(set_attr "insn" "mul")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*arm_mulsi3_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "TARGET_32BIT && arm_arch6"
+ "mul%?\\t%0, %1, %2"
+ [(set_attr "insn" "mul")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+; Unfortunately with the Thumb the '&'/'0' trick can fail when operands
+; 1 and 2 are the same, because reload will make operand 0 match
+; operand 1 without realizing that this conflicts with operand 2. We fix
+; this by adding another alternative to match this case, and then `reload'
+; it ourselves. This alternative must come first.
+(define_insn "*thumb_mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "register_operand" "%l,*h,0")
+ (match_operand:SI 2 "register_operand" "l,l,l")))]
+ "TARGET_THUMB1 && !arm_arch6"
+ "*
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %2\";
+ else
+ return \"mul\\t%0, %2\";
+ "
+ [(set_attr "length" "4,4,2")
+ (set_attr "insn" "mul")]
+)
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb_mulsi3_v6"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l")
+ (mult:SI (match_operand:SI 1 "register_operand" "0,l,0")
+ (match_operand:SI 2 "register_operand" "l,0,0")))]
+ "TARGET_THUMB1 && arm_arch6"
+ "@
+ mul\\t%0, %2
+ mul\\t%0, %1
+ mul\\t%0, %1"
+ [(set_attr "length" "2")
+ (set_attr "insn" "mul")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ "TARGET_ARM && !arm_arch6"
+ "mul%.\\t%0, %2, %1"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "muls")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*mulsi3_compare0_v6"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+;; APPLE LOCAL 6040923 unrecognizable insn ICE
+ "TARGET_ARM && arm_arch6"
+ "mul%.\\t%0, %2, %1"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "muls")]
+)
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ "TARGET_ARM && !arm_arch6"
+ "mul%.\\t%0, %2, %1"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "muls")]
+)
+
+(define_insn "*mulsi_compare0_scratch_v6"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+;; APPLE LOCAL 6040923 unrecognizable insn ICE
+ "TARGET_ARM && arm_arch6"
+ "mul%.\\t%0, %2, %1"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "muls")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; Unnamed templates to match the MLA instruction.
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ "TARGET_32BIT && !arm_arch6"
+ "mla%?\\t%0, %2, %1, %3"
+ [(set_attr "insn" "mla")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulsi3addsi_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 3 "s_register_operand" "r")))]
+ "TARGET_32BIT && arm_arch6"
+ "mla%?\\t%0, %2, %1, %3"
+ [(set_attr "insn" "mla")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
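+;; For illustration: these patterns let the combiner fold
+;;   return a * b + c;
+;; into a single multiply-accumulate (hypothetical registers):
+;;   mla   r0, r0, r1, r2    @ r0 = r0 * r1 + r2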
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_ARM && arm_arch6"
+ "mla%.\\t%0, %2, %1, %3"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mlas")]
+)
+
+(define_insn "*mulsi3addsi_compare0_v6"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ "TARGET_ARM && arm_arch6 && optimize_size"
+ "mla%.\\t%0, %2, %1, %3"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mlas")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_ARM && !arm_arch6"
+ "mla%.\\t%0, %2, %1, %3"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mlas")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*mulsi3addsi_compare0_scratch_v6"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_ARM && arm_arch6 && optimize_size"
+ "mla%.\\t%0, %2, %1, %3"
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mlas")]
+)
+
+(define_insn "*mulsi3subsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI
+ (match_operand:SI 3 "s_register_operand" "r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))))]
+ "TARGET_32BIT && arm_arch_thumb2"
+ "mls%?\\t%0, %2, %1, %3"
+ [(set_attr "insn" "mla")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL end v7 support. Merge from mainline
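+;; For illustration: on Thumb-2 the mls pattern above similarly folds
+;;   c - a * b
+;; into a single multiply-subtract (hypothetical registers):
+;;   mls   r0, r1, r2, r3    @ r0 = r3 - r1 * r2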
+;; Unnamed template to match long long multiply-accumulate (smlal)
+
+(define_insn "*mulsidi3adddi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (plus:DI
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 3 "s_register_operand" "r")))
+ (match_operand:DI 1 "s_register_operand" "0")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m && !arm_arch6"
+ "smlal%?\\t%Q0, %R0, %3, %2"
+ [(set_attr "insn" "smlal")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulsidi3adddi_v6"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (plus:DI
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 3 "s_register_operand" "r")))
+ (match_operand:DI 1 "s_register_operand" "0")))]
+ "TARGET_32BIT && arm_arch6"
+ "smlal%?\\t%Q0, %R0, %3, %2"
+ [(set_attr "insn" "smlal")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m"
+ "smull%?\\t%Q0, %R0, %1, %2"
+ [(set_attr "insn" "smull")
+ (set_attr "predicable" "yes")]
+)
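+;; For illustration: a widening multiply written in C as
+;;   (long long) a * b
+;; maps directly onto this pattern (hypothetical registers):
+;;   smull r0, r1, r2, r3    @ r1:r0 = (s64) r2 * r3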
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m"
+ "umull%?\\t%Q0, %R0, %1, %2"
+ [(set_attr "insn" "umull")
+ (set_attr "predicable" "yes")]
+)
+
+;; Unnamed template to match long long unsigned multiply-accumulate (umlal)
+
+(define_insn "*umulsidi3adddi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (plus:DI
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 3 "s_register_operand" "r")))
+ (match_operand:DI 1 "s_register_operand" "0")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m && !arm_arch6"
+ "umlal%?\\t%Q0, %R0, %3, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "insn" "umlal")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*umulsidi3adddi_v6"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (plus:DI
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 3 "s_register_operand" "r")))
+ (match_operand:DI 1 "s_register_operand" "0")))]
+ "TARGET_32BIT && arm_arch6"
+ "umlal%?\\t%Q0, %R0, %3, %2"
+ [(set_attr "insn" "umlal")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m"
+ "smull%?\\t%3, %0, %2, %1"
+ [(set_attr "insn" "smull")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch3m"
+ "umull%?\\t%3, %0, %2, %1"
+ [(set_attr "insn" "umull")
+ (set_attr "predicable" "yes")]
+)
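+;; For illustration: the two high-part patterns above cover expressions
+;; such as
+;;   (int) (((long long) a * b) >> 32)
+;; where only the upper word is kept; the low word lands in the clobbered
+;; scratch register (hypothetical registers):
+;;   smull r3, r0, r1, r2    @ r0 = high word, r3 discarded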
+
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "s_register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "s_register_operand" "r"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smulbb%?\\t%0, %1, %2"
+ [(set_attr "insn" "smulxy")
+ (set_attr "predicable" "yes")]
+)
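+;; For illustration: a 16x16->32 multiply written as
+;;   (int) (short) a * (short) b
+;; matches mulhisi3 and emits (hypothetical registers)
+;;   smulbb r0, r1, r2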
+
+(define_insn "*mulhisi3tb"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "s_register_operand" "r"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smultb%?\\t%0, %1, %2"
+ [(set_attr "insn" "smulxy")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulhisi3bt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "s_register_operand" "r"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (const_int 16))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smulbt%?\\t%0, %1, %2"
+ [(set_attr "insn" "smulxy")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulhisi3tt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "s_register_operand" "r")
+ (const_int 16))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smultt%?\\t%0, %1, %2"
+ [(set_attr "insn" "smulxy")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulhisi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 2 "s_register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 3 "s_register_operand" "r")))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smlabb%?\\t%0, %2, %3, %1"
+ [(set_attr "insn" "smlaxy")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulhidi3adddi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (plus:DI
+ (match_operand:DI 1 "s_register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (match_operand:HI 2 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:HI 3 "s_register_operand" "r")))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_DSP_MULTIPLY"
+ "smlalbb%?\\t%Q0, %R0, %2, %3"
+ [(set_attr "insn" "smlalxy")
+ (set_attr "predicable" "yes")])
+
+;; APPLE LOCAL begin DImode multiply enhancement
+;; No DI * DI instruction exists (except on Cirrus), but we leave this in
+;; the RTL stream through the early optimization phases
+;; to give them a chance to generate the mulsidi3, etc., patterns.
+
+(define_expand "muldi3"
+ [(parallel
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (mult:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:DI 2 "s_register_operand" "")))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_ARM"
+ "
+ if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[0], DImode))
+ operands[0] = force_reg (DImode, operands[0]);
+ if (!cirrus_fp_register (operands[1], DImode))
+ operands[1] = force_reg (DImode, operands[1]);
+ emit_insn (gen_cirrus_muldi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ "
+)
+
+; Input and output registers cannot overlap in this pattern.
+
+(define_insn_and_split "*soft_muldi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "TARGET_ARM && !(TARGET_HARD_FLOAT && TARGET_MAVERICK)"
+ ""
+ "&& reload_completed"
+ [(set (match_dup 3) (subreg:SI (match_dup 1) 0))
+ (set (match_dup 4) (subreg:SI (match_dup 1) 4))
+ (set (match_dup 0) (mult:DI (zero_extend:DI (match_dup 3))
+ (zero_extend:DI (subreg:SI (match_dup 2) 0))))
+ (set (subreg:SI (match_dup 0) 4) (plus:SI
+ (mult:SI (match_dup 4) (subreg:SI (match_dup 2) 0))
+ (subreg:SI (match_dup 0) 4)))
+ (set (subreg:SI (match_dup 0) 4) (plus:SI
+ (mult:SI (match_dup 3) (subreg:SI (match_dup 2) 4))
+ (subreg:SI (match_dup 0) 4)))]
+ ""
+ ;; APPLE LOCAL 6110622 constant pool reference out of range
+ [(set_attr "length" "20")]
+)
+;; APPLE LOCAL end DImode multiply enhancement
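+;; For illustration: the split above implements the schoolbook identity
+;;   (ah*2^32 + al) * (bh*2^32 + bl) mod 2^64
+;;     = al*bl + ((ah*bl + al*bh) << 32)
+;; as one umull (the full 64-bit al*bl product) followed by two mla
+;; instructions that accumulate the cross products into the high word.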
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "")
+ (match_operand:SF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK
+ && !cirrus_fp_register (operands[2], SFmode))
+ operands[2] = force_reg (SFmode, operands[2]);
+")
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "")
+ (match_operand:DF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK
+ && !cirrus_fp_register (operands[2], DFmode))
+ operands[2] = force_reg (DFmode, operands[2]);
+")
+
+;; Division insns
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (div:SF (match_operand:SF 1 "arm_float_rhs_operand" "")
+ (match_operand:SF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "")
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (div:DF (match_operand:DF 1 "arm_float_rhs_operand" "")
+ (match_operand:DF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "")
+
+;; Modulo insns
+
+(define_expand "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "")
+ (match_operand:SF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "")
+
+(define_expand "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "")
+ (match_operand:DF 2 "arm_float_rhs_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "")
+
+;; Boolean and,ior,xor insns
+
+;; Split up double word logical operations
+
+;; APPLE LOCAL begin 5831562 long long constants
+;; Split up simple DImode logical operations. Simply perform the logical
+;; operation on the upper and lower halves of the registers.
+(define_split
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (match_operator:DI 6 "logical_binary_operator"
+ [(match_operand:DI 1 "s_register_operand" "")
+ (match_operand:DI 2 "arm_rhs64_operand" "")]))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && reload_completed
+ && ! IS_IWMMXT_REGNUM (REGNO (operands[0]))"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
+ (set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart_mode (SImode, DImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
+;; APPLE LOCAL end 5831562 long long constants
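+
+;; Logical DImode operations have no carries between words, so the
+;; split above is exact.  A minimal C sketch of the decomposition
+;; (illustrative only):
+;;
+;;   #include <stdint.h>
+;;   uint64_t and64 (uint64_t a, uint64_t b)
+;;   {
+;;     uint32_t lo = (uint32_t) a & (uint32_t) b;                 /* low word  */
+;;     uint32_t hi = (uint32_t) (a >> 32) & (uint32_t) (b >> 32); /* high word */
+;;     return ((uint64_t) hi << 32) | lo;
+;;   }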
+
+(define_split
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (match_operator:DI 6 "logical_binary_operator"
+ [(sign_extend:DI (match_operand:SI 2 "s_register_operand" ""))
+ (match_operand:DI 1 "s_register_operand" "")]))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
+ (set (match_dup 3) (match_op_dup:SI 6
+ [(ashiftrt:SI (match_dup 2) (const_int 31))
+ (match_dup 4)]))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
+
+;; The zero extend of operand 2 means we can just copy the high part of
+;; operand1 into operand0.
+(define_split
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (ior:DI
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" ""))
+ (match_operand:DI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && operands[0] != operands[1] && reload_completed"
+ [(set (match_dup 0) (ior:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3) (match_dup 4))]
+ "
+ {
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
+
+;; The zero extend of operand 2 means we can just copy the high part of
+;; operand1 into operand0.
+(define_split
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (xor:DI
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" ""))
+ (match_operand:DI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && operands[0] != operands[1] && reload_completed"
+ [(set (match_dup 0) (xor:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3) (match_dup 4))]
+ "
+ {
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
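+
+;; In the splits above, the extended operand fixes the high word: a
+;; zero_extend contributes zeros, so IOR/XOR leave the high word of
+;; operand 1 untouched (a plain copy), while a sign_extend contributes
+;; the sign mask, materialized as (ashiftrt ... 31).  Illustrative C
+;; for the zero-extend case (not the generated code):
+;;
+;;   #include <stdint.h>
+;;   uint64_t ior64_zext (uint64_t a, uint32_t b)
+;;   {
+;;     uint32_t lo = (uint32_t) a | b;       /* orr on the low word   */
+;;     uint32_t hi = (uint32_t) (a >> 32);   /* high word: plain copy */
+;;     return ((uint64_t) hi << 32) | lo;
+;;   }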
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,r,0,r")
+ (match_operand:DI 2 "s_register_operand" "r,r,Dd,Dd")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && ! TARGET_IWMMXT"
+ "#"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_insn_and_split "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ ; The zero extend of operand 2 clears the high word of the output
+ ; operand.
+ [(set (match_dup 0) (and:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3) (const_int 0))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+ [(set_attr "length" "8")]
+)
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ [(set_attr "length" "8")]
+)
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, NULL_RTX,
+ INTVAL (operands[2]), operands[0],
+ operands[1], optimize && !no_new_pseudos);
+
+ DONE;
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ {
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+
+ if (((unsigned HOST_WIDE_INT) ~INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode,
+ GEN_INT (~INTVAL (operands[2])));
+
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ {
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1
+ == ~INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+
+ DONE;
+ }
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ }
+ "
+)
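+
+;; The Thumb-1 arm of the expander above chooses between three ways of
+;; ANDing with a constant.  A hedged set of worked C examples (these
+;; mirror the case analysis; they are not the expander itself):
+;;
+;;   #include <stdint.h>
+;;   /* c == 0xFFFFFF00: ~c == 0xFF < 256, so BIC with ~c in a reg.  */
+;;   uint32_t case_bic   (uint32_t x) { return x & 0xFFFFFF00u; }
+;;   /* c == 0x1FF == (1 << 9) - 1: a low-bit mask, done as extzv
+;;      (lsl #23 then lsr #23).  */
+;;   uint32_t case_extzv (uint32_t x) { return (x << 23) >> 23; }
+;;   /* c == ~((1 << 20) - 1): clear the low 20 bits with lsr #20
+;;      then lsl #20 (~c is too wide for the BIC case).  */
+;;   uint32_t case_shift (uint32_t x) { return (x >> 20) << 20; }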
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+; ??? Check split length for Thumb-2
+;; APPLE LOCAL begin ARM 4673027 suboptimal loop codegen
+(define_insn "*arm_andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K")))]
+ "TARGET_32BIT"
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2"
+ [(set_attr "length" "4,4")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end ARM 4673027 suboptimal loop codegen
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_andsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "and\\t%0, %0, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ and%.\\t%0, %1, %2
+ bic%.\\t%0, %1, #%B2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=X,r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ tst%?\\t%0, %1
+ bic%.\\t%2, %0, #%B1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && (INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32)"
+ "*
+ operands[1] = GEN_INT (((1 << INTVAL (operands[1])) - 1)
+ << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "set")]
+)
+
+(define_insn_and_split "*ne_zeroextractsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (zero_extract:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT
+ && (INTVAL (operands[3]) >= 0 && INTVAL (operands[3]) < 32
+ && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) + (INTVAL (operands[3]) & 1) <= 8
+ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32)"
+ "#"
+ "TARGET_32BIT
+ && (INTVAL (operands[3]) >= 0 && INTVAL (operands[3]) < 32
+ && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) + (INTVAL (operands[3]) & 1) <= 8
+ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32)"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (and:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0) (and:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (match_dup 0) (const_int 1)))]
+ "
+ operands[2] = GEN_INT (((1 << INTVAL (operands[2])) - 1)
+ << INTVAL (operands[3]));
+ "
+ [(set_attr "conds" "clob")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ (set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 12)
+ (const_int 8)))]
+;; APPLE LOCAL end v7 support. Merge from mainline
+)
+
+(define_insn_and_split "*ne_zeroextractsi_shifted"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (zero_extract:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (const_int 0))
+ (const_int 0)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ashift:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (match_dup 0) (const_int 1)))]
+ "
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn_and_split "*ite_ne_zeroextractsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (ne (zero_extract:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n"))
+ (const_int 0))
+ (match_operand:SI 4 "arm_not_operand" "rIK")
+ (const_int 0)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM
+ && (INTVAL (operands[3]) >= 0 && INTVAL (operands[3]) < 32
+ && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) + (INTVAL (operands[3]) & 1) <= 8
+ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32)
+ && !reg_overlap_mentioned_p (operands[0], operands[4])"
+ "#"
+ "TARGET_ARM
+ && (INTVAL (operands[3]) >= 0 && INTVAL (operands[3]) < 32
+ && INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) + (INTVAL (operands[3]) & 1) <= 8
+ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32)
+ && !reg_overlap_mentioned_p (operands[0], operands[4])"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (and:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0) (and:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (match_dup 0) (match_dup 4)))]
+ "
+ operands[2] = GEN_INT (((1 << INTVAL (operands[2])) - 1)
+ << INTVAL (operands[3]));
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn_and_split "*ite_ne_zeroextractsi_shifted"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (ne (zero_extract:SI
+ (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")
+ (const_int 0))
+ (const_int 0))
+ (match_operand:SI 3 "arm_not_operand" "rIK")
+ (const_int 0)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM && !reg_overlap_mentioned_p (operands[0], operands[3])"
+ "#"
+ "TARGET_ARM && !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ashift:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (match_dup 0) (match_dup 3)))]
+ "
+ operands[2] = GEN_INT (32 - INTVAL (operands[2]));
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))
+ (clobber (match_operand:SI 4 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ [(set (match_dup 4) (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 4) (match_dup 3)))]
+ "{
+ HOST_WIDE_INT temp = INTVAL (operands[2]);
+
+ operands[2] = GEN_INT (32 - temp - INTVAL (operands[3]));
+ operands[3] = GEN_INT (32 - temp);
+ }"
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? Use the Thumb-2 bitfield insert/extract instructions.
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "shiftable_operator"
+ [(zero_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))
+ (match_operand:SI 5 "s_register_operand" "")]))
+ (clobber (match_operand:SI 6 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 6) (ashift:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(lshiftrt:SI (match_dup 6) (match_dup 4))
+ (match_dup 5)]))]
+ "{
+ HOST_WIDE_INT temp = INTVAL (operands[3]);
+
+ operands[3] = GEN_INT (32 - temp - INTVAL (operands[4]));
+ operands[4] = GEN_INT (32 - temp);
+ }"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 3)))]
+ "{
+ HOST_WIDE_INT temp = INTVAL (operands[2]);
+
+ operands[2] = GEN_INT (32 - temp - INTVAL (operands[3]));
+ operands[3] = GEN_INT (32 - temp);
+ }"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "shiftable_operator"
+ [(sign_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))
+ (match_operand:SI 5 "s_register_operand" "")]))
+ (clobber (match_operand:SI 6 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 6) (ashift:SI (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(ashiftrt:SI (match_dup 6) (match_dup 4))
+ (match_dup 5)]))]
+ "{
+ HOST_WIDE_INT temp = INTVAL (operands[3]);
+
+ operands[3] = GEN_INT (32 - temp - INTVAL (operands[4]));
+ operands[4] = GEN_INT (32 - temp);
+ }"
+)
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bit-field insert instruction, the truncation happens
+;;; in the bit-field insert instruction itself. Since arm does not have a
+;;; bit-field insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
+;;; APPLE LOCAL begin ARM insv for Thumb
+
+;; APPLE LOCAL v7 support. Merge from mainline
+; ??? Use Thumb-2 bitfield insert/extract instructions
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "reg_or_int_operand" ""))]
+ "TARGET_EITHER"
+ "
+ {
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget, orig_target;
+
+ target = orig_target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0],
+ gen_int_mode (~mask2, SImode)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ gen_int_mode (op3_value << start_bit, SImode)));
+ }
+ else if (TARGET_ARM
+ && start_bit == 0
+ && !(const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+	/* A trick: since we are setting the bottom bits of the word,
+	   we can shift operand[3] up, shift operand[0] down, OR them
+	   together, and rotate the result back again.  This takes 3
+	   insns, and the third might be mergeable into another op.  */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3],
+ gen_int_mode (32 - width, SImode)));
+ emit_insn (gen_lshrsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (op1, op1, op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if (width + start_bit == 32
+ && (TARGET_THUMB
+ || !(const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask))))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3],
+ gen_int_mode (32 - width, SImode)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_lshrsi3 (op1, op1, operands[1]));
+ emit_insn (gen_iorsi3 (subtarget, op1, op0));
+ }
+ else
+ {
+ rtx op0 = gen_int_mode (mask, SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (TARGET_THUMB
+ || !(const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ if (!TARGET_THUMB)
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~(mask << start_bit))))
+ {
+ op0 = gen_int_mode (~(mask << start_bit), SImode);
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ emit_insn (gen_ashlsi3 (op0, op0, operands[2]));
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (!TARGET_THUMB && start_bit != 0)
+ emit_insn (gen_ashlsi3 (op1, op1, operands[2]));
+
+	/* The default code uses AND with a constant, which is an extra insn on Thumb.  */
+ if (TARGET_THUMB)
+ {
+ /* If we only want a low subreg, we don't need to worry about
+ bits beyond that. */
+ if (GET_CODE (orig_target) == SUBREG
+ && SUBREG_BYTE (orig_target) == 0
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (orig_target)))
+ < GET_MODE_SIZE (SImode)
+ && width + start_bit
+ >= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (orig_target))))
+ emit_insn (gen_ashlsi3 (op1, operands[3],
+ gen_int_mode (start_bit, SImode)));
+ else
+ {
+ /* Mask unneeded bits in operand[3], and simultaneously move
+ input to the right place in the word. */
+ emit_insn (gen_ashlsi3 (op1, operands[3],
+ gen_int_mode (32 - width, SImode)));
+ emit_insn (gen_lshrsi3 (op1, op1,
+ gen_int_mode (32 - width - start_bit, SImode)));
+ }
+ }
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+ }"
+)
+;;; APPLE LOCAL end ARM insv for Thumb
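+
+;; A hedged C sketch of the shift/rotate trick used in the expander
+;; above for a field at bit 0 (illustrative only; w is a constant
+;; width in 1..31):
+;;
+;;   #include <stdint.h>
+;;   uint32_t insv_low (uint32_t x, uint32_t v, unsigned w)
+;;   {
+;;     uint32_t hi = v << (32 - w);        /* value to the top, truncated */
+;;     uint32_t lo = x >> w;               /* old low field discarded     */
+;;     uint32_t t  = hi | lo;
+;;     return (t << w) | (t >> (32 - w));  /* rotate left by w            */
+;;   }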
+
+; Constants for operand 2 will never be given to these patterns.
+(define_insn_and_split "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 1 "s_register_operand" "r,0"))
+ (match_operand:DI 2 "s_register_operand" "0,r")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ "TARGET_32BIT && reload_completed && ! IS_IWMMXT_REGNUM (REGNO (operands[0]))"
+ [(set (match_dup 0) (and:SI (not:SI (match_dup 1)) (match_dup 2)))
+ (set (match_dup 3) (and:SI (not:SI (match_dup 4)) (match_dup 5)))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn_and_split "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ #"
+ ; (not (zero_extend ...)) allows us to just copy the high word from
+ ; operand1 to operand0.
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && reload_completed
+ && operands[0] != operands[1]"
+ [(set (match_dup 0) (and:SI (not:SI (match_dup 2)) (match_dup 1)))
+ (set (match_dup 3) (match_dup 4))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+ [(set_attr "length" "4,8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn_and_split "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (match_dup 0) (and:SI (not:SI (match_dup 2)) (match_dup 1)))
+ (set (match_dup 3) (and:SI (not:SI
+ (ashiftrt:SI (match_dup 2) (const_int 31)))
+ (match_dup 4)))]
+ "
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "bic%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "l"))
+ (match_operand:SI 2 "register_operand" "0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "bic\\t%0, %0, %1"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "bic%?\\t%0, %1, %2%S4"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "2")
+ (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "bic%.\\t%0, %1, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "bic%.\\t%0, %1, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0,r,0,r")
+ (match_operand:DI 2 "arm_rhs64_operand" "r,r,Dd,Dd")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && ! TARGET_IWMMXT"
+ "#"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ #"
+ [(set_attr "length" "4,8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ arm_split_constant (IOR, SImode, NULL_RTX,
+ INTVAL (operands[2]), operands[0], operands[1],
+ optimize && !no_new_pseudos);
+ DONE;
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ operands [2] = force_reg (SImode, operands [2]);
+ }
+ "
+)
+
+;; APPLE LOCAL begin ARM 4673027 suboptimal loop codegen
+(define_insn"*arm_iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "orr%?\\t%0, %1, %2"
+ [(set_attr "length" "4")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end ARM 4673027 suboptimal loop codegen
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "orr\\t%0, %0, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (set (match_operand:SI 0 "arm_general_register_operand" "")
+ (ior:SI (match_operand:SI 1 "arm_general_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && !const_ok_for_arm (INTVAL (operands[2]))
+ && const_ok_for_arm (~INTVAL (operands[2]))"
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (ior:SI (match_dup 1) (match_dup 3)))]
+ ""
+)
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "orr%.\\t%0, %1, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "orr%.\\t%0, %1, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,r,0,r")
+ (match_operand:DI 2 "s_register_operand" "r,r,Dd,Dd")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && !TARGET_IWMMXT"
+ "#"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ #"
+ [(set_attr "length" "4,8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ "TARGET_EITHER"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "if (TARGET_THUMB1)
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ "
+)
+
+(define_insn "*arm_xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "eor%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "eor\\t%0, %0, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "eor%.\\t%0, %1, %2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "teq%?\\t%0, %1"
+ [(set_attr "conds" "set")]
+)
+
+; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C),
+; result = (NOT D), we can sometimes merge the final NOT into one of the
+; following insns.
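+; The identity behind the split, step by step:
+;   (~A & ~B) | C  =  ~(A | B) | C        (De Morgan)
+;                  =  ~((A | B) & ~C)     (De Morgan again)
+; so with D = (A | B) & ~C the result is (NOT D), which is exactly what
+; the second insn of the split computes.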
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" ""))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "")))
+ (match_operand:SI 3 "arm_rhs_operand" "")))
+ (clobber (match_operand:SI 4 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "predicable" "yes")]
+;; APPLE LOCAL end v7 support. Merge from mainline
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+; ??? Are these four splitters still beneficial when the Thumb-2 bitfield
+; insns are available?
+;; APPLE LOCAL end v7 support. Merge from mainline
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "logical_binary_operator"
+ [(zero_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))
+ (match_operator:SI 9 "logical_binary_operator"
+ [(lshiftrt:SI (match_operand:SI 5 "s_register_operand" "")
+ (match_operand:SI 6 "const_int_operand" ""))
+ (match_operand:SI 7 "s_register_operand" "")])]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && GET_CODE (operands[1]) == GET_CODE (operands[9])
+ && INTVAL (operands[3]) == 32 - INTVAL (operands[6])"
+ [(set (match_dup 8)
+ (match_op_dup 1
+ [(ashift:SI (match_dup 2) (match_dup 4))
+ (match_dup 5)]))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(lshiftrt:SI (match_dup 8) (match_dup 6))
+ (match_dup 7)]))]
+ "
+ operands[4] = GEN_INT (32 - (INTVAL (operands[3]) + INTVAL (operands[4])));
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "logical_binary_operator"
+ [(match_operator:SI 9 "logical_binary_operator"
+ [(lshiftrt:SI (match_operand:SI 5 "s_register_operand" "")
+ (match_operand:SI 6 "const_int_operand" ""))
+ (match_operand:SI 7 "s_register_operand" "")])
+ (zero_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && GET_CODE (operands[1]) == GET_CODE (operands[9])
+ && INTVAL (operands[3]) == 32 - INTVAL (operands[6])"
+ [(set (match_dup 8)
+ (match_op_dup 1
+ [(ashift:SI (match_dup 2) (match_dup 4))
+ (match_dup 5)]))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(lshiftrt:SI (match_dup 8) (match_dup 6))
+ (match_dup 7)]))]
+ "
+ operands[4] = GEN_INT (32 - (INTVAL (operands[3]) + INTVAL (operands[4])));
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "logical_binary_operator"
+ [(sign_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))
+ (match_operator:SI 9 "logical_binary_operator"
+ [(ashiftrt:SI (match_operand:SI 5 "s_register_operand" "")
+ (match_operand:SI 6 "const_int_operand" ""))
+ (match_operand:SI 7 "s_register_operand" "")])]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && GET_CODE (operands[1]) == GET_CODE (operands[9])
+ && INTVAL (operands[3]) == 32 - INTVAL (operands[6])"
+ [(set (match_dup 8)
+ (match_op_dup 1
+ [(ashift:SI (match_dup 2) (match_dup 4))
+ (match_dup 5)]))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(ashiftrt:SI (match_dup 8) (match_dup 6))
+ (match_dup 7)]))]
+ "
+ operands[4] = GEN_INT (32 - (INTVAL (operands[3]) + INTVAL (operands[4])));
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "logical_binary_operator"
+ [(match_operator:SI 9 "logical_binary_operator"
+ [(ashiftrt:SI (match_operand:SI 5 "s_register_operand" "")
+ (match_operand:SI 6 "const_int_operand" ""))
+ (match_operand:SI 7 "s_register_operand" "")])
+ (sign_extract:SI (match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")
+ (match_operand:SI 4 "const_int_operand" ""))]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && GET_CODE (operands[1]) == GET_CODE (operands[9])
+ && INTVAL (operands[3]) == 32 - INTVAL (operands[6])"
+ [(set (match_dup 8)
+ (match_op_dup 1
+ [(ashift:SI (match_dup 2) (match_dup 4))
+ (match_dup 5)]))
+ (set (match_dup 0)
+ (match_op_dup 1
+ [(ashiftrt:SI (match_dup 8) (match_dup 6))
+ (match_dup 7)]))]
+ "
+ operands[4] = GEN_INT (32 - (INTVAL (operands[3]) + INTVAL (operands[4])));
+")
+
+
+;; Minimum and maximum insns
+
+(define_expand "smaxsi3"
+ [(parallel [
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (operands[2] == const0_rtx || operands[2] == constm1_rtx)
+ {
+ /* No need for a clobber of the condition code register here. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SMAX (SImode, operands[1],
+ operands[2])));
+ DONE;
+ }
+")
+
+(define_insn "*smax_0"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "bic%?\\t%0, %1, %1, asr #31"
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "*smax_m1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int -1)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "orr%?\\t%0, %1, %1, asr #31"
+ [(set_attr "predicable" "yes")]
+)
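+
+;; The asr #31 in *smax_0 and *smax_m1 (and in *smin_0 below) is the
+;; usual sign-mask trick: x >> 31 (arithmetic) is all ones exactly when
+;; x is negative.  Illustrative C, relying on GCC's arithmetic right
+;; shift of signed ints:
+;;
+;;   #include <stdint.h>
+;;   int32_t smax0  (int32_t x) { return x & ~(x >> 31); }  /* bic: max (x, 0)  */
+;;   int32_t smaxm1 (int32_t x) { return x |  (x >> 31); }  /* orr: max (x, -1) */
+;;   int32_t smin0  (int32_t x) { return x &  (x >> 31); }  /* and: min (x, 0)  */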
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_smax_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "%0,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_expand "sminsi3"
+ [(parallel [
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (operands[2] == const0_rtx)
+ {
+ /* No need for a clobber of the condition code register here. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SMIN (SImode, operands[1],
+ operands[2])));
+ DONE;
+ }
+")
+
+(define_insn "*smin_0"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "and%?\\t%0, %1, %1, asr #31"
+ [(set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_smin_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "%0,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "umaxsi3"
+ [(parallel [
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")]
+)
+
+(define_expand "uminsi3"
+ [(parallel [
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ operands[3] = gen_rtx_fmt_ee (minmax_code (operands[3]), SImode,
+ operands[1], operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ if (TARGET_THUMB2)
+ output_asm_insn (\"ite\t%d3\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "clob")
+ (set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 14)
+ (const_int 12)))
+;; APPLE LOCAL end v7 support. Merge from mainline
+ (set_attr "type" "store1")]
+)
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && !arm_eliminable_register (operands[1])"
+ "*
+ {
+ enum rtx_code code = GET_CODE (operands[4]);
+ bool need_else;
+
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ need_else = true;
+ else
+ need_else = false;
+
+ operands[5] = gen_rtx_fmt_ee (minmax_code (operands[5]), SImode,
+ operands[2], operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (TARGET_THUMB2)
+ {
+ if (need_else)
+ output_asm_insn (\"ite\\t%d5\", operands);
+ else
+ output_asm_insn (\"it\\t%d5\", operands);
+ }
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (need_else)
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+ }"
+ [(set_attr "conds" "clob")
+ (set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 14)
+ (const_int 12)))]
+;; APPLE LOCAL end v7 support. Merge from mainline
+)
+
+
+;; Shift and rotation insns
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (ashift:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((HOST_WIDE_INT) INTVAL (operands[2]) == 1)
+ {
+ emit_insn (gen_arm_ashldi3_1bit (operands[0], operands[1]));
+ DONE;
+ }
+      /* Ideally we shouldn't fail here if we could know that operands[1]
+         ends up already living in an iwmmxt register; otherwise it's
+         cheaper to generate the alternate code than to move values to
+         iwmmxt regs and back.  */
+ FAIL;
+ }
+ else if (!TARGET_REALLY_IWMMXT && !(TARGET_HARD_FLOAT && TARGET_MAVERICK))
+ FAIL;
+ "
+)
+
+(define_insn "arm_ashldi3_1bit"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (ashift:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (const_int 1)))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "movs\\t%Q0, %Q1, asl #1\;adc\\t%R0, %R1, %R1"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
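+
+;; The 1-bit shift above chains through the carry flag: movs shifts the
+;; low word left and leaves its old bit 31 in C, and adc doubles the
+;; high word while adding that carry back in.  Illustrative C:
+;;
+;;   #include <stdint.h>
+;;   uint64_t shl64_1 (uint64_t x)
+;;   {
+;;     uint32_t lo = (uint32_t) x, hi = (uint32_t) (x >> 32);
+;;     uint32_t c   = lo >> 31;        /* carry out of movs ..., asl #1 */
+;;     uint32_t rlo = lo << 1;
+;;     uint32_t rhi = hi + hi + c;     /* adc Rhi, Rhi, Rhi             */
+;;     return ((uint64_t) rhi << 32) | rlo;
+;;   }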
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "lsl\\t%0, %1, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((HOST_WIDE_INT) INTVAL (operands[2]) == 1)
+ {
+ emit_insn (gen_arm_ashrdi3_1bit (operands[0], operands[1]));
+ DONE;
+ }
+      /* Ideally we shouldn't fail here if we could know that operands[1]
+         ends up already living in an iwmmxt register; otherwise it's
+         cheaper to generate the alternate code than to move values to
+         iwmmxt regs and back.  */
+ FAIL;
+ }
+ else if (!TARGET_REALLY_IWMMXT)
+ FAIL;
+ "
+)
+
+(define_insn "arm_ashrdi3_1bit"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (ashiftrt:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (const_int 1)))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "movs\\t%R0, %R1, asr #1\;mov\\t%Q0, %Q1, rrx"
+ [(set_attr "conds" "clob")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "asr\\t%0, %1, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if ((HOST_WIDE_INT) INTVAL (operands[2]) == 1)
+ {
+ emit_insn (gen_arm_lshrdi3_1bit (operands[0], operands[1]));
+ DONE;
+ }
+      /* Ideally we shouldn't fail here if we could know that operands[1]
+         ends up already living in an iwmmxt register; otherwise it's
+         cheaper to generate the alternate code than to move values to
+         iwmmxt regs and back.  */
+ FAIL;
+ }
+ else if (!TARGET_REALLY_IWMMXT)
+ FAIL;
+ "
+)
+
+(define_insn "arm_lshrdi3_1bit"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (lshiftrt:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (const_int 1)))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "movs\\t%R0, %R1, lsr #1\;mov\\t%Q0, %Q1, rrx"
+ [(set_attr "conds" "clob")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "lsr\\t%0, %1, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+ "
+)
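+
+;; ARM only rotates right, so the expander above rewrites a rotate left
+;; by n as a rotate right by (32 - n) % 32 (or 32 - n computed at run
+;; time).  Illustrative C of the equivalence, for n in 1..31:
+;;
+;;   #include <stdint.h>
+;;   uint32_t rotl (uint32_t x, unsigned n)
+;;   {
+;;     unsigned m = (32 - n) % 32;             /* rotate-right amount */
+;;     return (x >> m) | (x << (32 - m));      /* == rotate left by n */
+;;   }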
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ {
+ if (GET_CODE (operands [2]) == CONST_INT)
+ operands [2] = force_reg (SImode, operands[2]);
+ }
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "ror\\t%0, %0, %2"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "*arm_shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "* return arm_output_shift(operands, 0);"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "* return arm_output_shift(operands, 1);"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "* return arm_output_shift(operands, 1);"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ "TARGET_ARM"
+ "mvn%?\\t%0, %1%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ "TARGET_ARM"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "insn" "mvn")
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_ARM"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "insn" "mvn")
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
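+;; For example, extracting a 5-bit field starting at bit 3 of a 32-bit
+;; value becomes (x << 24) followed by a logical right shift by 27: the
+;; left shift places the field's top bit at bit 31, and the right shift
+;; brings the field down to bit 0 with the high bits cleared.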
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "
+ {
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+
+ operands[3] = GEN_INT (rshift);
+
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+ }"
+)
+
+
+;; Unary arithmetic insns
+
+(define_expand "negdi2"
+ [(parallel
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ if (GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+ "
+)
+
+;; The constraints here are to prevent a *partial* overlap (where %Q0 == %R1).
+;; The second alternative is to allow the common case of a *full* overlap.
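+;; (The rsbs writes the low word of the destination before the rsc reads
+;; the high word of the source, so %Q0 == %R1 would negate an already
+;; clobbered value; a full overlap is safe because each source word is
+;; read before the corresponding destination word is written.)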
+(define_insn "*arm_negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=&l")
+ (neg:DI (match_operand:DI 1 "register_operand" "l")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "mov\\t%R0, #0\;neg\\t%Q0, %Q1\;sbc\\t%R0, %R1"
+ [(set_attr "length" "6")]
+)
+
+(define_expand "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_EITHER"
+ ""
+)
+
+(define_insn "*arm_negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "rsb%?\\t%0, %1, #0"
+ [(set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb1_negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l")))]
+ "TARGET_THUMB1"
+ "neg\\t%0, %1"
+ [(set_attr "length" "2")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_expand "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ ""
+)
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "")
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
+
+(define_expand "abssi2"
+ [(parallel
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "")
+
+(define_insn "*arm_abssi2"
+;; APPLE LOCAL v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so it is left as no
+ (set_attr "length" "8")]
+)
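+;; The eor/sub alternative above is the classic branchless absolute value:
+;; with m = x >> 31 (arithmetic), abs(x) = (x ^ m) - m, since m is 0 when
+;; x >= 0 and all-ones when x < 0. The neg_abs variant below uses the
+;; complementary identity -abs(x) = m - (x ^ m).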
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so it is left as no
+ (set_attr "length" "8")]
+)
+
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "")
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "")
+
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "")
+
+(define_expand "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "")
+
+(define_insn_and_split "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "#"
+ "TARGET_32BIT && reload_completed"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (match_dup 0) (not:SI (match_dup 1)))
+ (set (match_dup 2) (not:SI (match_dup 3)))]
+ "
+ {
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")]
+)
+
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (not:SI (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_EITHER"
+ ""
+)
+
+(define_insn "*arm_one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "mvn%?\\t%0, %1"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "predicable" "yes")
+ (set_attr "insn" "mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (not:SI (match_operand:SI 1 "register_operand" "l")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "mvn\\t%0, %1"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "length" "2")
+ (set_attr "insn" "mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "mvn%.\\t%0, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "mvn%.\\t%0, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "conds" "set")
+ (set_attr "insn" "mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+;; Fixed <--> Floating conversion insns
+
+(define_expand "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (float:SF (match_operand:SI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ emit_insn (gen_cirrus_floatsisf2 (operands[0], operands[1]));
+ DONE;
+ }
+")
+
+(define_expand "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (float:DF (match_operand:SI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ emit_insn (gen_cirrus_floatsidf2 (operands[0], operands[1]));
+ DONE;
+ }
+")
+
+(define_expand "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" ""))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[0], SImode))
+ operands[0] = force_reg (SImode, operands[0]);
+ if (!cirrus_fp_register (operands[1], SFmode))
+ operands[1] = force_reg (SFmode, operands[1]);
+ emit_insn (gen_cirrus_truncsfsi2 (operands[0], operands[1]));
+ DONE;
+ }
+")
+
+(define_expand "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" ""))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ if (TARGET_MAVERICK)
+ {
+ if (!cirrus_fp_register (operands[1], DFmode))
+ operands[1] = force_reg (DFmode, operands[1]);
+ emit_insn (gen_cirrus_truncdfsi2 (operands[0], operands[1]));
+ DONE;
+ }
+")
+
+;; Truncation insns
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ ""
+)
+
+;; Zero and sign extension instructions.
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_zero_extendsidi2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "*
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+ "
+ [(set_attr "length" "8")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_ARM"
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%(b%)\\t%Q0, %1\;mov%?\\t%R0, #0"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "length" "8")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "*,load_byte")
+ (set_attr "pool_range" "*,4092")
+ (set_attr "neg_pool_range" "*,4084")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_32BIT"
+ ""
+)
+
+(define_insn "*arm_extendsidi2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "*
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+ "
+ [(set_attr "length" "8")
+ (set_attr "shift" "1")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "predicable" "yes")]
+)
+
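+;; If the operand can be loaded with neither ldrh nor uxth, the fall-back
+;; below zero-extends with a shift pair: (x << 16) >> 16 (logical), which
+;; clears the upper halfword.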
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "TARGET_EITHER"
+ "
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if ((TARGET_THUMB1 || arm_arch4) && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_ARM && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+
+ if (!s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+
+ if (arm_arch6)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
+
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_THUMB1 && !arm_arch6"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "*
+ rtx mem = XEXP (operands[1], 0);
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ /* This can happen due to bugs in reload. */
+ if (GET_CODE (a) == REG && REGNO (a) == SP_REGNUM)
+ {
+ rtx ops[2];
+ ops[0] = operands[0];
+ ops[1] = a;
+
+ output_asm_insn (\"mov %0, %1\", ops);
+
+ XEXP (mem, 0) = operands[0];
+ }
+
+ else if ( GET_CODE (a) == LABEL_REF
+ && GET_CODE (b) == CONST_INT)
+ return \"ldr\\t%0, %1\";
+ }
+
+ return \"ldrh\\t%0, %1\";
+ "
+ [(set_attr "length" "4")
+ (set_attr "type" "load_byte")
+ (set_attr "pool_range" "60")]
+)
+
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_zero_extendhisi2_v6"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "l,m")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch6"
+ "*
+ rtx mem;
+
+ if (which_alternative == 0)
+ return \"uxth\\t%0, %1\";
+
+ mem = XEXP (operands[1], 0);
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ /* This can happen due to bugs in reload. */
+ if (GET_CODE (a) == REG && REGNO (a) == SP_REGNUM)
+ {
+ rtx ops[2];
+ ops[0] = operands[0];
+ ops[1] = a;
+
+ output_asm_insn (\"mov %0, %1\", ops);
+
+ XEXP (mem, 0) = operands[0];
+ }
+
+ else if ( GET_CODE (a) == LABEL_REF
+ && GET_CODE (b) == CONST_INT)
+ return \"ldr\\t%0, %1\";
+ }
+
+ return \"ldrh\\t%0, %1\";
+ "
+ [(set_attr "length" "2,4")
+ (set_attr "type" "alu_shift,load_byte")
+ (set_attr "pool_range" "*,60")]
+)
+
+(define_insn "*arm_zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARM && arm_arch4 && !arm_arch6"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "ldr%(h%)\\t%0, %1"
+ [(set_attr "type" "load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "256")
+ (set_attr "neg_pool_range" "244")]
+)
+
+(define_insn "*arm_zero_extendhisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_ARM && arm_arch6"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "@
+ uxth%?\\t%0, %1
+ ldr%(h%)\\t%0, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,256")
+ (set_attr "neg_pool_range" "*,244")]
+)
+
+(define_insn "*arm_zero_extendhisi2addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (zero_extend:SI (match_operand:HI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_INT_SIMD"
+ "uxtah%?\\t%0, %2, %1"
+ [(set_attr "type" "alu_shift")
+ (set_attr "predicable" "yes")]
+)
+
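+;; For a register source without uxtb, ARM can zero-extend with a single
+;; and #255; Thumb-1 has no and-with-immediate, so it shifts the byte up
+;; with lsl #24 and back down with lsr #24 instead.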
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "TARGET_EITHER"
+ "
+ if (!arm_arch6 && GET_CODE (operands[1]) != MEM)
+ {
+ if (TARGET_ARM)
+ {
+ emit_insn (gen_andsi3 (operands[0],
+ gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ }
+ else /* TARGET_THUMB */
+ {
+ rtx temp = gen_reg_rtx (SImode);
+ rtx ops[3];
+
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ ops[0] = temp;
+ ops[1] = operands[1];
+ ops[2] = GEN_INT (24);
+
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_ASHIFT (SImode, ops[1], ops[2])));
+
+ ops[0] = operands[0];
+ ops[1] = temp;
+ ops[2] = GEN_INT (24);
+
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_LSHIFTRT (SImode, ops[1], ops[2])));
+ }
+ DONE;
+ }
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && !arm_arch6"
+ "ldrb\\t%0, %1"
+ [(set_attr "length" "2")
+ (set_attr "type" "load_byte")
+ (set_attr "pool_range" "32")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_zero_extendqisi2_v6"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "l,m")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch6"
+ "@
+ uxtb\\t%0, %1
+ ldrb\\t%0, %1"
+ [(set_attr "length" "2,2")
+ (set_attr "type" "alu_shift,load_byte")
+ (set_attr "pool_range" "*,32")]
+)
+
+(define_insn "*arm_zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "TARGET_ARM && !arm_arch6"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ [(set_attr "type" "load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "4096")
+ (set_attr "neg_pool_range" "4084")]
+)
+
+(define_insn "*arm_zero_extendqisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_ARM && arm_arch6"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "@
+ uxtb%(%)\\t%0, %1
+ ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,4084")]
+)
+
+(define_insn "*arm_zero_extendqisi2addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (zero_extend:SI (match_operand:QI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_INT_SIMD"
+ "uxtab%?\\t%0, %2, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && (GET_CODE (operands[1]) != MEM) && ! BYTES_BIG_ENDIAN"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ ""
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 3)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && (GET_CODE (operands[1]) != MEM) && BYTES_BIG_ENDIAN"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ ""
+)
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "tst\\t%0, #255"
+ [(set_attr "conds" "set")]
+)
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ "TARGET_EITHER"
+ "
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ emit_insn (gen_thumb1_extendhisi2 (operands[0], operands[1]));
+ DONE;
+ }
+ else if (arm_arch4)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ }
+
+ if (TARGET_ARM && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+
+ if (!s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+
+ if (arm_arch6)
+ {
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ emit_insn (gen_thumb1_extendhisi2 (operands[0], operands[1]));
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "thumb1_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && !arm_arch6"
+ "*
+ {
+ rtx ops[4];
+ rtx mem = XEXP (operands[1], 0);
+
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ if (GET_CODE (a) == LABEL_REF
+ && GET_CODE (b) == CONST_INT)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (b) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[1] = a;
+ ops[2] = b;
+ }
+ else
+ {
+ ops[1] = mem;
+ ops[2] = const0_rtx;
+ }
+
+ gcc_assert (GET_CODE (ops[1]) == REG);
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+ }"
+ [(set_attr "length" "4")
+ (set_attr "type" "load_byte")
+ (set_attr "pool_range" "1020")]
+)
+
+;; We used to have an early-clobber on the scratch register here.
+;; However, there's a bug somewhere in reload which means that this
+;; can be partially ignored during spill allocation if the memory
+;; address also needs reloading; this causes us to die later on when
+;; we try to verify the operands. Fortunately, we don't really need
+;; the early-clobber: we can always use operand 0 if operand 2
+;; overlaps the address.
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_extendhisi2_insn_v6"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "l,m")))
+ (clobber (match_scratch:SI 2 "=X,l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch6"
+ "*
+ {
+ rtx ops[4];
+ rtx mem;
+
+ if (which_alternative == 0)
+ return \"sxth\\t%0, %1\";
+
+ mem = XEXP (operands[1], 0);
+
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ if (GET_CODE (a) == LABEL_REF
+ && GET_CODE (b) == CONST_INT)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (b) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[1] = a;
+ ops[2] = b;
+ }
+ else
+ {
+ ops[1] = mem;
+ ops[2] = const0_rtx;
+ }
+
+ gcc_assert (GET_CODE (ops[1]) == REG);
+
+ ops[0] = operands[0];
+ if (reg_mentioned_p (operands[2], ops[1]))
+ ops[3] = ops[0];
+ else
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+ }"
+ [(set_attr "length" "2,4")
+ (set_attr "type" "alu_shift,load_byte")
+ (set_attr "pool_range" "*,1020")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; This pattern will only be used when ldsh is not available.
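+;; The halfword is synthesized from two byte loads: the byte holding the
+;; sign bit is shifted left by 24 and then arithmetically right by 16,
+;; leaving its sign-extended value in bits 31:8, which is ORed with the
+;; other (zero-extended) byte. BYTES_BIG_ENDIAN selects which byte is
+;; which.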
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ "TARGET_ARM"
+ "
+ {
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = change_address (operands[1], QImode, addr);
+ mem2 = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+ }"
+)
+
+(define_insn "*arm_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARM && arm_arch4 && !arm_arch6"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "ldr%(sh%)\\t%0, %1"
+ [(set_attr "type" "load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "256")
+ (set_attr "neg_pool_range" "244")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? Check Thumb-2 pool range
+(define_insn "*arm_extendhisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch6"
+ "@
+ sxth%?\\t%0, %1
+ ldr%(sh%)\\t%0, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,256")
+ (set_attr "neg_pool_range" "*,244")]
+)
+
+(define_insn "*arm_extendhisi2addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (sign_extend:SI (match_operand:HI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_INT_SIMD"
+ "sxtah%?\\t%0, %2, %1"
+)
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "TARGET_ARM"
+ "
+ {
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_SIGN_EXTEND (HImode, operands[1])));
+ DONE;
+ }
+ if (!s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "Uq")))]
+ "TARGET_ARM && arm_arch4"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "ldr%(sb%)\\t%0, %1"
+ [(set_attr "type" "load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "256")
+ (set_attr "neg_pool_range" "244")]
+)
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "TARGET_EITHER"
+ "
+ {
+ if ((TARGET_THUMB || arm_arch4) && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (!s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+
+ if (arm_arch6)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
+
+(define_insn "*arm_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "Uq")))]
+ "TARGET_ARM && arm_arch4 && !arm_arch6"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "ldr%(sb%)\\t%0, %1"
+ [(set_attr "type" "load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "256")
+ (set_attr "neg_pool_range" "244")]
+)
+
+(define_insn "*arm_extendqisi_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,Uq")))]
+ "TARGET_ARM && arm_arch6"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "@
+ sxtb%?\\t%0, %1
+ ldr%(sb%)\\t%0, %1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,256")
+ (set_attr "neg_pool_range" "*,244")]
+)
+
+(define_insn "*arm_extendqisi2addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (sign_extend:SI (match_operand:QI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_INT_SIMD"
+ "sxtab%?\\t%0, %2, %1"
+ [(set_attr "type" "alu_shift")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && !arm_arch6"
+ "*
+ {
+ rtx ops[3];
+ rtx mem = XEXP (operands[1], 0);
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS
+ && GET_CODE (XEXP (mem, 0)) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+
+ ops[0] = operands[0];
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ ops[1] = a;
+ ops[2] = b;
+
+ if (GET_CODE (a) == REG)
+ {
+ if (GET_CODE (b) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (REGNO (a) == REGNO (ops[0]))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
+ }
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ gcc_assert (GET_CODE (b) == REG);
+ if (REGNO (b) == REGNO (ops[0]))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
+ }
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (GET_CODE (mem) == REG && REGNO (ops[0]) == REGNO (mem))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = mem;
+ ops[2] = const0_rtx;
+
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+ }"
+ [(set_attr "length" "2,6")
+ (set_attr "type" "load_byte,load_byte")
+ (set_attr "pool_range" "32,32")]
+)
+
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_extendqisi2_v6"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "l,V,m")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch6"
+ "*
+ {
+ rtx ops[3];
+ rtx mem;
+
+ if (which_alternative == 0)
+ return \"sxtb\\t%0, %1\";
+
+ mem = XEXP (operands[1], 0);
+
+ if (GET_CODE (mem) == CONST)
+ mem = XEXP (mem, 0);
+
+ if (GET_CODE (mem) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ if (GET_CODE (mem) == PLUS
+ && GET_CODE (XEXP (mem, 0)) == LABEL_REF)
+ return \"ldr\\t%0, %1\";
+
+ /* APPLE LOCAL ARM fix obvious typo */
+ if (which_alternative == 1)
+ return \"ldrsb\\t%0, %1\";
+
+ ops[0] = operands[0];
+
+ if (GET_CODE (mem) == PLUS)
+ {
+ rtx a = XEXP (mem, 0);
+ rtx b = XEXP (mem, 1);
+
+ ops[1] = a;
+ ops[2] = b;
+
+ if (GET_CODE (a) == REG)
+ {
+ if (GET_CODE (b) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (REGNO (a) == REGNO (ops[0]))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\", ops);
+ output_asm_insn (\"sxtb\\t%0, %0\", ops);
+ }
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ gcc_assert (GET_CODE (b) == REG);
+ if (REGNO (b) == REGNO (ops[0]))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\", ops);
+ output_asm_insn (\"sxtb\\t%0, %0\", ops);
+ }
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (GET_CODE (mem) == REG && REGNO (ops[0]) == REGNO (mem))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\", ops);
+ output_asm_insn (\"sxtb\\t%0, %0\", ops);
+ }
+ else
+ {
+ ops[1] = mem;
+ ops[2] = const0_rtx;
+
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+ }"
+ [(set_attr "length" "2,2,4")
+ (set_attr "type" "alu_shift,load_byte,load_byte")
+ (set_attr "pool_range" "*,32,32")]
+)
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ ""
+)
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the ARM; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognize garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register const char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+ "
+)
+
+(define_insn "*arm_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, r, r, m")
+ (match_operand:DI 1 "di_operand" "rDa,Db,Dc,mi,r"))]
+ "TARGET_ARM
+ && !(TARGET_HARD_FLOAT && (TARGET_MAVERICK || TARGET_VFP))
+ && !TARGET_IWMMXT
+ && ( register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*")]
+)
+
+(define_split
+ [(set (match_operand:ANY64 0 "arm_general_register_operand" "")
+ (match_operand:ANY64 1 "const_double_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && reload_completed
+ && (arm_const_double_inline_cost (operands[1])
+ <= ((optimize_size || arm_ld_sched) ? 3 : 4))"
+ [(const_int 0)]
+ "
+ arm_split_constant (SET, SImode, curr_insn,
+ INTVAL (gen_lowpart (SImode, operands[1])),
+ gen_lowpart (SImode, operands[0]), NULL_RTX, 0);
+ arm_split_constant (SET, SImode, curr_insn,
+ INTVAL (gen_highpart_mode (SImode,
+ GET_MODE (operands[0]),
+ operands[1])),
+ gen_highpart (SImode, operands[0]), NULL_RTX, 0);
+ DONE;
+ "
+)
+
+; If optimizing for size, or if we have load delay slots, then
+; we want to split the constant into two separate operations.
+; In both cases this may split a trivial part into a single data op
+; leaving a single complex constant to load. We can also get longer
+; offsets in a LDR which means we get better chances of sharing the pool
+; entries. Finally, we can normally do a better job of scheduling
+; LDR instructions than we can with LDM.
+;; APPLE LOCAL begin ARM split 64-bit constants on Thumb
+; On ARM, this pattern will only match if the one above did not.
+; On Thumb, use this form always; don't try to do inline expansions.
+(define_split
+ [(set (match_operand:ANY64 0 "arm_general_register_operand" "")
+ (match_operand:ANY64 1 "const_double_operand" ""))]
+ "TARGET_EITHER && reload_completed
+ && (TARGET_THUMB || arm_const_double_by_parts (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+ "
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_highpart_mode (SImode, GET_MODE (operands[0]),
+ operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ "
+)
+;; APPLE LOCAL end ARM split 64-bit constants on Thumb
+
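+;; A DImode register-to-register move is split into two SImode moves; if
+;; the low word of the destination is the high word of the source, the
+;; two moves are swapped so the source is read before it is overwritten.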
+(define_split
+ [(set (match_operand:ANY64 0 "arm_general_register_operand" "")
+ (match_operand:ANY64 1 "arm_general_register_operand" ""))]
+ "TARGET_EITHER && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+ "
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ /* Handle a partial overlap. */
+ if (rtx_equal_p (operands[0], operands[3]))
+ {
+ rtx tmp0 = operands[0];
+ rtx tmp1 = operands[1];
+
+ operands[0] = operands[2];
+ operands[1] = operands[3];
+ operands[2] = tmp0;
+ operands[3] = tmp1;
+ }
+ "
+)
+
+;; We can't actually do base+index doubleword loads if the index and
+;; destination overlap. Split here so that we at least have a chance to
+;; schedule.
+(define_split
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (mem:DI (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "s_register_operand" ""))))]
+ "TARGET_LDRD
+ && reg_overlap_mentioned_p (operands[0], operands[1])
+ && reg_overlap_mentioned_p (operands[0], operands[2])"
+ [(set (match_dup 4)
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (mem:DI (match_dup 4)))]
+ "
+ operands[4] = gen_rtx_REG (SImode, REGNO(operands[0]));
+ "
+)
+
+;; APPLE LOCAL begin compact switch tables
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_movdi_insn"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,l,>,l, m,*r")
+ (match_operand:DI 1 "general_operand" "l, I,J,>,l,mi,l,*r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && !(TARGET_HARD_FLOAT && TARGET_MAVERICK)
+ && ( register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ default:
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx_MEM (SImode,
+ plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+ }"
+ [(set_attr "length" "4,4,6,2,2,4,4,4")
+ (set_attr "type" "*,*,*,load2,store2,load2,store2,*")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "*,mov,*,*,*,*,*,mov")
+ (set_attr "pool_range" "*,*,*,*,*,1018,*,*")]
+)
+;; APPLE LOCAL end compact switch tables
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ /* Everything except mem = const or mem = mem can be done easily. */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ if (arm_general_register_operand (operands[0], SImode)
+ && GET_CODE (operands[1]) == CONST_INT
+ && !(const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1]))))
+ {
+ arm_split_constant (SET, SImode, NULL_RTX,
+ INTVAL (operands[1]), operands[0], NULL_RTX,
+ optimize && !no_new_pseudos);
+ DONE;
+ }
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1... */
+ {
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+ }
+
+ /* Recognize the case where operand[1] is a reference to thread-local
+ data and load its address to a register. */
+ if (arm_tls_referenced_p (operands[1]))
+ {
+ rtx tmp = operands[1];
+ rtx addend = NULL;
+
+ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (tmp, 0), 1);
+ tmp = XEXP (XEXP (tmp, 0), 0);
+ }
+
+ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ gcc_assert (SYMBOL_REF_TLS_MODEL (tmp) != 0);
+
+ tmp = legitimize_tls_address (tmp, no_new_pseudos ? operands[0] : 0);
+ if (addend)
+ {
+ tmp = gen_rtx_PLUS (SImode, tmp, addend);
+ tmp = force_operand (tmp, operands[0]);
+ }
+ operands[1] = tmp;
+ }
+ /* APPLE LOCAL ARM pic support */
+ else if (! LEGITIMATE_INDIRECT_OPERAND_P (operands[1]))
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ (no_new_pseudos ? operands[0] : 0));
+ "
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*arm_movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r, m")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r"))]
+ "TARGET_ARM && ! TARGET_IWMMXT
+ && !(TARGET_HARD_FLOAT && TARGET_VFP)
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ movw%?\\t%0, %1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+ [(set_attr "type" "*,*,*,load1,store1")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,mov,*,*")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,*,4084,*")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_split
+ [(set (match_operand:SI 0 "arm_general_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && (!(const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1]))))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, NULL_RTX,
+ INTVAL (operands[1]), operands[0], NULL_RTX, 0);
+ DONE;
+ "
+)
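+;; For example, 0x12345678 is not a valid ARM immediate (an 8-bit value
+;; rotated right by an even amount), so arm_split_constant typically
+;; builds it as mov #0x78; orr #0x5600; orr #0x340000; orr #0x12000000,
+;; each of whose immediates is encodable.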
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*lh")
+ (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*lh"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov %0, %1
+ mov %0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1"
+ [(set_attr "length" "2,2,4,4,2,2,2,2,2")
+ (set_attr "type" "*,*,*,*,load1,store1,load1,store1,*")
+ (set_attr "pool_range" "*,*,*,*,*,*,1020,*,*")]
+)
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && satisfies_constraint_J (operands[1])"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "operands[1] = GEN_INT (- INTVAL (operands[1]));"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && satisfies_constraint_K (operands[1])"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+ {
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ /* Shouldn't happen, but we don't want to split if the shift is zero. */
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+ }"
+)
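+;; For example, 0x4400 is not a valid Thumb-1 mov immediate, but it is
+;; 0x44 << 8, so the split above emits a mov of #0x44 followed by lsl #8.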
+
+;; When generating pic, we need to load the symbol offset into a register.
+;; So that the optimizer does not confuse this with a normal symbol load,
+;; we use an unspec. The offset will be loaded from a constant pool entry,
+;; since that is the only type of relocation we can use.
+
+;; The rather odd constraints on the following are to force reload to leave
+;; the insn alone, and to force the minipool generation pass to then move
+;; the GOT symbol to memory.
+
+;; APPLE LOCAL begin ARM pic support
+(define_insn "pic_load_addr_arm"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "" "mX")
+ (label_ref (match_operand 2 "" ""))] UNSPEC_PIC_SYM))
+ (use (label_ref (match_dup 2)))]
+ "TARGET_ARM && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "ldr%?\\t%0, %1"
+ [(set_attr "type" "load1")
+ (set (attr "pool_range") (const_int 4096))
+ (set (attr "neg_pool_range") (const_int 4084))]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "pic_load_addr_thumb1"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (unspec:SI [(match_operand:SI 1 "" "mX")
+ (label_ref (match_operand 2 "" ""))] UNSPEC_PIC_SYM))
+ (use (label_ref (match_dup 2)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "ldr\\t%0, %1"
+ [(set_attr "type" "load1")
+ (set (attr "pool_range") (const_int 1022))
+ (set_attr "length" "2")]
+)
+;; APPLE LOCAL end ARM pic support
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] UNSPEC_PIC_SYM))]
+ "TARGET_ARM && flag_pic"
+ "operands[2] = cfun->machine->pic_reg;"
+)
+
+;; APPLE LOCAL begin ARM compact switch tables
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")]
+ UNSPEC_PIC_SYM))]
+ "TARGET_EITHER && flag_pic && operands[2] == cfun->machine->pic_reg"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+ "
+ [(set_attr "type" "load1")
+ (set (attr "pool_range")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 1020)
+ (const_int 4096)))
+ (set (attr "neg_pool_range")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 0)
+ (const_int 4084)))
+ (set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 2)
+ (const_int 4)))]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+;; APPLE LOCAL begin ARM pic support
+(define_insn "pic_add_dot_plus_four"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(label_ref (match_operand 1 "" ""))
+ (plus:SI (match_operand:SI 2 "register_operand" "0")
+ (const (plus:SI (pc) (const_int 4))))]
+ UNSPEC_PIC_BASE))]
+ "TARGET_THUMB && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "*
+ (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[1]));
+ return \"add\\t%0, %|pc\";
+ "
+ [(set_attr "length" "2")]
+)
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(label_ref (match_operand 1 "" ""))
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE))]
+ "TARGET_ARM && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "*
+ (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[1]));
+ return \"add%?\\t%0, %|pc, %2\";
+ "
+ [(set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end ARM pic support
+
+(define_insn "tls_load_dot_plus_eight"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (mem:SI (unspec:SI [(plus:SI (match_operand:SI 1 "register_operand" "r")
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE)))
+ (use (match_operand 2 "" ""))]
+ "TARGET_ARM"
+ "*
+ (*targetm.asm_out.internal_label) (asm_out_file, \"LPIC\",
+ INTVAL (operands[2]));
+ return \"ldr%?\\t%0, [%|pc, %1]\t\t@ tls_load_dot_plus_eight\";
+ "
+ [(set_attr "predicable" "yes")]
+)
+
+;; PIC references to local variables can generate pic_add_dot_plus_eight
+;; followed by a load. These sequences can be crunched down to
+;; tls_load_dot_plus_eight by a peephole.
+
+(define_peephole2
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(plus:SI (match_operand:SI 3 "register_operand" "")
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE))
+ (use (label_ref (match_operand 1 "" "")))])
+ (set (match_operand:SI 2 "register_operand" "") (mem:SI (match_dup 0)))]
+ "TARGET_ARM && peep2_reg_dead_p (2, operands[0])"
+ [(parallel [(set (match_dup 2)
+ (mem:SI (unspec:SI [(plus:SI (match_dup 3)
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE)))
+ (use (label_ref (match_dup 1)))])]
+ ""
+)
+
+;; APPLE LOCAL begin ARM 4224487
+;; These short forms work for addresses of scalar globals. They
+;; are produced by combine. There is no Thumb counterpart, as
+;; [Rn+PC] is not a valid addressing mode on Thumb.
+
+(define_insn "*arm_pic_ldrsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (unspec:SI [(label_ref (match_operand 1 "" ""))
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE)))]
+ "TARGET_ARM && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "*
+ (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[1]));
+ return \"ldr%?\\t%0, [%|pc, %2]\";
+ "
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "*arm_pic_strsi"
+ [(set (mem:SI (unspec:SI [(label_ref (match_operand 1 "" ""))
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (const (plus:SI (pc) (const_int 8))))]
+ UNSPEC_PIC_BASE))
+ (match_operand:SI 0 "register_operand" "r"))]
+ "TARGET_ARM && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "*
+ (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[1]));
+ return \"str%?\\t%0, [%|pc, %2]\";
+ "
+ [(set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end ARM 4224487
+
+;; APPLE LOCAL begin ARM setjmp/longjmp interworking
+;; If we'll be returning to Thumb code, we need to set the low-order
+;; bit of the resume address. builtin_setjmp_setup doesn't handle all
+;; of the setup; it just augments the logic in builtins.c to post-
+;; process the already-initialized mini-jmp_buf.
+(define_expand "builtin_setjmp_setup"
+ [(use (match_operand 0 "register_operand"))]
+ "TARGET_THUMB"
+{
+ rtx resume_addr =
+ gen_rtx_MEM (Pmode, plus_constant (operands[0],
+ GET_MODE_SIZE (Pmode)));
+ rtx resume_reg;
+
+ /* Set low-order bit of resume address */
+ resume_reg = force_reg (Pmode, resume_addr);
+ resume_reg = gen_rtx_IOR (Pmode, resume_reg, GEN_INT (1));
+ emit_move_insn (resume_addr, resume_reg);
+ /* APPLE LOCAL 6387939 */
+ DONE;
+})
+
+;; Very similar to the logic in builtins.c, except that we always
+;; restore both ARM_HARD_FRAME_POINTER and THUMB_HARD_FRAME_POINTER,
+;; and we emit an "indirect_jump_exchange" instead of the standard
+;; "indirect_jump". If we're jumping back into ARM code, we will
+;; unnecessarily (but harmlessly) trash the Thumb FP register.
+(define_expand "builtin_longjmp"
+ [(use (match_operand 0 "register_operand"))]
+ ""
+ "
+{
+ rtx arm_saved_fp = gen_rtx_MEM (Pmode, operands[0]);
+ rtx lab =
+ gen_rtx_MEM (Pmode, plus_constant (operands[0],
+ GET_MODE_SIZE (Pmode)));
+ rtx stack =
+ gen_rtx_MEM (Pmode, plus_constant (operands[0],
+ 2 * GET_MODE_SIZE (Pmode)));
+ rtx arm_fp = gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM);
+
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, arm_fp)));
+
+ emit_move_insn (arm_fp, arm_saved_fp);
+
+ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+
+ if (arm_arch4t)
+ {
+ lab = copy_to_mode_reg (Pmode, lab);
+ emit_insn (gen_rtx_USE (VOIDmode, arm_fp));
+ emit_jump_insn (gen_indirect_jump_exchange (lab));
+ emit_barrier ();
+ }
+ else
+ emit_indirect_jump (lab);
+
+ DONE;
+}")
+;; APPLE LOCAL end ARM setjmp/longjmp interworking
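+
+;; For reference, the mini-jmp_buf layout assumed by the two expanders
+;; above is, in Pmode words: [0] saved frame pointer, [1] resume address,
+;; [2] saved stack pointer.  Setting bit 0 of word 1 is the usual
+;; interworking convention: a BX-style transfer to an address with bit 0
+;; set resumes execution in Thumb state.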
+
+;; When copying one reg to another, we can set the condition codes according
+;; to its value.  Such a move is common after a return from a subroutine,
+;; when the result is being tested against zero.
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_dup 1))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ cmp%?\\t%0, #0
+ sub%.\\t%0, %1, #0"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "conds" "set")]
+)
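+
+;; e.g. the common sequence  mov r0, r1 ; cmp r0, #0  collapses into the
+;; second alternative above,  subs r0, r1, #0 , which performs the copy
+;; and sets the flags in one instruction (register numbers illustrative).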
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
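+;; (Illustrative: an address such as [rN, #4100] must stay inside the
+;; store pattern; #4100 is not a valid ARM data-processing immediate, so
+;; a separated (plus rN 4100) insn would not be recognizable.)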
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (match_dup 5))]
+ "TARGET_ARM"
+ "
+ {
+ rtx op1 = operands[1];
+ rtx addr = XEXP (op1, 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ op1 = replace_equiv_address (operands[1], force_reg (SImode, addr));
+
+ operands[4] = adjust_address (op1, QImode, 1);
+ operands[1] = adjust_address (operands[1], QImode, 0);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+ operands[5] = gen_lowpart (QImode, operands[2]);
+ }"
+)
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (match_dup 5))]
+ "TARGET_ARM"
+ "
+ {
+ rtx op1 = operands[1];
+ rtx addr = XEXP (op1, 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ op1 = replace_equiv_address (op1, force_reg (SImode, addr));
+
+ operands[4] = adjust_address (op1, QImode, 1);
+ operands[1] = adjust_address (operands[1], QImode, 0);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+ operands[5] = gen_lowpart (QImode, operands[2]);
+ }"
+)
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (set (match_dup 3) (match_dup 2))]
+ "TARGET_ARM"
+ "
+ {
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ rtx op0 = operands[0];
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ op0 = replace_equiv_address (op0, force_reg (SImode, addr));
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = adjust_address (op0, QImode, 1);
+ operands[0] = adjust_address (operands[0], QImode, 0);
+ operands[2] = gen_lowpart (QImode, operands[2]);
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ }"
+)
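+
+;; Worked example (little-endian, illustrative): storeinthi of the
+;; constant 0x1234 loads 0x34 and 0x12 into two SImode temporaries and
+;; emits two byte stores; for a value like 0x4141, whose bytes match,
+;; the second temporary is elided and one register feeds both strb's.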
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch4"
+ "
+ if (!s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ "
+)
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ if (TARGET_ARM)
+ {
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (!const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~(val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
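+ /* Illustrative: for val = 0xff00, (val | ~0xffff) is 0xffffff00,
+ whose complement 0xff is a valid ARM immediate, so the top bits
+ are set and movsi emits a single MVN.  */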
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (arm_arch4 && optimize && !no_new_pseudos
+ && GET_CODE (operands[1]) == MEM)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_zero_extendhisi2 (reg, operands[1]));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (!arm_arch4)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && (GET_CODE (offset = XEXP (base, 1))
+ == CONST_INT)
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 32)
+ {
+ rtx new;
+
+ new = widen_memory_access (operands[1], SImode,
+ ((INTVAL (offset) & ~3)
+ - INTVAL (offset)));
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ }
+ }
+ /* Handle loading a large integer during reload. */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[1]))
+ && !const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ gcc_assert (GET_CODE (operands[0]) == REG);
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (TARGET_THUMB2)
+ {
+ /* Thumb-2 can do everything except mem=mem and mem=const easily. */
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+ /* Zero extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ }
+ }
+ else /* TARGET_THUMB1 */
+ {
+ if (!no_new_pseudos)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed an SP (never OK for HImode/QImode) or
+ virtual register (rejected by GO_IF_LEGITIMATE_ADDRESS for
+ HImode/QImode) relative address.  */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register.  This may confuse the
+ Alpha though, which must handle this case differently.  */
+ if (GET_CODE (operands[0]) == MEM
+ && !memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_to_reg (XEXP (operands[0], 0)));
+
+ if (GET_CODE (operands[1]) == MEM
+ && !memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_to_reg (XEXP (operands[1], 0)));
+
+ if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_zero_extendhisi2 (reg, operands[1]));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && !satisfies_constraint_I (operands[1]))
+ {
+ /* Handle loading a large integer during reload. */
+
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ gcc_assert (GET_CODE (operands[0]) == REG);
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ "
+)
+
+;; APPLE LOCAL ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "adjustable_thumb1_movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && ( register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"add %0, %1, #0\";
+ case 2: return \"strh %1, %0\";
+ case 3: return \"mov %0, %1\";
+ case 4: return \"mov %0, %1\";
+ case 5: return \"mov %0, %1\";
+ default: gcc_unreachable ();
+ case 1:
+ /* The stack pointer can end up being taken as an index register.
+ Catch this case here and deal with it. */
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == REG
+ && REGNO (XEXP (XEXP (operands[1], 0), 0)) == SP_REGNUM)
+ {
+ rtx ops[2];
+ ops[0] = operands[0];
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+
+ output_asm_insn (\"mov %0, %1\", ops);
+
+ XEXP (XEXP (operands[1], 0), 0) = operands[0];
+
+ }
+ return \"ldrh %0, %1\";
+ }"
+ [(set_attr "length" "2,4,2,2,2,2")
+ (set_attr "type" "*,load1,store1,*,*,*")]
+)
+
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ "TARGET_ARM"
+ "
+ {
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = change_address (operands[1], QImode, addr);
+ mem2 = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+ }"
+)
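+
+;; Schematically (little-endian, illustrative registers), movhi_bytes
+;; expands to:
+;;	ldrb	r2, [addr]
+;;	ldrb	r3, [addr, #1]
+;;	orr	r0, r2, r3, lsl #8
+;; i.e. the two bytes are loaded separately and recombined, for targets
+;; without a halfword load.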
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (match_dup 4))]
+ "TARGET_ARM"
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_lowpart (HImode, operands[3]);
+ "
+)
+
+;; Pattern to recognize the insn generated by the default case above.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "TARGET_ARM
+ && arm_arch4
+ && (GET_CODE (operands[1]) != CONST_INT
+ || const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%(h%)\\t%1, %0\\t%@ movhi
+ ldr%(h%)\\t%0, %1\\t%@ movhi"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "*,*,store1,load1")
+ (set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,*,*")
+ (set_attr "pool_range" "*,*,*,256")
+ (set_attr "neg_pool_range" "*,*,*,244")]
+)
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_ARM"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "predicable" "yes")
+ (set_attr "insn" "mov,mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+(define_expand "thumb_movhi_clobber"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "register_operand" ""))
+ (clobber (match_operand:DI 2 "register_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "
+ if (strict_memory_address_p (HImode, XEXP (operands[0], 0))
+ && REGNO (operands[1]) <= LAST_LO_REGNUM)
+ {
+ emit_insn (gen_movhi (operands[0], operands[1]));
+ DONE;
+ }
+ /* XXX Fixme, need to handle other cases here as well. */
+ gcc_unreachable ();
+ "
+)
+
+;; We use a DImode scratch because we may occasionally need an additional
+;; temporary if the address isn't offsettable -- push_reload doesn't seem
+;; to take any notice of the "o" constraints on the reload_memory_operand
+;; operand.
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "arm_reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:DI 2 "s_register_operand" "=&l")])]
+ "TARGET_EITHER"
+ "if (TARGET_ARM)
+ arm_reload_out_hi (operands);
+ else
+ thumb_reload_out_hi (operands);
+ DONE;
+ "
+)
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "arm_reload_memory_operand" "o")
+ (match_operand:DI 2 "s_register_operand" "=&r")])]
+ "TARGET_EITHER"
+ "
+ if (TARGET_ARM)
+ arm_reload_in_hi (operands);
+ else
+ thumb_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ /* Everything except mem = const or mem = mem can be done easily.  */
+
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_lowpart (QImode, reg);
+ }
+
+ if (TARGET_THUMB)
+ {
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed an SP (never OK for HImode/QImode) or
+ virtual register (rejected by GO_IF_LEGITIMATE_ADDRESS for
+ HImode/QImode) relative address.  */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register.  This may confuse the
+ Alpha though, which must handle this case differently.  */
+ if (GET_CODE (operands[0]) == MEM
+ && !memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_to_reg (XEXP (operands[0], 0)));
+ if (GET_CODE (operands[1]) == MEM
+ && !memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_to_reg (XEXP (operands[1], 0)));
+ }
+
+ if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_zero_extendqisi2 (reg, operands[1]));
+ operands[1] = gen_lowpart (QImode, reg);
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+ else if (TARGET_THUMB
+ && GET_CODE (operands[1]) == CONST_INT
+ && !satisfies_constraint_I (operands[1]))
+ {
+ /* Handle loading a large integer during reload. */
+
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ gcc_assert (GET_CODE (operands[0]) == REG);
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+ "
+)
+
+
+(define_insn "*arm_movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && ( register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%(b%)\\t%0, %1
+ str%(b%)\\t%1, %0"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "*,*,load1,store1")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,*,*")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l, m,l,*h,*r,I"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && ( register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1"
+ [(set_attr "length" "2")
+ (set_attr "type" "*,load1,store1,*,*,*")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "*,*,*,mov,mov,mov")
+ (set_attr "pool_range" "*,32,*,*,*,*")]
+)
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ else /* TARGET_THUMB1 */
+ {
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+ }
+ "
+)
+
+;; A floating-point move of a constant into a core register is transformed
+;; into the equivalent SImode operation.
+(define_split
+ [(set (match_operand:SF 0 "arm_general_register_operand" "")
+ (match_operand:SF 1 "immediate_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT
+ && reload_completed
+ && GET_CODE (operands[1]) == CONST_DOUBLE"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+ operands[2] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[1]);
+ if (operands[2] == 0 || operands[3] == 0)
+ FAIL;
+ "
+)
+
+(define_insn "*arm_movsf_soft_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_ARM
+ && TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "length" "4,4,4")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "*,load1,store1")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,*,*")
+ (set_attr "pool_range" "*,4096,*")
+ (set_attr "neg_pool_range" "*,4084,*")]
+)
+
+;;; ??? This should have alternatives for constants.
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l, m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l, >,l,mF,l,*h,*r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && ( register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+ [(set_attr "length" "2")
+ (set_attr "type" "*,load1,store1,load1,store1,*,*")
+ (set_attr "pool_range" "*,*,*,1020,*,*,*")]
+)
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+ else /* TARGET_THUMB */
+ {
+ if (!no_new_pseudos)
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+ }
+ "
+)
+
+;; Reloading a DFmode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "arm_reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ {
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx_SUBREG (DImode, operands[0], 0);
+ operands[1] = gen_rtx_SUBREG (DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ replace_equiv_address (operands[0], operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+ }"
+)
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=r,r,r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "rDa,Db,Dc,mF,r"))]
+ "TARGET_ARM && TARGET_SOFT_FLOAT
+ && ( register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "1020")
+ (set_attr "neg_pool_range" "1008")]
+)
+
+;; APPLE LOCAL begin ARM compact switch tables
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*thumb_movdf_insn"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=l,l,>,l, m,*r")
+ (match_operand:DF 1 "general_operand" "l, >,l,mF,l,*r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && ( register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+ switch (which_alternative)
+ {
+ default:
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx_MEM (SImode,
+ plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+ "
+ [(set_attr "length" "4,2,2,4,4,4")
+ (set_attr "type" "*,load2,store2,load2,store2,*")
+ (set_attr "pool_range" "*,*,*,1018,*,*")]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (XFmode, operands[1]);
+ "
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; Removed lines
+
+
+;; Load- and store-multiple insns.
+;; The ARM can load/store any set of registers, provided that they are in
+;; ascending order, but that is beyond GCC, so stick with what it knows.
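+;; (Concretely, the expanders below accept only a block of consecutive,
+;; ascending core registers -- e.g. ldmia r0, {r2, r3, r4} -- even though
+;; the hardware would also accept a sparse set such as {r1, r2, r7}.)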
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+{
+ HOST_WIDE_INT offset = 0;
+
+ /* Support only fixed point registers. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > (LAST_ARM_REGNUM - 1)
+ || REGNO (operands[0]) + INTVAL (operands[2]) > LAST_ARM_REGNUM)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, operands[1], &offset);
+})
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc4"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 16)))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (match_dup 2)))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 4))))
+ (set (match_operand:SI 5 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 8))))
+ (set (match_operand:SI 6 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 12))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
+ "ldm%(ia%)\\t%1!, {%3, %4, %5, %6}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load4")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*ldmsi_postinc4_thumb1"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=l")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 16)))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (match_dup 2)))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 4))))
+ (set (match_operand:SI 5 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 8))))
+ (set (match_operand:SI 6 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 12))))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
+ "ldmia\\t%1!, {%3, %4, %5, %6}"
+ [(set_attr "type" "load4")
+ (set_attr "length" "2")]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+(define_insn "*ldmsi_postinc3"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 12)))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (match_dup 2)))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 4))))
+ (set (match_operand:SI 5 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 8))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
+ "ldm%(ia%)\\t%1!, {%3, %4, %5}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load3")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*ldmsi_postinc2"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 8)))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (match_dup 2)))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 2) (const_int 4))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
+ "ldm%(ia%)\\t%1!, {%3, %4}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load2")
+ (set_attr "predicable" "yes")]
+)
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi4"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "r")))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
+ "ldm%(ia%)\\t%1, {%2, %3, %4, %5}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load4")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*ldmsi3"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "r")))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
+ "ldm%(ia%)\\t%1, {%2, %3, %4}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load3")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*ldmsi2"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "r")))
+ (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
+ "ldm%(ia%)\\t%1, {%2, %3}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "load2")
+ (set_attr "predicable" "yes")]
+)
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+{
+ HOST_WIDE_INT offset = 0;
+
+ /* Support only fixed point registers. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > (LAST_ARM_REGNUM - 1)
+ || REGNO (operands[1]) + INTVAL (operands[2]) > LAST_ARM_REGNUM)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, operands[0], &offset);
+})
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc4"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 16)))
+ (set (mem:SI (match_dup 2))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 8)))
+ (match_operand:SI 5 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 12)))
+ (match_operand:SI 6 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
+ "stm%(ia%)\\t%1!, {%3, %4, %5, %6}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store4")]
+)
+
+;; APPLE LOCAL begin ARM compact switch tables
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*stmsi_postinc4_thumb1"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=l")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 16)))
+ (set (mem:SI (match_dup 2))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 8)))
+ (match_operand:SI 5 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 12)))
+ (match_operand:SI 6 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
+ "stmia\\t%1!, {%3, %4, %5, %6}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "store4")
+ (set_attr "length" "2")]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+(define_insn "*stmsi_postinc3"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 12)))
+ (set (mem:SI (match_dup 2))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 8)))
+ (match_operand:SI 5 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
+ "stm%(ia%)\\t%1!, {%3, %4, %5}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store3")]
+)
+
+(define_insn "*stmsi_postinc2"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 2 "s_register_operand" "1")
+ (const_int 8)))
+ (set (mem:SI (match_dup 2))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
+ "stm%(ia%)\\t%1!, {%3, %4}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store2")]
+)
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi4"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 5 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
+ "stm%(ia%)\\t%1, {%2, %3, %4, %5}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store4")]
+)
+
+(define_insn "*stmsi3"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
+ "stm%(ia%)\\t%1, {%2, %3, %4}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store3")]
+)
+
+(define_insn "*stmsi2"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
+ "stm%(ia%)\\t%1, {%2, %3}"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "store2")]
+)
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply for blocks of less than this, but it clobbers so
+;; many registers that there is then probably a better way.
+
+(define_expand "movmemqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ "TARGET_EITHER"
+ "
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ {
+ if (arm_gen_movmemqi (operands))
+ DONE;
+ FAIL;
+ }
+ else /* TARGET_THUMB */
+ {
+ if ( INTVAL (operands[3]) != 4
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ /* APPLE LOCAL begin ARM use memcpy more at -Os */
+ if (optimize_size
+ && INTVAL (operands[2]) != 1
+ && INTVAL (operands[2]) != 2
+ && INTVAL (operands[2]) != 4
+ && INTVAL (operands[2]) != 8
+ && INTVAL (operands[2]) != 12
+ && INTVAL (operands[2]) != 16)
+ FAIL;
+ /* APPLE LOCAL end ARM use memcpy more at -Os */
+
+ thumb_expand_movmemqi (operands);
+ DONE;
+ }
+ "
+)
+
+;; Thumb block-move insns
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 2 "register_operand" "0"))
+ (mem:SI (match_operand:SI 3 "register_operand" "1")))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 3) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 3) (const_int 8))))
+ (set (match_operand:SI 0 "register_operand" "=l")
+ (plus:SI (match_dup 2) (const_int 12)))
+ (set (match_operand:SI 1 "register_operand" "=l")
+ (plus:SI (match_dup 3) (const_int 12)))
+ (clobber (match_scratch:SI 4 "=&l"))
+ (clobber (match_scratch:SI 5 "=&l"))
+ (clobber (match_scratch:SI 6 "=&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "* return thumb_output_move_mem_multiple (3, operands);"
+ [(set_attr "length" "4")
+ ; This isn't entirely accurate: it loads as well, but in terms of
+ ; scheduling the following insn it is better to consider it as a store.
+ (set_attr "type" "store3")]
+)
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 2 "register_operand" "0"))
+ (mem:SI (match_operand:SI 3 "register_operand" "1")))
+ (set (mem:SI (plus:SI (match_dup 2) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 3) (const_int 4))))
+ (set (match_operand:SI 0 "register_operand" "=l")
+ (plus:SI (match_dup 2) (const_int 8)))
+ (set (match_operand:SI 1 "register_operand" "=l")
+ (plus:SI (match_dup 3) (const_int 8)))
+ (clobber (match_scratch:SI 4 "=&l"))
+ (clobber (match_scratch:SI 5 "=&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "* return thumb_output_move_mem_multiple (2, operands);"
+ [(set_attr "length" "4")
+ ; This isn't entirely accurate: it loads as well, but in terms of
+ ; scheduling the following insn it is better to consider it as a store.
+ (set_attr "type" "store2")]
+)
+
+
+
+;; Compare & branch insns
+;; The range calculations are derived as follows:
+;; For forward branches, the address calculation returns the address of
+;; the next instruction. This is 2 beyond the branch instruction.
+;; For backward branches, the address calculation returns the address of
+;; the first instruction in this pattern (cmp). This is 2 before the branch
+;; instruction for the shortest sequence, and 4 before the branch instruction
+;; if we have to jump around an unconditional branch.
+;; To the basic branch range the PC offset must be added (this is +4).
+;; So for forward branches we have
+;; (pos_range - pos_base_offs + pc_offs) = (pos_range - 2 + 4).
+;; And for backward branches we have
+;; (neg_range - neg_base_offs + pc_offs) = (neg_range - (-2 or -4) + 4).
+;;
+;; For a 'b'       pos_range = 2046, neg_range = -2048, giving (-2040 -> 2048).
+;; For a 'b<cond>' pos_range = 254,  neg_range = -256,  giving (-250 -> 256).
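+;;
+;; Worked example for 'b<cond>': forward, 254 - 2 + 4 = 256; backward,
+;; -256 - (-2) + 4 = -250 for the short sequence.  These are exactly the
+;; (-250 -> 256) bounds tested in the length attributes below.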
+
+(define_expand "cbranchsi4"
+ [(set (pc) (if_then_else
+ (match_operator 0 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "
+ if (thumb1_cmpneg_operand (operands[2], SImode))
+ {
+ emit_jump_insn (gen_cbranchsi4_scratch (NULL, operands[1], operands[2],
+ operands[3], operands[0]));
+ DONE;
+ }
+ if (!thumb1_cmp_operand (operands[2], SImode))
+ operands[2] = force_reg (SImode, operands[2]);
+ ")
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*cbranchsi4_insn"
+ [(set (pc) (if_then_else
+ (match_operator 0 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "l,*h")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ (match_operand:SI 2 "thumb1_cmp_operand" "lI*h,*r")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_THUMB1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "*
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d0\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D0\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D0\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
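+
+;; The three lengths above decode as: 4 = an in-range conditional branch;
+;; 6 = an inverted conditional branch over an unconditional "b" (the
+;; "long jump"); 8 = an inverted conditional branch over a "bl" (the
+;; "far jump", which clobbers lr -- hence the separate far_jump
+;; attribute, so the prologue knows lr must be preserved).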
+
+(define_insn "cbranchsi4_scratch"
+ [(set (pc) (if_then_else
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "l,0")
+;; APPLE LOCAL v7 support. Merge from mainline
+ (match_operand:SI 2 "thumb1_cmpneg_operand" "L,J")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 0 "=l,l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ output_asm_insn (\"add\\t%0, %1, #%n2\", operands);
+
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d4\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D4\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D4\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+(define_insn "*movsi_cbranchsi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "0,l,l,l")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,l,*h,*m")
+ (match_dup 1))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*{
+ if (which_alternative == 0)
+ output_asm_insn (\"cmp\t%0, #0\", operands);
+ else if (which_alternative == 1)
+ output_asm_insn (\"sub\t%0, %1, #0\", operands);
+ else
+ {
+ output_asm_insn (\"cmp\t%1, #0\", operands);
+ if (which_alternative == 2)
+ output_asm_insn (\"mov\t%0, %1\", operands);
+ else
+ output_asm_insn (\"str\t%1, %0\", operands);
+ }
+ switch (get_attr_length (insn) - ((which_alternative > 1) ? 2 : 0))
+ {
+ case 4: return \"b%d3\\t%l2\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D3\\t%~LCB%=\;b\\t%l2\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D3\\t%~LCB%=\;bl\\t%l2\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (gt (symbol_ref ("which_alternative"))
+ (const_int 1))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (le (symbol_ref ("which_alternative"))
+ (const_int 1))
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -250))
+ (le (minus (match_dup 2) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -2040))
+ (le (minus (match_dup 2) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -248))
+ (le (minus (match_dup 2) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -2038))
+ (le (minus (match_dup 2) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+;; APPLE LOCAL begin ARM add this peephole
+;; The above pattern is produced by combine in some cases, but not
+;; when one of the regs involved is hard, e.g. a function return value.
+;; This peephole catches that case. Valid only for low regs.
+
+(define_peephole2
+ [(set (match_operand:SI 0 "thumb_low_register_operand" "")
+ (match_operand:SI 1 "thumb_low_register_operand" ""))
+ (set (pc) (if_then_else
+ (match_operator 2 "arm_comparison_operator"
+ [(match_dup 0) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_THUMB"
+ [(parallel
+ [(set (pc)
+ (if_then_else
+ (match_op_dup 2
+ [(match_dup 1) (const_int 0)])
+ (label_ref (match_dup 3 ))
+ (pc)))
+ (set (match_dup 0) (match_dup 1))])]
+ ""
+)
+;; APPLE LOCAL end ARM add this peephole
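+
+;; Illustrative (register numbers assumed): after a call,
+;;	mov	r3, r0
+;;	cmp	r3, #0
+;;	beq	.L1
+;; involves the hard return-value register r0, so combine leaves the move
+;; separate; the peephole above folds it into *movsi_cbranchsi4.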
+
+(define_insn "*negated_cbranchsi4"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_operator"
+ [(match_operand:SI 1 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "l"))])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ output_asm_insn (\"cmn\\t%1, %2\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d0\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D0\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D0\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*tbit_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_operator"
+ [(zero_extract:SI (match_operand:SI 1 "s_register_operand" "l")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 4 "=l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ rtx op[3];
+ op[0] = operands[4];
+ op[1] = operands[1];
+ op[2] = GEN_INT (32 - 1 - INTVAL (operands[2]));
+
+ output_asm_insn (\"lsl\\t%0, %1, %2\", op);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d0\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D0\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D0\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*tlobits_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "equality_operator"
+ [(zero_extract:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (const_int 0))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 4 "=l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ rtx op[3];
+ op[0] = operands[4];
+ op[1] = operands[1];
+ op[2] = GEN_INT (32 - INTVAL (operands[2]));
+
+ output_asm_insn (\"lsl\\t%0, %1, %2\", op);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d0\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D0\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D0\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*tstsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "equality_operator"
+ [(and:SI (match_operand:SI 0 "s_register_operand" "%l")
+ (match_operand:SI 1 "s_register_operand" "l"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ output_asm_insn (\"tst\\t%0, %1\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d3\\t%l2\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D3\\t%~LCB%=\;b\\t%l2\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D3\\t%~LCB%=\;bl\\t%l2\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -250))
+ (le (minus (match_dup 2) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 2) (pc)) (const_int -2040))
+ (le (minus (match_dup 2) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*andsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 5 "equality_operator"
+ [(and:SI (match_operand:SI 2 "s_register_operand" "%0,1,1,1")
+ (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (and:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ if (which_alternative == 0)
+ output_asm_insn (\"and\\t%0, %3\", operands);
+ else if (which_alternative == 1)
+ {
+ output_asm_insn (\"and\\t%1, %3\", operands);
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"and\\t%1, %3\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - (which_alternative ? 2 : 0))
+ {
+ case 4: return \"b%d5\\t%l4\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D5\\t%~LCB%=\;b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D5\\t%~LCB%=\;bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*orrsi3_cbranch_scratch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "equality_operator"
+ [(ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 0 "=l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ output_asm_insn (\"orr\\t%0, %2\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d4\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D4\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D4\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*orrsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 5 "equality_operator"
+ [(ior:SI (match_operand:SI 2 "s_register_operand" "%0,1,1,1")
+ (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (ior:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ if (which_alternative == 0)
+ output_asm_insn (\"orr\\t%0, %3\", operands);
+ else if (which_alternative == 1)
+ {
+ output_asm_insn (\"orr\\t%1, %3\", operands);
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"orr\\t%1, %3\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - (which_alternative ? 2 : 0))
+ {
+ case 4: return \"b%d5\\t%l4\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D5\\t%~LCB%=\;b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D5\\t%~LCB%=\;bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*xorsi3_cbranch_scratch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "equality_operator"
+ [(xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 0 "=l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ output_asm_insn (\"eor\\t%0, %2\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d4\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D4\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D4\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*xorsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 5 "equality_operator"
+ [(xor:SI (match_operand:SI 2 "s_register_operand" "%0,1,1,1")
+ (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (xor:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ if (which_alternative == 0)
+ output_asm_insn (\"eor\\t%0, %3\", operands);
+ else if (which_alternative == 1)
+ {
+ output_asm_insn (\"eor\\t%1, %3\", operands);
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"eor\\t%1, %3\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - (which_alternative ? 2 : 0))
+ {
+ case 4: return \"b%d5\\t%l4\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D5\\t%~LCB%=\;b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D5\\t%~LCB%=\;bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*bicsi3_cbranch_scratch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "equality_operator"
+ [(and:SI (not:SI (match_operand:SI 2 "s_register_operand" "l"))
+ (match_operand:SI 1 "s_register_operand" "0"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 0 "=l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ output_asm_insn (\"bic\\t%0, %2\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d4\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D4\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D4\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*bicsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 5 "equality_operator"
+ [(and:SI (not:SI (match_operand:SI 3 "s_register_operand" "l,l,l,l,l"))
+ (match_operand:SI 2 "s_register_operand" "0,1,1,1,1"))
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=!l,l,*?h,*?m,*?m")
+ (and:SI (not:SI (match_dup 3)) (match_dup 2)))
+ (clobber (match_scratch:SI 1 "=X,l,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ if (which_alternative == 0)
+ output_asm_insn (\"bic\\t%0, %3\", operands);
+ else if (which_alternative <= 2)
+ {
+ output_asm_insn (\"bic\\t%1, %3\", operands);
+ /* It's ok if OP0 is a lo-reg, even though the mov will set the
+ conditions again, since we're only testing for equality. */
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"bic\\t%1, %3\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - (which_alternative ? 2 : 0))
+ {
+ case 4: return \"b%d5\\t%l4\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D5\\t%~LCB%=\;b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D5\\t%~LCB%=\;bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }"
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*cbranchne_decr1"
+ [(set (pc)
+ (if_then_else (match_operator 3 "equality_operator"
+ [(match_operand:SI 2 "s_register_operand" "l,l,1,l")
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (plus:SI (match_dup 2) (const_int -1)))
+ (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ {
+ rtx cond[2];
+ cond[0] = gen_rtx_fmt_ee ((GET_CODE (operands[3]) == NE
+ ? GEU : LTU),
+ VOIDmode, operands[2], const1_rtx);
+ cond[1] = operands[4];
+
+ if (which_alternative == 0)
+ output_asm_insn (\"sub\\t%0, %2, #1\", operands);
+ else if (which_alternative == 1)
+ {
+ /* We must provide an alternative for a hi reg because reload
+ cannot handle output reloads on a jump instruction, but we
+ can't subtract into that. Fortunately a mov from lo to hi
+ does not clobber the condition codes. */
+ output_asm_insn (\"sub\\t%1, %2, #1\", operands);
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ /* Similarly, but the target is memory. */
+ output_asm_insn (\"sub\\t%1, %2, #1\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - (which_alternative ? 2 : 0))
+ {
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 4:
+ output_asm_insn (\"b%d0\\t%l1\", cond);
+ return \"\";
+ case 6:
+ output_asm_insn (\"b%D0\\t%~LCB%=\", cond);
+ return \"b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default:
+ output_asm_insn (\"b%D0\\t%~LCB%=\", cond);
+ return \"bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set_attr_alternative "length"
+ [
+ ;; Alternative 0
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ ;; Alternative 1
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))
+ ;; Alternative 2
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))
+ ;; Alternative 3
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -248))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2038))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))])]
+)
+
+(define_insn "*addsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "comparison_operator"
+ [(plus:SI
+ (match_operand:SI 2 "s_register_operand" "%l,0,*0,1,1,1")
+ (match_operand:SI 3 "reg_or_int_operand" "lL,IJ,*r,lIJ,lIJ,lIJ"))
+ (const_int 0)])
+ (label_ref (match_operand 5 "" ""))
+ (pc)))
+ (set
+ (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,l,*!h,*?h,*?m,*?m")
+ (plus:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_scratch:SI 1 "=X,X,X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && (GET_CODE (operands[4]) == EQ
+ || GET_CODE (operands[4]) == NE
+ || GET_CODE (operands[4]) == GE
+ || GET_CODE (operands[4]) == LT)"
+ "*
+ {
+ rtx cond[3];
+
+ cond[0] = (which_alternative < 3) ? operands[0] : operands[1];
+ cond[1] = operands[2];
+ cond[2] = operands[3];
+
+ if (GET_CODE (cond[2]) == CONST_INT && INTVAL (cond[2]) < 0)
+ output_asm_insn (\"sub\\t%0, %1, #%n2\", cond);
+ else
+ output_asm_insn (\"add\\t%0, %1, %2\", cond);
+
+ if (which_alternative >= 3
+ && which_alternative < 4)
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ else if (which_alternative >= 4)
+ output_asm_insn (\"str\\t%1, %0\", operands);
+
+ switch (get_attr_length (insn) - ((which_alternative >= 3) ? 2 : 0))
+ {
+ case 4:
+ return \"b%d4\\t%l5\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6:
+ return \"b%D4\\t%~LCB%=\;b\\t%l5\\t%@long jump\\n%~LCB%=:\";
+ default:
+ return \"b%D4\\t%~LCB%=\;bl\\t%l5\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (lt (symbol_ref ("which_alternative"))
+ (const_int 3))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (lt (symbol_ref ("which_alternative"))
+ (const_int 3))
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -250))
+ (le (minus (match_dup 5) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -2040))
+ (le (minus (match_dup 5) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -248))
+ (le (minus (match_dup 5) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -2038))
+ (le (minus (match_dup 5) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*addsi3_cbranch_scratch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 3 "comparison_operator"
+ [(plus:SI
+ (match_operand:SI 1 "s_register_operand" "%l,l,l,0")
+ (match_operand:SI 2 "reg_or_int_operand" "J,l,L,IJ"))
+ (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (clobber (match_scratch:SI 0 "=X,X,l,l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && (GET_CODE (operands[3]) == EQ
+ || GET_CODE (operands[3]) == NE
+ || GET_CODE (operands[3]) == GE
+ || GET_CODE (operands[3]) == LT)"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ output_asm_insn (\"cmp\\t%1, #%n2\", operands);
+ break;
+ case 1:
+ output_asm_insn (\"cmn\\t%1, %2\", operands);
+ break;
+ case 2:
+ if (INTVAL (operands[2]) < 0)
+ output_asm_insn (\"sub\\t%0, %1, %2\", operands);
+ else
+ output_asm_insn (\"add\\t%0, %1, %2\", operands);
+ break;
+ case 3:
+ if (INTVAL (operands[2]) < 0)
+ output_asm_insn (\"sub\\t%0, %0, %2\", operands);
+ else
+ output_asm_insn (\"add\\t%0, %0, %2\", operands);
+ break;
+ }
+
+ switch (get_attr_length (insn))
+ {
+ case 4:
+ return \"b%d3\\t%l4\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6:
+ return \"b%D3\\t%~LCB%=\;b\\t%l4\\t%@long jump\\n%~LCB%=:\";
+ default:
+ return \"b%D3\\t%~LCB%=\;bl\\t%l4\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -250))
+ (le (minus (match_dup 4) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 4) (pc)) (const_int -2040))
+ (le (minus (match_dup 4) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+(define_insn "*subsi3_cbranch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 4 "comparison_operator"
+ [(minus:SI
+ (match_operand:SI 2 "s_register_operand" "l,l,1,l")
+ (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
+ (const_int 0)])
+ (label_ref (match_operand 5 "" ""))
+ (pc)))
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (minus:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && (GET_CODE (operands[4]) == EQ
+ || GET_CODE (operands[4]) == NE
+ || GET_CODE (operands[4]) == GE
+ || GET_CODE (operands[4]) == LT)"
+ "*
+ {
+ if (which_alternative == 0)
+ output_asm_insn (\"sub\\t%0, %2, %3\", operands);
+ else if (which_alternative == 1)
+ {
+ /* We must provide an alternative for a hi reg because reload
+ cannot handle output reloads on a jump instruction, but we
+ can't subtract into that. Fortunately a mov from lo to hi
+ does not clobber the condition codes. */
+ output_asm_insn (\"sub\\t%1, %2, %3\", operands);
+ output_asm_insn (\"mov\\t%0, %1\", operands);
+ }
+ else
+ {
+ /* Similarly, but the target is memory. */
+ output_asm_insn (\"sub\\t%1, %2, %3\", operands);
+ output_asm_insn (\"str\\t%1, %0\", operands);
+ }
+
+ switch (get_attr_length (insn) - ((which_alternative != 0) ? 2 : 0))
+ {
+ case 4:
+ return \"b%d4\\t%l5\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6:
+ return \"b%D4\\t%~LCB%=\;b\\t%l5\\t%@long jump\\n%~LCB%=:\";
+ default:
+ return \"b%D4\\t%~LCB%=\;bl\\t%l5\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (ior (and (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (eq_attr "length" "8"))
+ (eq_attr "length" "10"))
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (eq (symbol_ref ("which_alternative"))
+ (const_int 0))
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -250))
+ (le (minus (match_dup 5) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -2040))
+ (le (minus (match_dup 5) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8)))
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -248))
+ (le (minus (match_dup 5) (pc)) (const_int 256)))
+ (const_int 6)
+ (if_then_else
+ (and (ge (minus (match_dup 5) (pc)) (const_int -2038))
+ (le (minus (match_dup 5) (pc)) (const_int 2048)))
+ (const_int 8)
+ (const_int 10)))))]
+)
+
+(define_insn "*subsi3_cbranch_scratch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "arm_comparison_operator"
+ [(minus:SI (match_operand:SI 1 "register_operand" "l")
+ (match_operand:SI 2 "nonmemory_operand" "l"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1
+ && (GET_CODE (operands[0]) == EQ
+ || GET_CODE (operands[0]) == NE
+ || GET_CODE (operands[0]) == GE
+ || GET_CODE (operands[0]) == LT)"
+ "*
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ switch (get_attr_length (insn))
+ {
+ case 4: return \"b%d0\\t%l3\";
+ /* APPLE LOCAL begin ARM MACH assembler */
+ case 6: return \"b%D0\\t%~LCB%=\;b\\t%l3\\t%@long jump\\n%~LCB%=:\";
+ default: return \"b%D0\\t%~LCB%=\;bl\\t%l3\\t%@far jump\\n%~LCB%=:\";
+ /* APPLE LOCAL end ARM MACH assembler */
+ }
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "8")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -250))
+ (le (minus (match_dup 3) (pc)) (const_int 256)))
+ (const_int 4)
+ (if_then_else
+ (and (ge (minus (match_dup 3) (pc)) (const_int -2040))
+ (le (minus (match_dup 3) (pc)) (const_int 2048)))
+ (const_int 6)
+ (const_int 8))))]
+)
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ DONE;
+ }"
+)
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "arm_float_compare_operand" "")]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ DONE;
+ "
+)
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "arm_float_compare_operand" "")]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT"
+ "
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ DONE;
+ "
+)
+
+;; APPLE LOCAL begin ARM enhance conditional insn generation
+(define_insn "*arm_cmpsi_insn"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+ [(set_attr "conds" "set")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end ARM enhance conditional insn generation
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_cmpsi_shiftsi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ "TARGET_ARM"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*arm_cmpsi_negshiftsi_si"
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z
+ (neg:SI (match_operator:SI 1 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "reg_or_int_operand" "rM")]))
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "cmn%?\\t%0, %2%S1"
+ [(set_attr "conds" "set")
+ (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; Cirrus SF compare instruction
+(define_insn "*cirrus_cmpsf"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:SF 0 "cirrus_fp_register" "v")
+ (match_operand:SF 1 "cirrus_fp_register" "v")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcmps%?\\tr15, %V0, %V1"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "compare")]
+)
+
+;; Cirrus DF compare instruction
+(define_insn "*cirrus_cmpdf"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:DF 0 "cirrus_fp_register" "v")
+ (match_operand:DF 1 "cirrus_fp_register" "v")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcmpd%?\\tr15, %V0, %V1"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "compare")]
+)
+
+;; Cirrus DI compare instruction
+(define_expand "cmpdi"
+ [(match_operand:DI 0 "cirrus_fp_register" "")
+ (match_operand:DI 1 "cirrus_fp_register" "")]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ DONE;
+ }")
+
+(define_insn "*cirrus_cmpdi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:DI 0 "cirrus_fp_register" "v")
+ (match_operand:DI 1 "cirrus_fp_register" "v")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcmp64%?\\tr15, %V0, %V1"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "compare")]
+)
+
+; This insn allows redundant compares to be removed by cse; nothing
+; should ever appear in the output file, since (set (reg x) (reg x))
+; is a no-op that is deleted later on. The match_dup matches the mode
+; here, so that mode changes of the condition codes aren't lost even
+; though we don't specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "\\t%@ deleted compare"
+ [(set_attr "conds" "set")
+ (set_attr "length" "0")]
+)
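+
+;; For instance, when cse proves that the flags set by an earlier
+;; identical compare are still valid, the second compare is rewritten
+;; as (set (reg:CC CC_REGNUM) (reg:CC CC_REGNUM)) and matches here;
+;; with length 0 it contributes nothing to the code but the
+;; "@ deleted compare" assembler comment.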
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (NE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bunordered"
+ [(set (pc)
+ (if_then_else (unordered (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNORDERED, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "bordered"
+ [(set (pc)
+ (if_then_else (ordered (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (ORDERED, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "bungt"
+ [(set (pc)
+ (if_then_else (ungt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNGT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bunlt"
+ [(set (pc)
+ (if_then_else (unlt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNLT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bunge"
+ [(set (pc)
+ (if_then_else (unge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNGE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bunle"
+ [(set (pc)
+ (if_then_else (unle (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNLE, arm_compare_op0, arm_compare_op1);"
+)
+
+;; The following two patterns need two branch instructions, since there is
+;; no single instruction that will handle all cases.
+(define_expand "buneq"
+ [(set (pc)
+ (if_then_else (uneq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNEQ, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "bltgt"
+ [(set (pc)
+ (if_then_else (ltgt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (LTGT, arm_compare_op0, arm_compare_op1);"
+)
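+
+;; After a floating-point compare, "unordered" is reflected in the V
+;; flag and "equal" in the Z flag, so UNEQ is branched with bvs plus
+;; beq, and LTGT with bmi plus bgt -- see the *arm_buneq and
+;; *arm_bltgt patterns below.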
+
+;;
+;; Patterns to match conditional branch insns.
+;;
+
+; Special pattern to match UNEQ.
+(define_insn "*arm_buneq"
+ [(set (pc)
+ (if_then_else (uneq (match_operand 1 "cc_register" "") (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "*
+ gcc_assert (!arm_ccfsm_state);
+
+ return \"bvs\\t%l0\;beq\\t%l0\";
+ "
+ [(set_attr "conds" "jump_clob")
+ (set_attr "length" "8")]
+)
+
+; Special pattern to match LTGT.
+(define_insn "*arm_bltgt"
+ [(set (pc)
+ (if_then_else (ltgt (match_operand 1 "cc_register" "") (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "*
+ gcc_assert (!arm_ccfsm_state);
+
+ return \"bmi\\t%l0\;bgt\\t%l0\";
+ "
+ [(set_attr "conds" "jump_clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*arm_cond_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+ "
+ [(set_attr "conds" "use")
+ (set_attr "type" "branch")]
+)
+
+; Special pattern to match reversed UNEQ.
+(define_insn "*arm_buneq_reversed"
+ [(set (pc)
+ (if_then_else (uneq (match_operand 1 "cc_register" "") (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "*
+ gcc_assert (!arm_ccfsm_state);
+
+ return \"bmi\\t%l0\;bgt\\t%l0\";
+ "
+ [(set_attr "conds" "jump_clob")
+ (set_attr "length" "8")]
+)
+
+; Special pattern to match reversed LTGT.
+(define_insn "*arm_bltgt_reversed"
+ [(set (pc)
+ (if_then_else (ltgt (match_operand 1 "cc_register" "") (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "*
+ gcc_assert (!arm_ccfsm_state);
+
+ return \"bvs\\t%l0\;beq\\t%l0\";
+ "
+ [(set_attr "conds" "jump_clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*arm_cond_branch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+ "
+ [(set_attr "conds" "use")
+ (set_attr "type" "branch")]
+)
+
+
+
+; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (NE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (gt:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (le:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ge:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GE, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lt:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LT, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (leu:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (geu:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "operands[1] = arm_gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1);"
+)
+
+(define_expand "sunordered"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (unordered:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNORDERED, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "sordered"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ordered:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (ORDERED, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "sungt"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ungt:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNGT, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "sunge"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (unge:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNGE, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "sunlt"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (unlt:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNLT, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+(define_expand "sunle"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (unle:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "operands[1] = arm_gen_compare_reg (UNLE, arm_compare_op0,
+ arm_compare_op1);"
+)
+
+;;; DO NOT add patterns for SUNEQ or SLTGT, these can't be represented with
+;;; simple ARM instructions.
+;
+; (define_expand "suneq"
+; [(set (match_operand:SI 0 "s_register_operand" "")
+; (uneq:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+; "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+; "gcc_unreachable ();"
+; )
+;
+; (define_expand "sltgt"
+; [(set (match_operand:SI 0 "s_register_operand" "")
+; (ltgt:SI (match_dup 1) (const_int 0)))]
+;; APPLE LOCAL v7 support. Merge from mainline
+; "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+; "gcc_unreachable ();"
+; )
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ "TARGET_ARM"
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_ARM"
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_ARM"
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "arm_comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "arm_comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand.  */
+ if ((!(TARGET_HARD_FLOAT && TARGET_FPA))
+ || (!arm_float_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "arm_comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "arm_float_add_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FPA || TARGET_VFP)"
+ "
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ "TARGET_ARM"
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ (set_attr "conds" "use")
+ (set_attr "insn" "mov,mvn,mov,mvn,mov,mov,mvn,mvn")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_ARM && TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+ [(set_attr "conds" "use")
+ (set_attr "insn" "mov")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+
+;; Jump and linkage insns
+
+(define_expand "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ "TARGET_EITHER"
+ ""
+)
+
+(define_insn "*arm_jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ "TARGET_ARM"
+ "*
+ {
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+ }
+ "
+ [(set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb2_jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ "TARGET_THUMB2"
+ "*
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b\\t%l0\";
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2048)))
+ (const_int 2)
+ (const_int 4)))]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*thumb_jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+ "
+ [(set (attr "far_jump")
+ (if_then_else
+ (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2048)))
+ (const_int 2)
+ (const_int 4)))]
+)
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))])]
+ "TARGET_EITHER"
+ "
+ {
+ rtx callee;
+
+ /* APPLE LOCAL begin ARM dynamic */
+ /* In an untyped call, we can get NULL for operand 2. */
+ if (operands[2] == NULL_RTX)
+ operands[2] = const0_rtx;
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ /* This is to decide if we should generate indirect calls by loading the
+ 32-bit address of the callee into a register before performing the
+ branch and link. operands[2] encodes the long_call/short_call
+ attribute of the function being called. This attribute is set whenever
+ __attribute__((long_call/short_call)) or #pragma long_call/no_long_call
+ is used, and the short_call attribute can also be set if the function
+ is declared static or has already been defined in the current
+ compilation unit. See arm.c and arm.h for more about this. The third
+ parameter to arm_is_longcall_p tells it which pattern invoked it.
+ (An illustration follows this pattern.) */
+ callee = XEXP (operands[0], 0);
+ /* APPLE LOCAL end ARM dynamic */
+
+ if ((GET_CODE (callee) == SYMBOL_REF
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ || (GET_CODE (callee) != SYMBOL_REF
+ && GET_CODE (callee) != REG))
+ XEXP (operands[0], 0) = force_reg (Pmode, callee);
+ }"
+)
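+
+;; A hypothetical illustration of the long-call handling above: given
+;; "extern void f (void) __attribute__ ((long_call));", the address of
+;; f is forced into a register and the call is emitted through one of
+;; the *call_reg patterns below ("blx" on armv5, otherwise the
+;; sequence produced by output_call) rather than as a direct "bl f".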
+
+;; APPLE LOCAL begin 5831528 make calls predicable
+(define_insn "*call_reg_armv5"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && arm_arch5"
+ "blx%?\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*call_reg_arm"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && !arm_arch5"
+ "*
+ return output_call (operands);
+ "
+ ;; length is worst case, normally it is only two
+ [(set_attr "length" "12")
+ (set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin 7649286 always use blx for indirect call when available
+(define_insn "*call_mem_v4"
+ [(call (mem:SI (match_operand:SI 0 "call_memory_operand" "m"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && !arm_arch5"
+ "*
+ return output_call_mem (operands);
+ "
+ [(set_attr "length" "12")
+ (set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end 7649286 always use blx for indirect call when available
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*call_reg_thumb1_v5"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch5"
+ "blx\\t%0"
+ [(set_attr "length" "2")
+ (set_attr "type" "call")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*call_reg_thumb1"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && !arm_arch5"
+ "*
+ {
+ if (!TARGET_CALLER_INTERWORKING)
+ return thumb_call_via_reg (operands[0]);
+ else if (operands[1] == const0_rtx)
+ return \"bl\\t%__interwork_call_via_%0\";
+ else if (frame_pointer_needed)
+ return \"bl\\t%__interwork_r7_call_via_%0\";
+ else
+ return \"bl\\t%__interwork_r11_call_via_%0\";
+ }"
+ [(set_attr "type" "call")]
+)
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))])]
+ "TARGET_EITHER"
+ "
+ {
+ /* APPLE LOCAL begin ARM dynamic */
+ rtx callee;
+
+ /* In an untyped call, we can get NULL for operand 3. */
+ if (operands[3] == 0)
+ operands[3] = const0_rtx;
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ callee = XEXP (operands[1], 0);
+ /* APPLE LOCAL end ARM dynamic */
+
+ /* See the comment in define_expand \"call\". */
+ if ((GET_CODE (callee) == SYMBOL_REF
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ || (GET_CODE (callee) != SYMBOL_REF
+ && GET_CODE (callee) != REG))
+ XEXP (operands[1], 0) = force_reg (Pmode, callee);
+ }"
+)
+
+(define_insn "*call_value_reg_armv5"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && arm_arch5"
+ "blx%?\\t%1"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*call_value_reg_arm"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && !arm_arch5"
+ "*
+ return output_call (&operands[1]);
+ "
+ [(set_attr "length" "12")
+ (set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+;; APPLE LOCAL begin 7649286 always use blx for indirect call when available
+(define_insn "*call_value_mem_v4"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_memory_operand" "m"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM && (!CONSTANT_ADDRESS_P (XEXP (operands[1], 0))) && !arm_arch5"
+ "*
+ return output_call_mem (&operands[1]);
+ "
+ [(set_attr "length" "12")
+ (set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+;; APPLE LOCAL end 7649286 always use blx for indirect call when available
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*call_value_reg_thumb1_v5"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && arm_arch5"
+ "blx\\t%1"
+ [(set_attr "length" "2")
+ (set_attr "type" "call")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*call_value_reg_thumb1"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1 && !arm_arch5"
+ "*
+ {
+ if (!TARGET_CALLER_INTERWORKING)
+ return thumb_call_via_reg (operands[1]);
+ else if (operands[2] == const0_rtx)
+ return \"bl\\t%__interwork_call_via_%1\";
+ else if (frame_pointer_needed)
+ return \"bl\\t%__interwork_r7_call_via_%1\";
+ else
+ return \"bl\\t%__interwork_r11_call_via_%1\";
+ }"
+ [(set_attr "type" "call")]
+)
+
+;; Allow calls to SYMBOL_REFs specially, as they are not valid general addresses.
+;; The 'a' causes the operand to be treated as an address, i.e. no '#' output.
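+;; For example, "bl%?\t%a0" with a symbol_ref operand prints
+;; "bl foo" rather than "bl #foo".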
+
+;; APPLE LOCAL begin ARM pic support
+;; Prevent these patterns from being used with dynamic symbol_refs. An
+;; alternate approach would be to generate a stub, but this would be
+;; of questionable value, as these patterns are not generally used
+;; for dynamic code anyway (see rdar://4514281 for an example of what it
+;; takes to get here).
+(define_insn "*call_symbol_predicable"
+ [(call (mem:SI (match_operand:SI 0 "arm_branch_target" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM
+ && !TARGET_INTERWORK
+ && (GET_CODE (operands[0]) == SYMBOL_REF)
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "*
+ {
+ return NEED_PLT_RELOC ? \"bl%?\\t%a0(PLT)\" : \"bl%?\\t%a0\";
+ }"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "arm_branch_target" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM
+ && TARGET_INTERWORK
+ && (GET_CODE (operands[0]) == SYMBOL_REF)
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "*
+ {
+ return NEED_PLT_RELOC ? \"bl%?\\t%a0(PLT)\" : \"bl%?\\t%a0\";
+ }"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*call_value_symbol_predicable"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "arm_branch_target" ""))
+ (match_operand:SI 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM
+ && !TARGET_INTERWORK
+ && (GET_CODE (operands[1]) == SYMBOL_REF)
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "*
+ {
+ return NEED_PLT_RELOC ? \"bl%?\\t%a1(PLT)\" : \"bl%?\\t%a1\";
+ }"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "arm_branch_target" ""))
+ (match_operand:SI 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_ARM
+ && TARGET_INTERWORK
+ && (GET_CODE (operands[1]) == SYMBOL_REF)
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "*
+ {
+ return NEED_PLT_RELOC ? \"bl%?\\t%a1(PLT)\" : \"bl%?\\t%a1\";
+ }"
+ [(set_attr "type" "call")]
+)
+;; APPLE LOCAL end 5831528 make calls predicable
+;; APPLE LOCAL end ARM pic support
+
+;; APPLE LOCAL begin ARM dynamic
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "arm_branch_target" ""))
+ (match_operand:SI 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "*
+ {
+#if TARGET_MACHO
+ if (machopic_lookup_stub_or_non_lazy_ptr (XSTR (operands[0], 0)))
+ return \"blx\\t%a0\";
+ else
+#endif
+ return \"bl\\t%a0\";
+ }"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand 1 "arm_branch_target" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "*
+ {
+#if TARGET_MACHO
+ if (machopic_lookup_stub_or_non_lazy_ptr (XSTR (operands[1], 0)))
+ return \"blx\\t%a1\";
+ else
+#endif
+ return \"bl\\t%a1\";
+ }"
+ [(set_attr "length" "4")
+ (set_attr "type" "call")]
+)
+;; APPLE LOCAL end ARM dynamic
+
+;; We may also be able to do sibcalls for Thumb, but it's much harder...
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (return)
+ (use (match_operand 2 "" ""))])]
+ "TARGET_ARM"
+ "
+ {
+/* APPLE LOCAL begin ARM dynamic */
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+/* APPLE LOCAL end ARM dynamic */
+
+ if (operands[2] == NULL_RTX)
+ operands[2] = const0_rtx;
+ }"
+)
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (return)
+ (use (match_operand 3 "" ""))])]
+ "TARGET_ARM"
+ "
+ {
+/* APPLE LOCAL begin ARM dynamic */
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+/* APPLE LOCAL end ARM dynamic */
+
+ if (operands[3] == NULL_RTX)
+ operands[3] = const0_rtx;
+ }"
+)
+
+;; APPLE LOCAL begin ARM indirect sibcalls
+(define_insn "*sibcall_insn"
+ [(call (mem:SI (match_operand:SI 0 "arm_branch_target" "X"))
+ (match_operand 1 "" ""))
+ (return)
+ (use (match_operand 2 "" ""))]
+ "TARGET_ARM && (GET_CODE (operands[0]) == SYMBOL_REF || GET_CODE (operands[0]) == REG)"
+ "*
+ if (GET_CODE (operands[0]) == REG)
+ return \"bx%?\\t%0\";
+ else
+ return NEED_PLT_RELOC ? \"b%?\\t%a0(PLT)\" : \"b%?\\t%a0\";
+ "
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*sibcall_value_insn"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "arm_branch_target" "X"))
+ (match_operand 2 "" "")))
+ (return)
+ (use (match_operand 3 "" ""))]
+ "TARGET_ARM && (GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == REG)"
+ "*
+ if (GET_CODE (operands[1]) == REG)
+ return \"bx%?\\t%1\";
+ else
+ return NEED_PLT_RELOC ? \"b%?\\t%a1(PLT)\" : \"b%?\\t%a1\";
+ "
+ [(set_attr "type" "call")]
+)
+;; APPLE LOCAL end ARM indirect sibcalls
+
+;; Often the return insn will be the same as loading from memory, so set the type attribute accordingly.
+(define_insn "return"
+ [(return)]
+ "TARGET_ARM && USE_RETURN_INSN (FALSE)"
+ "*
+ {
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (const_true_rtx, TRUE, FALSE);
+ }"
+ [(set_attr "type" "load1")
+ (set_attr "length" "12")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "arm_comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "TARGET_ARM && USE_RETURN_INSN (TRUE)"
+ "*
+ {
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+ }"
+ [(set_attr "conds" "use")
+ (set_attr "length" "12")
+ (set_attr "type" "load1")]
+)
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "arm_comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "TARGET_ARM && USE_RETURN_INSN (TRUE)"
+ "*
+ {
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+ }"
+ [(set_attr "conds" "use")
+ (set_attr "length" "12")
+ (set_attr "type" "load1")]
+)
+
+;; Generate a sequence of instructions to determine if the processor is
+;; in 26-bit or 32-bit mode, and return the appropriate return address
+;; mask.
+
+(define_expand "return_addr_mask"
+ [(set (match_dup 1)
+ (compare:CC_NOOV (unspec [(const_int 0)] UNSPEC_CHECK_ARCH)
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (eq (match_dup 1) (const_int 0))
+ (const_int -1)
+ (const_int 67108860)))] ; 0x03fffffc
+ "TARGET_ARM"
+ "
+ operands[1] = gen_rtx_REG (CC_NOOVmode, CC_REGNUM);
+ ")
+
+(define_insn "*check_arch2"
+ [(set (match_operand:CC_NOOV 0 "cc_register" "")
+ (compare:CC_NOOV (unspec [(const_int 0)] UNSPEC_CHECK_ARCH)
+ (const_int 0)))]
+ "TARGET_ARM"
+ "teq\\t%|r0, %|r0\;teq\\t%|pc, %|pc"
+ [(set_attr "length" "8")
+ (set_attr "conds" "set")]
+)
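+
+;; Illustrative note (not part of the original sources): the first
+;; "teq r0, r0" forces the Z flag on.  On a 26-bit processor the PSR flags
+;; live in the top bits of r15 and an operand-2 read of the PC includes
+;; them, so the second "teq pc, pc" compares unequal values and clears Z;
+;; on a 32-bit processor both reads are identical and Z stays set.  The
+;; expander above then selects -1 (keep all bits) or 0x03fffffc (strip the
+;; flag and mode bits), roughly:
+;;
+;;   return_addr = raw_lr & mask;   /* C sketch of applying the mask */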
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ "TARGET_EITHER"
+ "
+ {
+ int i;
+ rtx par = gen_rtx_PARALLEL (VOIDmode,
+ rtvec_alloc (XVECLEN (operands[2], 0)));
+ rtx addr = gen_reg_rtx (Pmode);
+ rtx mem;
+ int size = 0;
+
+ emit_move_insn (addr, XEXP (operands[1], 0));
+ mem = change_address (operands[1], BLKmode, addr);
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx src = SET_SRC (XVECEXP (operands[2], 0, i));
+
+ /* Default code only uses r0 as a return value, but we could
+ be using anything up to 4 registers. */
+ if (REGNO (src) == R0_REGNUM)
+ src = gen_rtx_REG (TImode, R0_REGNUM);
+
+ XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, src,
+ GEN_INT (size));
+ size += GET_MODE_SIZE (GET_MODE (src));
+ }
+
+ emit_call_insn (GEN_CALL_VALUE (par, operands[0], const0_rtx, NULL,
+ const0_rtx));
+
+ size = 0;
+
+ for (i = 0; i < XVECLEN (par, 0); i++)
+ {
+ HOST_WIDE_INT offset = 0;
+ rtx reg = XEXP (XVECEXP (par, 0, i), 0);
+
+ if (size != 0)
+ emit_move_insn (addr, plus_constant (addr, size));
+
+ mem = change_address (mem, GET_MODE (reg), NULL);
+ if (REGNO (reg) == R0_REGNUM)
+ {
+ /* On Thumb we have to use a write-back instruction. */
+ emit_insn (arm_gen_store_multiple (R0_REGNUM, 4, addr, TRUE,
+ TARGET_THUMB ? TRUE : FALSE, mem, &offset));
+ size = TARGET_ARM ? 16 : 0;
+ }
+ else
+ {
+ emit_move_insn (mem, reg);
+ size = GET_MODE_SIZE (GET_MODE (reg));
+ }
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+ }"
+)
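+
+;; Illustrative note (not part of the original sources): untyped_call backs
+;; GCC's __builtin_apply machinery, which forwards an opaque argument block
+;; to a callee and hands back whatever it returned.  A minimal C sketch
+;; (the 64-byte argument-block size is an arbitrary example value):
+;;
+;;   double target (int a, double b) { return a + b; }
+;;
+;;   void forward (void)
+;;   {
+;;     void *args = __builtin_apply_args ();
+;;     void *ret = __builtin_apply ((void (*) ()) target, args, 64);
+;;     __builtin_return (ret);
+;;   }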
+
+(define_expand "untyped_return"
+ [(match_operand:BLK 0 "memory_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_EITHER"
+ "
+ {
+ int i;
+ rtx addr = gen_reg_rtx (Pmode);
+ rtx mem;
+ int size = 0;
+
+ emit_move_insn (addr, XEXP (operands[0], 0));
+ mem = change_address (operands[0], BLKmode, addr);
+
+ for (i = 0; i < XVECLEN (operands[1], 0); i++)
+ {
+ HOST_WIDE_INT offset = 0;
+ rtx reg = SET_DEST (XVECEXP (operands[1], 0, i));
+
+ if (size != 0)
+ emit_move_insn (addr, plus_constant (addr, size));
+
+ mem = change_address (mem, GET_MODE (reg), NULL);
+ if (REGNO (reg) == R0_REGNUM)
+ {
+ /* On Thumb we have to use a write-back instruction. */
+ emit_insn (arm_gen_load_multiple (R0_REGNUM, 4, addr, TRUE,
+ TARGET_THUMB ? TRUE : FALSE, mem, &offset));
+ size = TARGET_ARM ? 16 : 0;
+ }
+ else
+ {
+ emit_move_insn (reg, mem);
+ size = GET_MODE_SIZE (GET_MODE (reg));
+ }
+ }
+
+ /* Emit USE insns before the return. */
+ for (i = 0; i < XVECLEN (operands[1], 0); i++)
+ emit_insn (gen_rtx_USE (VOIDmode,
+ SET_DEST (XVECEXP (operands[1], 0, i))));
+
+ /* Construct the return. */
+ expand_naked_return ();
+
+ DONE;
+ }"
+)
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
+ "TARGET_EITHER"
+ ""
+ [(set_attr "length" "0")
+ (set_attr "type" "block")]
+)
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+;; APPLE LOCAL compact switch tables
+ "TARGET_32BIT || TARGET_COMPACT_SWITCH_TABLES"
+ "
+ {
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ /* APPLE LOCAL begin compact switch tables */
+ if (TARGET_32BIT)
+ {
+ /* APPLE LOCAL end compact switch tables */
+ if (!const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ if (TARGET_ARM)
+ {
+ emit_jump_insn (gen_arm_casesi_internal (operands[0], operands[2],
+ operands[3], operands[4]));
+ }
+ /* APPLE LOCAL begin 6152801 SImode thumb2 switch table dispatch */
+ /* Removed specialized PIC handling */
+ /* APPLE LOCAL end 6152801 SImode thumb2 switch table dispatch */
+ else
+ {
+ emit_jump_insn (gen_thumb2_casesi_internal (operands[0],
+ operands[2], operands[3], operands[4]));
+ }
+ DONE;
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL begin compact switch tables */
+ }
+ else
+ {
+ /* Containing function must be 4-byte aligned, else we won't know what the
+ various .align directives do, e.g. around constant tables. */
+ cfun->needs_4byte_alignment = 1;
+ /* This is a function call, but the semantics are not the same as a normal
+ function call, so we put the parameter in R0 explicitly and hide the
+ call as a casesi node. The USE of R0 in the casesi_internal pattern
+ causes the value to be retained. */
+ emit_move_insn (gen_rtx_REG (Pmode, 0), operands[0]);
+ emit_jump_insn (gen_thumb_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+ }
+ /* APPLE LOCAL end compact switch tables */
+ }"
+)
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "arm_casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CC CC_REGNUM))
+ (use (label_ref (match_dup 2)))])]
+ "TARGET_ARM"
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+;; APPLE LOCAL begin compact switch tables
+;; This pattern represents the library call for Thumb switch tables.
+;; The functions' (sparse) register usage is recorded as clobbers.
+
+(define_insn "thumb_casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "l")
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 2))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI IP_REGNUM))
+ (use (reg:SI 0))
+ (use (label_ref (match_dup 2)))])]
+ "TARGET_COMPACT_SWITCH_TABLES"
+ "*
+ {
+ rtx body = PATTERN (next_real_insn (insn));
+ static char buf[255];
+ gcc_assert (GET_CODE (body) == ADDR_DIFF_VEC);
+ strcpy(buf, \"bl\\t\");
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ strcat(buf, \"L\");
+ if (GET_MODE (body) == QImode
+ && ADDR_DIFF_VEC_FLAGS (body).offset_unsigned)
+ {
+ register_switchu8_libfunc ();
+ strcat(buf, \"___switchu8\");
+ }
+ else if (GET_MODE (body) == QImode)
+ {
+ register_switch8_libfunc ();
+ strcat(buf, \"___switch8\");
+ }
+ else if (GET_MODE (body) == HImode)
+ {
+ register_switch16_libfunc ();
+ strcat(buf, \"___switch16\");
+ }
+ else
+ {
+ register_switch32_libfunc ();
+ /* The table is 4-byte aligned, and the call should
+ immediately precede the table. To do this, align
+ here; as it happens, 0x0000 is a NOP insn. The
+ insn_length is still 4 even if a NOP is inserted;
+ however, the computation in shorten_branches
+ comes out right because that 4 is counted against
+ the following label, which is marked as 4-byte
+ aligned. I.e. the shorten_branches code thinks it's
+ going to look like
+ call
+ .align 2
+ zero padding
+ label:
+ when in fact it is
+ .align 2
+ NOP
+ call
+ .align 2
+ never any padding here
+ label:
+ and it gets the right address for the label.
+ Yes, this is overly tricky. */
+ assemble_align (32);
+ strcat(buf, \"___switch32\");
+ }
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ strcat(buf, \"$stub\");
+ return buf;
+ }
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")]
+)
+;; APPLE LOCAL end compact switch tables
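+
+;; Illustrative note (not part of the original sources): for a dense C
+;; switch such as
+;;
+;;   int f (int i)
+;;   {
+;;     switch (i) { case 0: return 7; case 1: return 9; default: return 0; }
+;;   }
+;;
+;; the compact scheme emits "bl ___switch8" (or the 16/32-bit variant)
+;; immediately followed by the offset table; the helper indexes the table
+;; with the value in r0 and jumps, which is why r0 is a USE and ip/lr are
+;; clobbered above.  Which helper is chosen depends on the table's mode
+;; and on whether its offsets are unsigned.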
+
+;; APPLE LOCAL begin ARM setjmp/longjmp interworking
+;; Indirect jump with possible change between ARM/Thumb state
+(define_expand "indirect_jump_exchange"
+ [(unspec:SI [(match_operand:SI 0 "s_register_operand" "")]
+ UNSPEC_JMP_XCHG)]
+ "TARGET_EITHER"
+ ""
+)
+
+(define_insn "*arm_indirect_jump_exchange"
+ [(unspec:SI [(match_operand:SI 0 "s_register_operand" "r")]
+ UNSPEC_JMP_XCHG)]
+ "TARGET_ARM && (arm_arch4t)"
+ "bx\\t%0"
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "*thumb_indirect_jump_exchange"
+ [(unspec:SI [(match_operand:SI 0 "s_register_operand" "l*r")]
+ UNSPEC_JMP_XCHG)]
+ "TARGET_THUMB"
+ "bx\\t%0"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "2")]
+)
+;; APPLE LOCAL end ARM setjmp/longjmp interworking
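+
+;; Illustrative note (not part of the original sources): "bx" selects the
+;; instruction set from bit 0 of the target address (1 = Thumb, 0 = ARM),
+;; so a jump target saved by setjmp can be resumed in either state.  A C
+;; sketch of the address convention:
+;;
+;;   dest = (unsigned long) code_addr | thumb_bit;   /* thumb_bit is 0 or 1 */
+;;   /* "bx <reg holding dest>" then jumps and switches state as needed. */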
+
+(define_expand "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" ""))]
+ "TARGET_EITHER"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "
+ /* Thumb-2 doesn't have mov pc, reg. Explicitly set the low bit of the
+ address and use bx. */
+ if (TARGET_THUMB2)
+ {
+ rtx tmp;
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_iorsi3 (tmp, operands[0], GEN_INT (1)));
+ operands[0] = tmp;
+ }
+ "
+;; APPLE LOCAL end v7 support. Merge from mainline
+)
+
+;; NB Never uses BX.
+(define_insn "*arm_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ "TARGET_ARM"
+ "mov%?\\t%|pc, %0\\t%@ indirect register jump"
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ "TARGET_ARM"
+ "ldr%?\\t%|pc, %0\\t%@ indirect memory jump"
+ [(set_attr "type" "load1")
+ (set_attr "pool_range" "4096")
+ (set_attr "neg_pool_range" "4084")
+ (set_attr "predicable" "yes")]
+)
+
+;; NB Never uses BX.
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "l*r"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "mov\\tpc, %0"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "2")]
+)
+
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ "TARGET_EITHER"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "*
+ if (TARGET_UNIFIED_ASM)
+ return \"nop\";
+ if (TARGET_ARM)
+ return \"mov%?\\t%|r0, %|r0\\t%@ nop\";
+ return \"mov\\tr8, r8\";
+ "
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 2)
+ (const_int 4)))]
+)
+
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ "TARGET_ARM"
+ "%i1%?\\t%0, %2, %4%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "4")
+ (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
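+
+;; Illustrative note (not part of the original sources): these patterns fold
+;; a shift into the ALU operand for free, e.g. the C expression
+;;
+;;   int f (int a, int b) { return a + (b << 2); }
+;;
+;; can match *arith_shiftsi and emit the single instruction
+;; "add r0, r0, r1, lsl #2" (register choices are only a plausible example)
+;; rather than a separate shift followed by an add.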
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 2 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "")
+ (match_operand:SI 5 "reg_or_int_operand" "")])
+ (match_operand:SI 6 "s_register_operand" "")])
+ (match_operand:SI 7 "arm_rhs_operand" "")]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 8)
+ (match_op_dup 2 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 6)]))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 8) (match_dup 7)]))]
+ "")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ "TARGET_ARM"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_ARM"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set (attr "type") (if_then_else (match_operand 5 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ "TARGET_ARM"
+ "sub%?\\t%0, %1, %3%S2"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "3")
+ (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "TARGET_ARM"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_ARM"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set (attr "type") (if_then_else (match_operand 4 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_ARM"
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8")]
+)
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ if (operands[3] == const0_rtx)
+ {
+ if (GET_CODE (operands[1]) == LT)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == EQ)
+ return \"rsbs\\t%0, %2, #1\;movcc\\t%0, #0\";
+ }
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
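+
+;; Illustrative note (not part of the original sources): examples of the
+;; branch-free sequences *compare_scc can emit for a C comparison result
+;; (register choices are only plausible examples):
+;;
+;;   int lt0 (int a) { return a < 0; }   /* mov r0, r0, lsr #31 */
+;;   int eq0 (int a) { return a == 0; }  /* rsbs r0, r0, #1 ; movcc r0, #0 */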
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "arm_comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ "TARGET_ARM"
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set_attr "length" "4,4,8")]
+)
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? Is it worth using these conditional patterns in Thumb-2 mode?
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ "TARGET_ARM"
+ "*
+ {
+ static const char * const opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",
+ \"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\",
+ \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+ }"
+ [(set_attr "conds" "set")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ "TARGET_ARM"
+ "*
+ {
+ static const char * const opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+ }"
+ [(set_attr "conds" "set")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*cmp_and"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (and:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")]))
+ (const_int 0)))]
+ "TARGET_ARM"
+ "*
+ {
+ static const char *const opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",
+ \"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\",
+ \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+ }"
+ [(set_attr "conds" "set")
+ (set_attr "predicable" "no")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*cmp_ior"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (ior:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")]))
+ (const_int 0)))]
+ "TARGET_ARM"
+ "*
+{
+ static const char *const opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%D4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%D4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%D4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%D4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+ [(set_attr "conds" "set")
+ (set_attr "length" "8")]
+)
+
+(define_insn_and_split "*ior_scc_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_operator:SI 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_add_operand" "rIL")])
+ (match_operator:SI 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM
+ && (arm_select_dominance_cc_mode (operands[3], operands[6], DOM_CC_X_OR_Y)
+ != CCmode)"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 7)
+ (compare
+ (ior:SI
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+ (const_int 0)))
+ (set (match_dup 0) (ne:SI (match_dup 7) (const_int 0)))]
+ "operands[7]
+ = gen_rtx_REG (arm_select_dominance_cc_mode (operands[3], operands[6],
+ DOM_CC_X_OR_Y),
+ CC_REGNUM);"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "16")])
+
+; If the above pattern is followed by a CMP insn, then the compare is
+; redundant, since we can rework the conditional instruction that follows.
+(define_insn_and_split "*ior_scc_scc_cmp"
+ [(set (match_operand 0 "dominant_cc_register" "")
+ (compare (ior:SI (match_operator:SI 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_add_operand" "rIL")])
+ (match_operator:SI 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")]))
+ (const_int 0)))
+ (set (match_operand:SI 7 "s_register_operand" "=r")
+ (ior:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)])))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 0)
+ (compare
+ (ior:SI
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+ (const_int 0)))
+ (set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
+ ""
+ [(set_attr "conds" "set")
+ (set_attr "length" "16")])
+
+(define_insn_and_split "*and_scc_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator:SI 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_add_operand" "rIL")])
+ (match_operator:SI 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM
+ && (arm_select_dominance_cc_mode (operands[3], operands[6], DOM_CC_X_AND_Y)
+ != CCmode)"
+ "#"
+ "TARGET_ARM && reload_completed
+ && (arm_select_dominance_cc_mode (operands[3], operands[6], DOM_CC_X_AND_Y)
+ != CCmode)"
+ [(set (match_dup 7)
+ (compare
+ (and:SI
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+ (const_int 0)))
+ (set (match_dup 0) (ne:SI (match_dup 7) (const_int 0)))]
+ "operands[7]
+ = gen_rtx_REG (arm_select_dominance_cc_mode (operands[3], operands[6],
+ DOM_CC_X_AND_Y),
+ CC_REGNUM);"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "16")])
+
+; If the above pattern is followed by a CMP insn, then the compare is
+; redundant, since we can rework the conditional instruction that follows.
+(define_insn_and_split "*and_scc_scc_cmp"
+ [(set (match_operand 0 "dominant_cc_register" "")
+ (compare (and:SI (match_operator:SI 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_add_operand" "rIL")])
+ (match_operator:SI 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")]))
+ (const_int 0)))
+ (set (match_operand:SI 7 "s_register_operand" "=r")
+ (and:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)])))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 0)
+ (compare
+ (and:SI
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+ (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+ (const_int 0)))
+ (set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
+ ""
+ [(set_attr "conds" "set")
+ (set_attr "length" "16")])
+
+;; If there is no dominance in the comparison, then we can still save an
+;; instruction in the AND case, since we know that the second compare
+;; need only zero the value if false (if true, then the value is already
+;; correct).
+(define_insn_and_split "*and_scc_scc_nodom"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (match_operator:SI 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_add_operand" "rIL,0,rIL")])
+ (match_operator:SI 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL,rIL")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM
+ && (arm_select_dominance_cc_mode (operands[3], operands[6], DOM_CC_X_AND_Y)
+ == CCmode)"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))
+ (clobber (reg:CC CC_REGNUM))])
+ (set (match_dup 7) (match_op_dup 8 [(match_dup 4) (match_dup 5)]))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 6 [(match_dup 7) (const_int 0)])
+ (match_dup 0)
+ (const_int 0)))]
+ "operands[7] = gen_rtx_REG (SELECT_CC_MODE (GET_CODE (operands[6]),
+ operands[4], operands[5]),
+ CC_REGNUM);
+ operands[8] = gen_rtx_COMPARE (GET_MODE (operands[7]), operands[4],
+ operands[5]);"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "20")])
+
+(define_split
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ior:SI
+ (and:SI (match_operand:SI 0 "s_register_operand" "")
+ (const_int 1))
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")]))
+ (const_int 0)))
+ (clobber (match_operand:SI 4 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 4)
+ (ior:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])
+ (match_dup 0)))
+ (set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (and:SI (match_dup 4) (const_int 1))
+ (const_int 0)))]
+ "")
+
+(define_split
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (ior:SI
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (and:SI (match_operand:SI 0 "s_register_operand" "")
+ (const_int 1)))
+ (const_int 0)))
+ (clobber (match_operand:SI 4 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 4)
+ (ior:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])
+ (match_dup 0)))
+ (set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (and:SI (match_dup 4) (const_int 1))
+ (const_int 0)))]
+ "")
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? The conditional patterns above need checking for Thumb-2 usefulness
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
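+
+;; Illustrative note (not part of the original sources): C idioms *negscc
+;; handles, each yielding 0 or -1 without a branch (register choices are
+;; only plausible examples):
+;;
+;;   int m1 (int a)        { return -(a < 0);  }  /* mov r0, r0, asr #31 */
+;;   int m2 (int a, int b) { return -(a != b); }  /* subs r0, r0, r1 followed
+;;                                                   by mvnne r0, #0 */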
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")]
+)
+
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? The patterns below need checking for Thumb-2 usefulness.
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")))]
+ "TARGET_ARM"
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")]
+)
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))))]
+ "TARGET_ARM"
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")]
+)
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "arm_comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "arm_comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ "TARGET_ARM"
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+ [(set_attr "conds" "use")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ /* If we have an operation where (op x 0) is the identity operation, the
+ conditional operator is LT or GE, we are comparing against zero, and
+ everything is in registers, then we can do this in two instructions. */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ return \"mov%D6\\t%0, %1\";
+ return \"\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
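+
+;; Illustrative note (not part of the original sources): the two-instruction
+;; special case above turns the sign bit into an all-ones/all-zeros mask,
+;; e.g. for the C expression
+;;
+;;   int f (int c, int x, int y) { return c < 0 ? x + y : x; }
+;;
+;; it can emit "and r3, r2, r0, asr #31" then "add r3, r1, r3", masking the
+;; addend to zero when the condition is false instead of branching or
+;; conditionally moving.  Register choices are only a plausible example.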
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "arm_comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))]
+ "TARGET_ARM"
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")]
+)
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ /* If we have an operation where (op x 0) is the identity operation, the
+ conditional operator is LT or GE, we are comparing against zero, and
+ everything is in registers, then we can do this in two instructions. */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ return \"%I7%D6\\t%0, %2, %3\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))]
+ "TARGET_ARM"
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")]
+)
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ "TARGET_ARM"
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set_attr "length" "4,8,8")]
+)
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ "TARGET_ARM"
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set_attr "length" "4,8,8")]
+)
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ "TARGET_ARM"
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+ [(set_attr "conds" "use")
+ (set_attr "shift" "2")
+ (set_attr "length" "4,8,8")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ "TARGET_ARM"
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+ [(set_attr "conds" "use")
+ (set_attr "shift" "2")
+ (set_attr "length" "4,8,8")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set (attr "type") (if_then_else (match_operand 3 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "arm_comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ "TARGET_ARM"
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+ [(set_attr "conds" "use")
+ (set_attr "shift" "1")
+ (set_attr "length" "8")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov")
+ (set (attr "type") (if_then_else
+ (and (match_operand 2 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ "TARGET_ARM"
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "arm_comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ "TARGET_ARM"
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ "TARGET_ARM"
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")]
+)
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8,12")]
+)
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ "TARGET_ARM"
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")]
+)
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "TARGET_ARM && adjacent_mem_locations (operands[2], operands[3])"
+ "*
+ {
+ rtx ldm[3];
+ rtx arith[4];
+ rtx base_reg;
+ HOST_WIDE_INT val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+
+ base_reg = XEXP (operands[2], 0);
+
+ if (!REG_P (base_reg))
+ {
+ val1 = INTVAL (XEXP (base_reg, 1));
+ base_reg = XEXP (base_reg, 0);
+ }
+
+ if (!REG_P (XEXP (operands[3], 0)))
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+
+ ldm[0] = base_reg;
+ if (val1 != 0 && val2 != 0)
+ {
+ rtx ops[3];
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (val1 == 4 || val2 == 4)
+ /* Other val must be 8, since we know they are adjacent and neither
+ is zero. */
+ output_asm_insn (\"ldm%(ib%)\\t%0, {%1, %2}\", ldm);
+ else if (const_ok_for_arm (val1) || const_ok_for_arm (-val1))
+ {
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = base_reg;
+ ops[2] = GEN_INT (val1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else
+ {
+ /* Offset is out of range for a single add, so use two ldr. */
+ ops[0] = ldm[1];
+ ops[1] = base_reg;
+ ops[2] = GEN_INT (val1);
+ output_asm_insn (\"ldr%?\\t%0, [%1, %2]\", ops);
+ ops[0] = ldm[2];
+ ops[2] = GEN_INT (val2);
+ output_asm_insn (\"ldr%?\\t%0, [%1, %2]\", ops);
+ }
+ }
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ else if (val1 != 0)
+ {
+ if (val1 < val2)
+ output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ if (val1 < val2)
+ output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ }
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+ }"
+ [(set_attr "length" "12")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "load1")]
+)
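+
+;; Illustrative note (not part of the original sources): adjacent loads feed
+;; the arithmetic through a single load-multiple, e.g. for
+;;
+;;   struct pair { int a, b; };
+;;   int sum (struct pair *p) { return p->a + p->b; }
+;;
+;; the pattern can emit "ldmia r0, {r0, r3}" then "add r0, r0, r3" instead
+;; of two separate ldr instructions (register choices are only a plausible
+;; example).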
+
+; This pattern is never tried by combine, so do it as a peephole
+
+(define_peephole2
+ [(set (match_operand:SI 0 "arm_general_register_operand" "")
+ (match_operand:SI 1 "arm_general_register_operand" ""))
+ (set (reg:CC CC_REGNUM)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ "TARGET_ARM"
+ [(parallel [(set (reg:CC CC_REGNUM) (compare:CC (match_dup 1) (const_int 0)))
+ (set (match_dup 0) (match_dup 1))])]
+ ""
+)
+
+; Peepholes to spot possible load- and store-multiples; if the ordering is
+; reversed, check that the memory references aren't volatile.
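+; For illustration, the four-register peephole below can collapse
+;     ldr r0, [r4]
+;     ldr r1, [r4, #4]
+;     ldr r2, [r4, #8]
+;     ldr r3, [r4, #12]
+; into a single "ldmia r4, {r0, r1, r2, r3}", provided
+; load_multiple_sequence accepts the ordering and none of the memory
+; references is volatile.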
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "TARGET_ARM && load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+ "
+)
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "TARGET_ARM && load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+ "
+)
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "TARGET_ARM && load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+ "
+)
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "TARGET_ARM && store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+ "
+)
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "TARGET_ARM && store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+ "
+)
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "TARGET_ARM && store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+ "
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ "TARGET_ARM"
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ ""
+)
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
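+;; For example, when the constant's low 24 bits are clear, comparing
+;; (byte << 24) with it for equality is the same as comparing the
+;; zero-extended byte with (constant >> 24), so an ldrb plus cmp
+;; suffices; the split below rewrites the comparison in that form.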
+
+(define_split
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "TARGET_ARM
+ && (((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24)"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC CC_REGNUM) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+ "
+)
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? Check the patterns above for Thumb-2 usefulness
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ "TARGET_EITHER"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "if (TARGET_32BIT)
+ arm_expand_prologue ();
+ else
+ thumb1_expand_prologue ();
+ DONE;
+ "
+;; APPLE LOCAL end v7 support. Merge from mainline
+)
+
+(define_expand "epilogue"
+ [(clobber (const_int 0))]
+ "TARGET_EITHER"
+ "
+ if (current_function_calls_eh_return)
+ emit_insn (gen_prologue_use (gen_rtx_REG (Pmode, 2)));
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ if (TARGET_THUMB1)
+ thumb1_expand_epilogue ();
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ else if (USE_RETURN_INSN (FALSE))
+ {
+ emit_jump_insn (gen_return ());
+ DONE;
+ }
+ emit_jump_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
+ gen_rtvec (1,
+ gen_rtx_RETURN (VOIDmode)),
+ VUNSPEC_EPILOGUE));
+ DONE;
+ "
+)
+
+;; Note - although unspec_volatiles USE all hard registers,
+;; USEs are ignored after reload has completed. Thus we need
+;; to add an unspec of the link register to ensure that flow
+;; does not think that it is unused by the sibcall branch that
+;; will replace the standard function epilogue.
+(define_insn "sibcall_epilogue"
+ [(parallel [(unspec:SI [(reg:SI LR_REGNUM)] UNSPEC_PROLOGUE_USE)
+ (unspec_volatile [(return)] VUNSPEC_EPILOGUE)])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ if (use_return_insn (FALSE, next_nonnote_insn (insn)))
+ return output_return_instruction (const_true_rtx, FALSE, FALSE);
+ return arm_output_epilogue (next_nonnote_insn (insn));
+ "
+;; Length is absolute worst case
+ [(set_attr "length" "44")
+ (set_attr "type" "block")
+ ;; We don't clobber the conditions, but the potential length of this
+ ;; operation is sufficient to make conditionalizing the sequence
+ ;; unlikely to be profitable.
+ (set_attr "conds" "clob")]
+)
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(return)] VUNSPEC_EPILOGUE)]
+ "TARGET_EITHER"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "*
+ if (TARGET_32BIT)
+ return arm_output_epilogue (NULL);
+ else /* TARGET_THUMB1 */
+ return thumb_unexpanded_epilogue ();
+ "
+;; APPLE LOCAL end v7 support. Merge from mainline
+ ; Length is absolute worst case
+ [(set_attr "length" "44")
+ (set_attr "type" "block")
+ ;; We don't clobber the conditions, but the potential length of this
+ ;; operation is sufficient to make conditionalizing the sequence
+ ;; unlikely to be profitable.
+ (set_attr "conds" "clob")]
+)
+
+(define_expand "eh_epilogue"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:SI 1 "register_operand" ""))
+ (use (match_operand:SI 2 "register_operand" ""))]
+ "TARGET_EITHER"
+ "
+ {
+ cfun->machine->eh_epilogue_sp_ofs = operands[1];
+ if (GET_CODE (operands[2]) != REG || REGNO (operands[2]) != 2)
+ {
+ rtx ra = gen_rtx_REG (Pmode, 2);
+
+ emit_move_insn (ra, operands[2]);
+ operands[2] = ra;
+ }
+ /* This is a hack -- we may have crystallized the function type too
+ early. */
+ cfun->machine->func_type = 0;
+ }"
+)
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions added to them. We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work. However, this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
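+;; For illustration, the first split below turns
+;;     r0 = (r1 < r2) ? r0 : r3
+;; into a compare followed by a conditionally executed move:
+;;     cmp   r1, r2
+;;     movge r0, r3    @ the reversed condition writes the "else" arm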
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; ??? Need to audit these splitters for Thumb-2. Why isn't normal
+;; conditional execution sufficient?
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "arm_comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_dup 0)
+ (match_operand 4 "" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 5) (match_dup 6))
+ (cond_exec (match_dup 7)
+ (set (match_dup 0) (match_dup 4)))]
+ "
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
+ enum rtx_code rc = GET_CODE (operands[1]);
+
+ operands[5] = gen_rtx_REG (mode, CC_REGNUM);
+ operands[6] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
+ if (mode == CCFPmode || mode == CCFPEmode)
+ rc = reverse_condition_maybe_unordered (rc);
+ else
+ rc = reverse_condition (rc);
+
+ operands[7] = gen_rtx_fmt_ee (rc, VOIDmode, operands[5], const0_rtx);
+ }"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "arm_comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_dup 0)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 5) (match_dup 6))
+ (cond_exec (match_op_dup 1 [(match_dup 5) (const_int 0)])
+ (set (match_dup 0) (match_dup 4)))]
+ "
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
+
+ operands[5] = gen_rtx_REG (mode, CC_REGNUM);
+ operands[6] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
+ }"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "arm_comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (set (match_dup 0) (match_dup 4)))
+ (cond_exec (match_dup 8)
+ (set (match_dup 0) (match_dup 5)))]
+ "
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
+ enum rtx_code rc = GET_CODE (operands[1]);
+
+ operands[6] = gen_rtx_REG (mode, CC_REGNUM);
+ operands[7] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
+ if (mode == CCFPmode || mode == CCFPEmode)
+ rc = reverse_condition_maybe_unordered (rc);
+ else
+ rc = reverse_condition (rc);
+
+ operands[8] = gen_rtx_fmt_ee (rc, VOIDmode, operands[6], const0_rtx);
+ }"
+)
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM && reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (cond_exec (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (set (match_dup 0) (match_dup 4)))
+ (cond_exec (match_dup 8)
+ (set (match_dup 0) (not:SI (match_dup 5))))]
+ "
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
+ enum rtx_code rc = GET_CODE (operands[1]);
+
+ operands[6] = gen_rtx_REG (mode, CC_REGNUM);
+ operands[7] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
+ if (mode == CCFPmode || mode == CCFPEmode)
+ rc = reverse_condition_maybe_unordered (rc);
+ else
+ rc = reverse_condition (rc);
+
+ operands[8] = gen_rtx_fmt_ee (rc, VOIDmode, operands[6], const0_rtx);
+ }"
+)
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ "TARGET_ARM"
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+ [(set_attr "conds" "use")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mvn")
+ (set_attr "length" "4,8")]
+)
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence
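+;; For illustration, the first pattern below emits, for bit N:
+;;     ands  r0, r1, #(1 << N)   @ isolate the bit, setting the flags
+;;     mvnne r0, #0              @ a clear bit leaves 0; a set bit gives -1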
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_ARM"
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "12")]
+)
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? The above patterns need auditing for Thumb-2
+
+;; Push multiple registers to the stack. Registers are in parallel (use ...)
+;; expressions. For simplicity, the first register is also in the unspec
+;; part.
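+;; For illustration, saving {r4, r5, lr} in an ARM-mode prologue emits
+;;     stmfd sp!, {r4, r5, lr}
+;; whereas a single-register save uses "str rN, [sp, #-4]!", which is
+;; faster on at least the StrongARM.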
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")]
+ UNSPEC_PUSH_MULT))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT"
+ "*
+ {
+ int num_saves = XVECLEN (operands[2], 0);
+
+ /* For the StrongARM at least it is faster to
+ use STR to store only a single register.
+ In Thumb mode always use push, and the assembler will pick
+ something appropriate. */
+ if (num_saves == 1 && TARGET_ARM)
+ output_asm_insn (\"str\\t%1, [%m0, #-4]!\", operands);
+ else
+ {
+ int i;
+ char pattern[100];
+
+ if (TARGET_ARM)
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ else
+ strcpy (pattern, \"push\\t{%1\");
+
+ for (i = 1; i < num_saves; i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern,
+ reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i), 0))]);
+ }
+
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ }
+
+ return \"\";
+ }"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "store4")]
+)
+
+(define_insn "stack_tie"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r")]
+ UNSPEC_PRLG_STK))]
+ ""
+ ""
+ [(set_attr "length" "0")]
+)
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")]
+ UNSPEC_PUSH_MULT))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "*
+ {
+ char pattern[100];
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+ }"
+ [(set_attr "type" "f_store")]
+)
+
+;; Special patterns for dealing with the constant pool
+
+;; APPLE LOCAL begin ARM compact switch tables
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN)]
+ "TARGET_EITHER"
+ "*
+ assemble_align (32);
+ return \"\";
+ "
+ [(set (attr "length") (const_int 0))]
+)
+
+(define_insn "align_8"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_ALIGN8)]
+ "TARGET_EITHER"
+ "*
+ assemble_align (64);
+ return \"\";
+ "
+ [(set (attr "length") (const_int 0))]
+)
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] VUNSPEC_POOL_END)]
+ "TARGET_EITHER"
+ "*
+ making_const_table = FALSE;
+ return \"\";
+ "
+ [(set_attr "length" "0")]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+(define_insn "consttable_1"
+ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_1)]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ making_const_table = TRUE;
+ assemble_integer (operands[0], 1, BITS_PER_WORD, 1);
+ assemble_zeros (3);
+ return \"\";
+ "
+ [(set_attr "length" "4")]
+)
+
+(define_insn "consttable_2"
+ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_2)]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "*
+ making_const_table = TRUE;
+ assemble_integer (operands[0], 2, BITS_PER_WORD, 1);
+ assemble_zeros (2);
+ return \"\";
+ "
+ [(set_attr "length" "4")]
+)
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_4)]
+ "TARGET_EITHER"
+ "*
+ {
+ making_const_table = TRUE;
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
+ assemble_real (r, GET_MODE (operands[0]), BITS_PER_WORD);
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, BITS_PER_WORD, 1);
+ break;
+ }
+ return \"\";
+ }"
+ [(set_attr "length" "4")]
+)
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_8)]
+ "TARGET_EITHER"
+ "*
+ {
+ making_const_table = TRUE;
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
+ assemble_real (r, GET_MODE (operands[0]), BITS_PER_WORD);
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, BITS_PER_WORD, 1);
+ break;
+ }
+ return \"\";
+ }"
+ [(set_attr "length" "8")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "consttable_16"
+ [(unspec_volatile [(match_operand 0 "" "")] VUNSPEC_POOL_16)]
+ "TARGET_EITHER"
+ "*
+ {
+ making_const_table = TRUE;
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[0]);
+ assemble_real (r, GET_MODE (operands[0]), BITS_PER_WORD);
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 16, BITS_PER_WORD, 1);
+ break;
+ }
+ return \"\";
+ }"
+ [(set_attr "length" "16")]
+)
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; Miscellaneous Thumb patterns
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand:SI 0 "register_operand" ""))
+ (use (label_ref (match_operand 1 "" "")))])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "
+ if (flag_pic)
+ {
+ /* Hopefully, CSE will eliminate this copy. */
+ rtx reg1 = copy_addr_to_reg (gen_rtx_LABEL_REF (Pmode, operands[1]));
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (reg2, operands[0], reg1));
+ operands[0] = reg2;
+ }
+ "
+)
+
+;; NB never uses BX.
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_insn "*thumb1_tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "mov\\t%|pc, %0"
+ [(set_attr "length" "2")]
+)
+
+;; V5 instructions.
+
+(define_insn "clzsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (clz:SI (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch5"
+ "clz%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "insn" "clz")])
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_expand "ffssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ffs:SI (match_operand:SI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch5"
+ "
+ {
+ rtx t1, t2, t3;
+
+ t1 = gen_reg_rtx (SImode);
+ t2 = gen_reg_rtx (SImode);
+ t3 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_negsi2 (t1, operands[1]));
+ emit_insn (gen_andsi3 (t2, operands[1], t1));
+ emit_insn (gen_clzsi2 (t3, t2));
+ emit_insn (gen_subsi3 (operands[0], GEN_INT (32), t3));
+ DONE;
+ }"
+)
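+;; Worked example for the expansion above: for x = 8 (bit 3 set),
+;; x & -x isolates bit 3, clz gives 28, and 32 - 28 = 4, the 1-based
+;; index that ffs is defined to return.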
+
+(define_expand "ctzsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ctz:SI (match_operand:SI 1 "s_register_operand" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch5"
+ "
+ {
+ rtx t1, t2, t3;
+
+ t1 = gen_reg_rtx (SImode);
+ t2 = gen_reg_rtx (SImode);
+ t3 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_negsi2 (t1, operands[1]));
+ emit_insn (gen_andsi3 (t2, operands[1], t1));
+ emit_insn (gen_clzsi2 (t3, t2));
+ emit_insn (gen_subsi3 (operands[0], GEN_INT (31), t3));
+ DONE;
+ }"
+)
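+;; The ctz expansion differs only in the final constant: for x = 8,
+;; 31 - clz (x & -x) = 31 - 28 = 3, the number of trailing zero bits.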
+
+;; V5E instructions.
+
+(define_insn "prefetch"
+ [(prefetch (match_operand:SI 0 "address_operand" "p")
+ (match_operand:SI 1 "" "")
+ (match_operand:SI 2 "" ""))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && arm_arch5e"
+ "pld\\t%a0")
+
+;; General predication pattern
+
+(define_cond_exec
+ [(match_operator 0 "arm_comparison_operator"
+ [(match_operand 1 "cc_register" "")
+ (const_int 0)])]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT"
+ ""
+)
+
+;; APPLE LOCAL begin ARM compact switch tables
+(define_insn "prologue_use"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "")] UNSPEC_PROLOGUE_USE)]
+ ""
+ "%@ %0 needed for prologue"
+ [(set_attr "length" "0")]
+)
+;; APPLE LOCAL end ARM compact switch tables
+
+
+;; Patterns for exception handling
+
+(define_expand "eh_return"
+ [(use (match_operand 0 "general_operand" ""))]
+ "TARGET_EITHER"
+ "
+ {
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ if (TARGET_32BIT)
+ emit_insn (gen_arm_eh_return (operands[0]));
+ else
+ emit_insn (gen_thumb_eh_return (operands[0]));
+ DONE;
+ }"
+)
+
+;; We can't expand this before we know where the link register is stored.
+(define_insn_and_split "arm_eh_return"
+ [(unspec_volatile [(match_operand:SI 0 "s_register_operand" "r")]
+ VUNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&r"))]
+ "TARGET_ARM"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "
+ {
+ arm_set_return_address (operands[0], operands[1]);
+ DONE;
+ }"
+)
+
+(define_insn_and_split "thumb_eh_return"
+ [(unspec_volatile [(match_operand:SI 0 "s_register_operand" "l")]
+ VUNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&l"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_THUMB1"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "
+ {
+ thumb_set_return_address (operands[0], operands[1]);
+ DONE;
+ }"
+)
+
+;; APPLE LOCAL begin ARM 4382996 improve assignments of NE
+
+;; Handle ((x op y) != 0)
+(define_insn_and_split "*arm_binary_ne_0"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ne:SI (match_operator:SI 3 "binary_cc_noclobber_operator"
+ [(match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K")])
+ (const_int 0)))
+ (clobber (reg:CC_NOOV CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (match_op_dup:SI 3 [(match_dup 1) (match_dup 2)])
+ (const_int 0)))
+ (set (match_dup 0)
+ (match_op_dup:SI 3 [(match_dup 1) (match_dup 2)]))])
+ (set (match_dup 0)
+ (if_then_else:SI
+ (ne:SI (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (const_int 1) (match_dup 0)))]
+ ""
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
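+;; For illustration (assuming AND is accepted by
+;; binary_cc_noclobber_operator), "r0 = ((r1 & r2) != 0)" splits into
+;;     ands  r0, r1, r2    @ the operation itself sets the Z flag
+;;     movne r0, #1        @ replace any non-zero result with 1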
+
+;; A special pattern for ADD, because compare_scc gets recognized first,
+;; preventing the above form from being tried.
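+;; (The neg in the pattern below is the canonical form of that
+;; comparison: "(r1 + r2) != 0" is matched as "(-r1) != r2".)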
+
+(define_insn_and_split "*arm_add_ne_0"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ne:SI (neg:SI (match_operand:SI 1 "s_register_operand" "r,r"))
+ (match_operand:SI 2 "arm_not_operand" "rI,K")))
+ (clobber (reg:CC_NOOV CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (plus:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 1) (match_dup 2)))])
+ (set (match_dup 0)
+ (if_then_else:SI
+ (ne:SI (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (const_int 1) (match_dup 0)))]
+ ""
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+;; A special pattern for MULT, since it requires early clobber semantics.
+
+(define_insn_and_split "*arm_mul_ne_0"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,r,r")
+ (ne:SI (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_not_operand" "%?r,0,I,K"))
+ (const_int 0)))
+ (clobber (reg:CC_NOOV CC_REGNUM))]
+ "TARGET_ARM"
+ "#"
+ "TARGET_ARM && reload_completed"
+ [(parallel [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (mult:SI (match_dup 2) (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (mult:SI (match_dup 2) (match_dup 1)))])
+ (set (match_dup 0)
+ (if_then_else:SI
+ (ne:SI (reg:CC_NOOV CC_REGNUM) (const_int 0))
+ (const_int 1) (match_dup 0)))]
+ ""
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+;; APPLE LOCAL end ARM 4382996 improve assignments of NE
+
+;; TLS support
+
+(define_insn "load_tp_hard"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_TLS))]
+ "TARGET_HARD_TP"
+ "mrc%?\\tp15, 0, %0, c13, c0, 3\\t@ load_tp_hard"
+ [(set_attr "predicable" "yes")]
+)
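+;; (The mrc above reads the user read-only thread ID register in CP15
+;; c13 -- TPIDRURO on ARMv6K and later.)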
+
+;; Doesn't clobber R1-R3. Must use r0 for the first operand.
+(define_insn "load_tp_soft"
+ [(set (reg:SI 0) (unspec:SI [(const_int 0)] UNSPEC_TLS))
+ (clobber (reg:SI LR_REGNUM))
+ (clobber (reg:SI IP_REGNUM))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_SOFT_TP"
+ "bl\\t__aeabi_read_tp\\t@ load_tp_soft"
+ [(set_attr "conds" "clob")]
+)
+
+;; APPLE LOCAL begin ARM builtin_trap
+
+;; Darwin support
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "trap")
+;; APPLE LOCAL end ARM builtin_trap
+
+;; APPLE LOCAL begin bswap UXTB16 support
+(define_expand "bswapsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (bswap:SI (match_operand:SI 1 "s_register_operand" "")))]
+ "TARGET_EITHER && arm_arch6"
+ ""
+)
+
+(define_insn "*arm_bswapsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (bswap:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_ARM && arm_arch6"
+ "rev%?\\t%0, %1"
+ [(set_attr "predicable" "yes")]
+)
+
+(define_insn "*thumb_bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (bswap:SI (match_operand:SI 1 "register_operand" "l")))]
+ "TARGET_THUMB && arm_arch6"
+ "rev\\t%0, %1"
+ [(set_attr "length" "2")]
+)
+
+(define_expand "bswapdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "")
+ (bswap:DI (match_operand:DI 1 "s_register_operand" "")))]
+ "TARGET_EITHER && arm_arch6"
+ ""
+)
+
+(define_insn "*arm_bswapdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (bswap:DI (match_operand:DI 1 "s_register_operand" "r")))]
+ "TARGET_ARM && arm_arch6"
+ "rev%?\\t%Q0, %R1\;rev%?\\t%R0, %Q1"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*thumb_bswapdi2"
+ [(set (match_operand:DI 0 "register_operand" "=&l")
+ (bswap:DI (match_operand:DI 1 "register_operand" "l")))]
+ "TARGET_THUMB && arm_arch6"
+ "rev\\t%Q0, %R1\;rev\\t%R0, %Q1"
+ [(set_attr "length" "4")]
+)
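+;; In both DImode patterns above %Q is the low word and %R the high
+;; word, so each half is byte-reversed and the halves are exchanged;
+;; e.g. 0x0011223344556677 becomes 0x7766554433221100.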
+
+(define_insn "uxtb16"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")] UNSPEC_UXTB16))]
+ "TARGET_ARM && arm_arch6"
+ "uxtb16%?\\t%0, %1, ror %2"
+ [(set_attr "predicable" "yes")]
+)
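+;; (uxtb16 zero-extends bytes 0 and 2 of the rotated source into the
+;; two half-words of the result; e.g. with ror #0, 0xAABBCCDD yields
+;; 0x00BB00DD.)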
+;; APPLE LOCAL end bswap UXTB16 support
+
+;; Load the FPA co-processor patterns
+(include "fpa.md")
+;; Load the Maverick co-processor patterns
+(include "cirrus.md")
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+;; Vector bits common to IWMMXT and Neon
+(include "vec-common.md")
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; Load the Intel Wireless Multimedia Extension patterns
+(include "iwmmxt.md")
+;; Load the VFP co-processor patterns
+(include "vfp.md")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; Thumb-2 patterns
+(include "thumb2.md")
+;; APPLE LOCAL end v7 support. Merge from mainline
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+;; Neon patterns
+(include "neon.md")
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; APPLE LOCAL 6258536 atomic builtins
+(include "sync.md")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm.opt b/gcc-4.2.1-5666.3/gcc/config/arm/arm.opt
new file mode 100644
index 000000000..9b8b0d7a8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm.opt
@@ -0,0 +1,187 @@
+; Options for the ARM port of the compiler.
+
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mabi=
+Target RejectNegative Joined Var(target_abi_name)
+Specify an ABI
+
+mabort-on-noreturn
+Target Report Mask(ABORT_NORETURN)
+Generate a call to abort if a noreturn function returns
+
+mapcs
+Target RejectNegative Mask(APCS_FRAME) MaskExists Undocumented
+
+mapcs-float
+Target Report Mask(APCS_FLOAT)
+Pass FP arguments in FP registers
+
+mapcs-frame
+Target Report Mask(APCS_FRAME)
+Generate APCS conformant stack frames
+
+mapcs-reentrant
+Target Report Mask(APCS_REENT)
+Generate re-entrant, PIC code
+
+mapcs-stack-check
+Target Report Mask(APCS_STACK) Undocumented
+
+march=
+Target RejectNegative Joined
+Specify the name of the target architecture
+
+; APPLE LOCAL begin 6150882 use thumb2 by default for v7
+marm
+Target RejectNegative VarExists Var(thumb_option, 0) Undocumented
+; APPLE LOCAL end 6150882 use thumb2 by default for v7
+
+mbig-endian
+Target Report RejectNegative Mask(BIG_END)
+Assume target CPU is configured as big endian
+
+mcallee-super-interworking
+Target Report Mask(CALLEE_INTERWORKING)
+Thumb: Assume non-static functions may be called from ARM code
+
+mcaller-super-interworking
+Target Report Mask(CALLER_INTERWORKING)
+Thumb: Assume function pointers may go to non-Thumb aware code
+
+mcirrus-fix-invalid-insns
+Target Report Mask(CIRRUS_FIX_INVALID_INSNS)
+Cirrus: Place NOPs to avoid invalid instruction combinations
+
+mcpu=
+Target RejectNegative Joined
+Specify the name of the target CPU
+
+mfloat-abi=
+Target RejectNegative Joined Var(target_float_abi_name)
+Specify whether floating point hardware should be used
+
+mfp=
+Target RejectNegative Joined Undocumented Var(target_fpe_name)
+
+;; Now ignored.
+mfpe
+Target RejectNegative Mask(FPE) Undocumented
+
+mfpe=
+Target RejectNegative Joined Undocumented Var(target_fpe_name)
+
+mfpu=
+Target RejectNegative Joined Var(target_fpu_name)
+Specify the name of the target floating point hardware/format
+
+mhard-float
+Target RejectNegative
+Alias for -mfloat-abi=hard
+
+mlittle-endian
+Target Report RejectNegative InverseMask(BIG_END)
+Assume target CPU is configured as little endian
+
+; APPLE LOCAL begin mlong-branch for arm
+mlong-branch
+Target Mask(LONG_CALLS)
+Alias for -mlong-calls
+; APPLE LOCAL end mlong-branch for arm
+
+; APPLE LOCAL begin mlong-branch for arm
+mlong-calls
+Target Report Mask(LONG_CALLS) MaskExists
+Generate call insns as indirect calls, if necessary
+; APPLE LOCAL end mlong-branch for arm
+
+; APPLE LOCAL begin mlong-branch for arm
+mlongcall
+Target Mask(LONG_CALLS) MaskExists
+Alias for -mlong-calls
+; APPLE LOCAL end mlong-branch for arm
+
+mpic-register=
+Target RejectNegative Joined Var(arm_pic_register_string)
+Specify the register to be used for PIC addressing
+
+mpoke-function-name
+Target Report Mask(POKE_FUNCTION_NAME)
+Store function names in object code
+
+msched-prolog
+Target Report Mask(SCHED_PROLOG)
+Permit scheduling of a function's prologue sequence
+
+msingle-pic-base
+Target Report Mask(SINGLE_PIC_BASE)
+Do not load the PIC register in function prologues
+
+msoft-float
+Target RejectNegative
+Alias for -mfloat-abi=soft
+
+mstructure-size-boundary=
+Target RejectNegative Joined Var(structure_size_string)
+Specify the minimum bit alignment of structures
+
+; APPLE LOCAL begin 6150882 use thumb2 by default for v7
+mthumb
+Target Report Var(thumb_option) Init(-1)
+Compile for the Thumb, not the ARM
+; APPLE LOCAL end 6150882 use thumb2 by default for v7
+
+; APPLE LOCAL begin ARM interworking
+mthumb-interwork
+Target Report Var(interwork_option) Init(-1)
+Support calls between Thumb and ARM instruction sets
+; APPLE LOCAL end ARM interworking
+
+mtp=
+Target RejectNegative Joined Var(target_thread_switch)
+Specify how to access the thread pointer
+
+mtpcs-frame
+Target Report Mask(TPCS_FRAME)
+Thumb: Generate (non-leaf) stack frames even if not needed
+
+mtpcs-leaf-frame
+Target Report Mask(TPCS_LEAF_FRAME)
+Thumb: Generate (leaf) stack frames even if not needed
+
+mtune=
+Target RejectNegative Joined
+Tune code for the given processor
+
+mwords-little-endian
+Target Report RejectNegative Mask(LITTLE_WORDS)
+Assume big endian bytes, little endian words
+
+; APPLE LOCAL begin 5946347 ms_struct support
+mms-bitfields
+Target Report Mask(MS_BITFIELD_LAYOUT)
+Use Microsoft structure layout
+; APPLE LOCAL end 5946347 ms_struct support
+; APPLE LOCAL begin v7 support. Merge from Codesourcery
+
+mvectorize-with-neon-quad
+Target Report Mask(NEON_VECTORIZE_QUAD)
+Use Neon quad-word (rather than double-word) registers for vectorization
+; APPLE LOCAL end v7 support. Merge from Codesourcery
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm1020e.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm1020e.md
new file mode 100644
index 000000000..32a5d95e9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm1020e.md
@@ -0,0 +1,388 @@
+;; ARM 1020E & ARM 1022E Pipeline Description
+;; Copyright (C) 2005 Free Software Foundation, Inc.
+;; Contributed by Richard Earnshaw (richard.earnshaw@arm.com)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+;; These descriptions are based on the information contained in the
+;; ARM1020E Technical Reference Manual, Copyright (c) 2003 ARM
+;; Limited.
+;;
+
+;; This automaton provides a pipeline description for the ARM
+;; 1020E core.
+;;
+;; The model given here assumes that the condition for all conditional
+;; instructions is "true", i.e., that all of the instructions are
+;; actually executed.
+
+(define_automaton "arm1020e")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Pipelines
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; There are two pipelines:
+;;
+;; - An Arithmetic Logic Unit (ALU) pipeline.
+;;
+;; The ALU pipeline has fetch, issue, decode, execute, memory, and
+;; write stages. We only need to model the execute, memory and write
+;; stages.
+;;
+;; - A Load-Store Unit (LSU) pipeline.
+;;
+;; The LSU pipeline has decode, execute, memory, and write stages.
+;; We only model the execute, memory and write stages.
+
+(define_cpu_unit "1020a_e,1020a_m,1020a_w" "arm1020e")
+(define_cpu_unit "1020l_e,1020l_m,1020l_w" "arm1020e")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; ALU Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ALU instructions require three cycles to execute, and use the ALU
+;; pipeline in each of the three stages. The results are available
+;; after the execute stage has finished.
+;;
+;; If the destination register is the PC, the pipelines are stalled
+;; for several cycles. That case is not modeled here.
+
+;; ALU operations with no shifted operand
+(define_insn_reservation "1020alu_op" 1
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "alu"))
+ "1020a_e,1020a_m,1020a_w")
+
+;; ALU operations with a shift-by-constant operand
+(define_insn_reservation "1020alu_shift_op" 1
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "alu_shift"))
+ "1020a_e,1020a_m,1020a_w")
+
+;; ALU operations with a shift-by-register operand
+;; These really stall in the decoder, in order to read
+;; the shift value in a second cycle. Pretend we take two cycles in
+;; the execute stage.
+(define_insn_reservation "1020alu_shift_reg_op" 2
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "alu_shift_reg"))
+ "1020a_e*2,1020a_m,1020a_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Multiplication Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiplication instructions loop in the execute stage until the
+;; instruction has been passed through the multiplier array enough
+;; times.
+
+;; The result of the "smul" and "smulw" instructions is not available
+;; until after the memory stage.
+(define_insn_reservation "1020mult1" 2
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "smulxy,smulwy"))
+ "1020a_e,1020a_m,1020a_w")
+
+;; The "smlaxy" and "smlawx" instructions require two iterations through
+;; the execute stage; the result is available immediately following
+;; the execute stage.
+(define_insn_reservation "1020mult2" 2
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "smlaxy,smlalxy,smlawx"))
+ "1020a_e*2,1020a_m,1020a_w")
+
+;; The "smlalxy", "mul", and "mla" instructions require two iterations
+;; through the execute stage; the result is not available until after
+;; the memory stage.
+(define_insn_reservation "1020mult3" 3
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "smlalxy,mul,mla"))
+ "1020a_e*2,1020a_m,1020a_w")
+
+;; The "muls" and "mlas" instructions loop in the execute stage for
+;; four iterations in order to set the flags. The value result is
+;; available after three iterations.
+(define_insn_reservation "1020mult4" 3
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "muls,mlas"))
+ "1020a_e*4,1020a_m,1020a_w")
+
+;; Long multiply instructions that produce two registers of
+;; output (such as umull) make their results available in two cycles;
+;; the least significant word is available before the most significant
+;; word. That fact is not modeled; instead, the instructions are
+;; described as if the entire result were available at the end of the
+;; cycle in which both words are available.
+
+;; The "umull", "umlal", "smull", and "smlal" instructions all take
+;; three iterations through the execute cycle, and make their results
+;; available after the memory cycle.
+(define_insn_reservation "1020mult5" 4
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "umull,umlal,smull,smlal"))
+ "1020a_e*3,1020a_m,1020a_w")
+
+;; The "umulls", "umlals", "smulls", and "smlals" instructions loop in
+;; the execute stage for five iterations in order to set the flags.
+;; The value result is available after four iterations.
+(define_insn_reservation "1020mult6" 4
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "insn" "umulls,umlals,smulls,smlals"))
+ "1020a_e*5,1020a_m,1020a_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Load/Store Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The models for load/store instructions do not accurately describe
+;; the difference between operations with and without a base register writeback
+;; (such as "ldm!"). These models assume that all memory references
+;; hit in dcache.
+
+;; LSU instructions require six cycles to execute. They use the ALU
+;; pipeline in all but the 5th cycle, and the LSU pipeline in cycles
+;; three through six.
+;; Loads and stores which use a scaled register offset or scaled
+;; register pre-indexed addressing mode take three cycles EXCEPT for
+;; those that are base + offset with LSL of 0 or 2, or base - offset
+;; with LSL of 0. The remainder take 1 cycle to execute.
+;; For 4-byte loads there is a bypass from the load stage.
+
+(define_insn_reservation "1020load1_op" 2
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "load_byte,load1"))
+ "1020a_e+1020l_e,1020l_m,1020l_w")
+
+(define_insn_reservation "1020store1_op" 0
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "store1"))
+ "1020a_e+1020l_e,1020l_m,1020l_w")
+
+;; A load's result can be stored by an immediately following store
+(define_bypass 1 "1020load1_op" "1020store1_op" "arm_no_early_store_addr_dep")
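+;; e.g. "ldr r0, [r1]" immediately followed by "str r0, [r2]" incurs
+;; only the one-cycle bypass latency, provided r0 does not feed the
+;; store's address calculation.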
+
+;; On a LDM/STM operation, the LSU pipeline iterates until all of the
+;; registers have been processed.
+;;
+;; The time it takes to load the data depends on whether or not the
+;; base address is 64-bit aligned; if it is not, an additional cycle
+;; is required. This model assumes that the address is always 64-bit
+;; aligned. Because the processor can load two registers per cycle,
+;; that assumption means that we use the same instruction reservations
+;; for loading 2k and 2k - 1 registers.
+;;
+;; The ALU pipeline is decoupled after the first cycle unless there is
+;; a register dependency; the dependency is cleared as soon as the LDM/STM
+;; has dealt with the corresponding register. So for example,
+;; stmia sp, {r0-r3}
+;; add r0, r0, #4
+;; will have one fewer stalls than
+;; stmia sp, {r0-r3}
+;; add r3, r3, #4
+;;
+;; As with ALU operations, if one of the destination registers is the
+;; PC, there are additional stalls; that is not modeled.
+
+(define_insn_reservation "1020load2_op" 2
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "load2"))
+ "1020a_e+1020l_e,1020l_m,1020l_w")
+
+(define_insn_reservation "1020store2_op" 0
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "store2"))
+ "1020a_e+1020l_e,1020l_m,1020l_w")
+
+(define_insn_reservation "1020load34_op" 3
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "load3,load4"))
+ "1020a_e+1020l_e,1020l_e+1020l_m,1020l_m,1020l_w")
+
+(define_insn_reservation "1020store34_op" 0
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "store3,store4"))
+ "1020a_e+1020l_e,1020l_e+1020l_m,1020l_m,1020l_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Branch and Call Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Branch instructions are difficult to model accurately. The ARM
+;; core can predict most branches. If the branch is predicted
+;; correctly, and predicted early enough, the branch can be completely
+;; eliminated from the instruction stream. Some branches can
+;; therefore appear to require zero cycles to execute. We assume that
+;; all branches are predicted correctly, and that the latency is
+;; therefore the minimum value.
+
+(define_insn_reservation "1020branch_op" 0
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "branch"))
+ "1020a_e")
+
+;; The latency for a call is not predictable. Therefore, we use 32 as
+;; roughly equivalent to positive infinity.
+
+(define_insn_reservation "1020call_op" 32
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "type" "call"))
+ "1020a_e*32")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; VFP
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_cpu_unit "v10_fmac" "arm1020e")
+
+(define_cpu_unit "v10_ds" "arm1020e")
+
+(define_cpu_unit "v10_fmstat" "arm1020e")
+
+(define_cpu_unit "v10_ls1,v10_ls2,v10_ls3" "arm1020e")
+
+;; fmstat is a serializing instruction. It will stall the core until
+;; the mac and ds units have completed.
+(exclusion_set "v10_fmac,v10_ds" "v10_fmstat")
+
+(define_attr "vfp10" "yes,no"
+ (const (if_then_else (and (eq_attr "tune" "arm1020e,arm1022e")
+ (eq_attr "fpu" "vfp"))
+ (const_string "yes") (const_string "no"))))
+
+;; The VFP "type" attributes differ from those used in the FPA model.
+;; ffarith Fast floating point insns, e.g. abs, neg, cpy, cmp.
+;; farith Most arithmetic insns.
+;; fmul Double precision multiply.
+;; fdivs Single precision sqrt or division.
+;; fdivd Double precision sqrt or division.
+;; f_flag fmstat operation.
+;; f_load Floating point load from memory.
+;; f_store Floating point store to memory.
+;; f_2_r Transfer vfp to arm reg.
+;; r_2_f Transfer arm to vfp reg.
+
+;; Note, no instruction can issue to the VFP if the core is stalled in the
+;; first execute stage. We model this by using 1020a_e in the first cycle.
+(define_insn_reservation "v10_ffarith" 5
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "ffarith"))
+ "1020a_e+v10_fmac")
+
+(define_insn_reservation "v10_farith" 5
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "farith"))
+ "1020a_e+v10_fmac")
+
+(define_insn_reservation "v10_cvt" 5
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_cvt"))
+ "1020a_e+v10_fmac")
+
+(define_insn_reservation "v10_fmul" 6
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "fmul"))
+ "1020a_e+v10_fmac*2")
+
+(define_insn_reservation "v10_fdivs" 18
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "fdivs"))
+ "1020a_e+v10_ds*14")
+
+(define_insn_reservation "v10_fdivd" 32
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "fdivd"))
+ "1020a_e+v10_fmac+v10_ds*28")
+
+(define_insn_reservation "v10_floads" 4
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_loads"))
+ "1020a_e+1020l_e+v10_ls1,v10_ls2")
+
+;; We model a load of a double as needing all the vfp ls* stages in cycle 1.
+;; This gives the correct mix between single and double loads, where an flds
+;; followed by an fldd will stall for one cycle, but two back-to-back fldd
+;; insns stall for two cycles.
+(define_insn_reservation "v10_floadd" 5
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_loadd"))
+ "1020a_e+1020l_e+v10_ls1+v10_ls2+v10_ls3,v10_ls2+v10_ls3,v10_ls3")
+
+;; Moves to/from arm regs also use the load/store pipeline.
+
+(define_insn_reservation "v10_c2v" 4
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "r_2_f"))
+ "1020a_e+1020l_e+v10_ls1,v10_ls2")
+
+(define_insn_reservation "v10_fstores" 1
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_stores"))
+ "1020a_e+1020l_e+v10_ls1,v10_ls2")
+
+(define_insn_reservation "v10_fstored" 1
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_stored"))
+ "1020a_e+1020l_e+v10_ls1+v10_ls2+v10_ls3,v10_ls2+v10_ls3,v10_ls3")
+
+(define_insn_reservation "v10_v2c" 1
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_2_r"))
+ "1020a_e+1020l_e,1020l_m,1020l_w")
+
+(define_insn_reservation "v10_to_cpsr" 2
+ (and (eq_attr "vfp10" "yes")
+ (eq_attr "type" "f_flag"))
+ "1020a_e+v10_fmstat,1020a_e+1020l_e,1020l_m,1020l_w")
+
+;; VFP bypasses
+
+;; There are bypasses for most operations other than store
+
+(define_bypass 3
+ "v10_c2v,v10_floads"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd,v10_cvt")
+
+(define_bypass 4
+ "v10_floadd"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd")
+
+;; Arithmetic to other arithmetic saves a cycle due to forwarding
+(define_bypass 4
+ "v10_ffarith,v10_farith"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd")
+
+(define_bypass 5
+ "v10_fmul"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd")
+
+(define_bypass 17
+ "v10_fdivs"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd")
+
+(define_bypass 31
+ "v10_fdivd"
+ "v10_ffarith,v10_farith,v10_fmul,v10_fdivs,v10_fdivd")
+
+;; VFP anti-dependencies.
+
+;; There is one anti-dependence in the following case (not yet modelled):
+;; - After a store: one extra cycle for both fsts and fstd
+;; Note, back-to-back fstd instructions will overload the load/store datapath
+;; causing a two-cycle stall.
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm1026ejs.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm1026ejs.md
new file mode 100644
index 000000000..a2404ecea
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm1026ejs.md
@@ -0,0 +1,241 @@
+;; ARM 1026EJ-S Pipeline Description
+;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+;; These descriptions are based on the information contained in the
+;; ARM1026EJ-S Technical Reference Manual, Copyright (c) 2003 ARM
+;; Limited.
+;;
+
+;; This automaton provides a pipeline description for the ARM
+;; 1026EJ-S core.
+;;
+;; The model given here assumes that the condition for all conditional
+;; instructions is "true", i.e., that all of the instructions are
+;; actually executed.
+
+(define_automaton "arm1026ejs")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Pipelines
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; There are two pipelines:
+;;
+;; - An Arithmetic Logic Unit (ALU) pipeline.
+;;
+;; The ALU pipeline has fetch, issue, decode, execute, memory, and
+;; write stages. We only need to model the execute, memory and write
+;; stages.
+;;
+;; - A Load-Store Unit (LSU) pipeline.
+;;
+;; The LSU pipeline has decode, execute, memory, and write stages.
+;; We only model the execute, memory and write stages.
+
+(define_cpu_unit "a_e,a_m,a_w" "arm1026ejs")
+(define_cpu_unit "l_e,l_m,l_w" "arm1026ejs")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; ALU Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ALU instructions require three cycles to execute, and use the ALU
+;; pipeline in each of the three stages. The results are available
+;; after the execute stage has finished.
+;;
+;; If the destination register is the PC, the pipelines are stalled
+;; for several cycles. That case is not modeled here.
+
+;; ALU operations with no shifted operand
+(define_insn_reservation "alu_op" 1
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "alu"))
+ "a_e,a_m,a_w")
+
+;; ALU operations with a shift-by-constant operand
+(define_insn_reservation "alu_shift_op" 1
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "alu_shift"))
+ "a_e,a_m,a_w")
+
+;; ALU operations with a shift-by-register operand
+;; These really stall in the decoder, in order to read
+;; the shift value in a second cycle. Pretend we take two cycles in
+;; the execute stage.
+(define_insn_reservation "alu_shift_reg_op" 2
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "alu_shift_reg"))
+ "a_e*2,a_m,a_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Multiplication Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiplication instructions loop in the execute stage until the
+;; instruction has been passed through the multiplier array enough
+;; times.
+
+;; The result of the "smul" and "smulw" instructions is not available
+;; until after the memory stage.
+(define_insn_reservation "mult1" 2
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "smulxy,smulwy"))
+ "a_e,a_m,a_w")
+
+;; The "smlaxy" and "smlawx" instructions require two iterations through
+;; the execute stage; the result is available immediately following
+;; the execute stage.
+(define_insn_reservation "mult2" 2
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "smlaxy,smlalxy,smlawx"))
+ "a_e*2,a_m,a_w")
+
+;; The "smlalxy", "mul", and "mla" instructions require two iterations
+;; through the execute stage; the result is not available until after
+;; the memory stage.
+(define_insn_reservation "mult3" 3
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "smlalxy,mul,mla"))
+ "a_e*2,a_m,a_w")
+
+;; The "muls" and "mlas" instructions loop in the execute stage for
+;; four iterations in order to set the flags. The value result is
+;; available after three iterations.
+(define_insn_reservation "mult4" 3
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "muls,mlas"))
+ "a_e*4,a_m,a_w")
+
+;; Long multiply instructions that produce two registers of
+;; output (such as umull) make their results available in two cycles;
+;; the least significant word is available before the most significant
+;; word. That fact is not modeled; instead, the instructions are
+;; described as if the entire result were available at the end of the
+;; cycle in which both words are available.
+
+;; The "umull", "umlal", "smull", and "smlal" instructions all take
+;; three iterations through the execute cycle, and make their results
+;; available after the memory cycle.
+(define_insn_reservation "mult5" 4
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "umull,umlal,smull,smlal"))
+ "a_e*3,a_m,a_w")
+
+;; The "umulls", "umlals", "smulls", and "smlals" instructions loop in
+;; the execute stage for five iterations in order to set the flags.
+;; The value result is available after four iterations.
+(define_insn_reservation "mult6" 4
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "insn" "umulls,umlals,smulls,smlals"))
+ "a_e*5,a_m,a_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Load/Store Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The models for load/store instructions do not accurately describe
+;; the difference between operations with and without a base register writeback
+;; (such as "ldm!"). These models assume that all memory references
+;; hit in dcache.
+
+;; LSU instructions require six cycles to execute. They use the ALU
+;; pipeline in all but the 5th cycle, and the LSU pipeline in cycles
+;; three through six.
+;; Loads and stores which use a scaled register offset or scaled
+;; register pre-indexed addressing mode take three cycles EXCEPT for
+;; those that are base + offset with LSL of 0 or 2, or base - offset
+;; with LSL of 0. The remainder take 1 cycle to execute.
+;; For 4-byte loads there is a bypass from the load stage.
+
+(define_insn_reservation "load1_op" 2
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "load_byte,load1"))
+ "a_e+l_e,l_m,a_w+l_w")
+
+(define_insn_reservation "store1_op" 0
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "store1"))
+ "a_e+l_e,l_m,a_w+l_w")
+
+;; A load's result can be stored by an immediately following store
+(define_bypass 1 "load1_op" "store1_op" "arm_no_early_store_addr_dep")
+
+;; On a LDM/STM operation, the LSU pipeline iterates until all of the
+;; registers have been processed.
+;;
+;; The time it takes to load the data depends on whether or not the
+;; base address is 64-bit aligned; if it is not, an additional cycle
+;; is required. This model assumes that the address is always 64-bit
+;; aligned. Because the processor can load two registers per cycle,
+;; that assumption means that we use the same instruction reservations
+;; for loading 2k and 2k - 1 registers.
+;;
+;; The ALU pipeline is stalled until the completion of the last memory
+;; stage in the LSU pipeline. That is modeled by keeping the ALU
+;; execute stage busy until that point.
+;;
+;; As with ALU operations, if one of the destination registers is the
+;; PC, there are additional stalls; that is not modeled.
+
+(define_insn_reservation "load2_op" 2
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "load2"))
+ "a_e+l_e,l_m,a_w+l_w")
+
+(define_insn_reservation "store2_op" 0
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "store2"))
+ "a_e+l_e,l_m,a_w+l_w")
+
+(define_insn_reservation "load34_op" 3
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "load3,load4"))
+ "a_e+l_e,a_e+l_e+l_m,a_e+l_m,a_w+l_w")
+
+(define_insn_reservation "store34_op" 0
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "store3,store4"))
+ "a_e+l_e,a_e+l_e+l_m,a_e+l_m,a_w+l_w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Branch and Call Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Branch instructions are difficult to model accurately. The ARM
+;; core can predict most branches. If the branch is predicted
+;; correctly, and predicted early enough, the branch can be completely
+;; eliminated from the instruction stream. Some branches can
+;; therefore appear to require zero cycles to execute. We assume that
+;; all branches are predicted correctly, and that the latency is
+;; therefore the minimum value.
+
+(define_insn_reservation "branch_op" 0
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "branch"))
+ "nothing")
+
+;; The latency for a call is not predictable. Therefore, we use 32 as
+;; roughly equivalent to positive infinity.
+
+(define_insn_reservation "call_op" 32
+ (and (eq_attr "tune" "arm1026ejs")
+ (eq_attr "type" "call"))
+ "nothing")
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm1136jfs.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm1136jfs.md
new file mode 100644
index 000000000..5044a87e0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm1136jfs.md
@@ -0,0 +1,387 @@
+;; ARM 1136J[F]-S Pipeline Description
+;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+;; These descriptions are based on the information contained in the
+;; ARM1136JF-S Technical Reference Manual, Copyright (c) 2003 ARM
+;; Limited.
+;;
+
+;; This automaton provides a pipeline description for the ARM
+;; 1136J-S and 1136JF-S cores.
+;;
+;; The model given here assumes that the condition for all conditional
+;; instructions is "true", i.e., that all of the instructions are
+;; actually executed.
+
+(define_automaton "arm1136jfs")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Pipelines
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; There are three distinct pipelines (page 1-26 and following):
+;;
+;; - A 4-stage decode pipeline, shared by the three execution pipelines
+;; below. It has fetch (1), fetch (2), decode, and issue stages. Since
+;; this is always involved, we do not model it in the scheduler.
+;;
+;; - A 4-stage ALU pipeline. It has shifter, ALU (main integer operations),
+;; and saturation stages. The fourth stage is writeback; see below.
+;;
+;; - A 4-stage multiply-accumulate pipeline. It has three stages, called
+;; MAC1 through MAC3, and a fourth writeback stage.
+;;
+;; The 4th-stage writeback is shared between the ALU and MAC pipelines,
+;; which operate in lockstep. Results from either pipeline will be
+;; moved into the writeback stage. Because the two pipelines operate
+;; in lockstep, we schedule them as a single "execute" pipeline.
+;;
+;; - A 4-stage LSU pipeline. It has address generation, data cache (1),
+;; data cache (2), and writeback stages. (Note that this pipeline,
+;; including the writeback stage, is independent of the ALU & MAC pipes.)
+
+(define_cpu_unit "e_1,e_2,e_3,e_wb" "arm1136jfs") ; ALU and MAC
+; e_1 = Sh/Mac1, e_2 = ALU/Mac2, e_3 = SAT/Mac3
+(define_cpu_unit "l_a,l_dc1,l_dc2,l_wb" "arm1136jfs") ; Load/Store
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; ALU Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ALU instructions traverse eight pipeline stages in all, but only the
+;; four execute-side stages are modelled here, one per cycle. The
+;; results are available after the ALU (e_2) stage has finished.
+;;
+;; If the destination register is the PC, the pipelines are stalled
+;; for several cycles. That case is not modelled here.
+
+;; ALU operations with no shifted operand
+(define_insn_reservation "11_alu_op" 2
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "alu"))
+ "e_1,e_2,e_3,e_wb")
+
+;; ALU operations with a shift-by-constant operand
+(define_insn_reservation "11_alu_shift_op" 2
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "alu_shift"))
+ "e_1,e_2,e_3,e_wb")
+
+;; ALU operations with a shift-by-register operand
+;; These really stall in the decoder, in order to read
+;; the shift value in a second cycle. Pretend we take two cycles in
+;; the shift stage.
+(define_insn_reservation "11_alu_shift_reg_op" 3
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "alu_shift_reg"))
+ "e_1*2,e_2,e_3,e_wb")
+
+;; ALU ops can start sooner if there is no shifter dependency.
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_alu_op")
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_alu_op")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Multiplication Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiplication instructions loop in the first two execute stages until
+;; the instruction has been passed through the multiplier array enough
+;; times.
+
+;; Multiply and multiply-accumulate results are available after four stages.
+(define_insn_reservation "11_mult1" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "mul,mla"))
+ "e_1*2,e_2,e_3,e_wb")
+
+;; The *S variants set the condition flags, which requires three more cycles.
+(define_insn_reservation "11_mult2" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "muls,mlas"))
+ "e_1*2,e_2,e_3,e_wb")
+
+(define_bypass 3 "11_mult1,11_mult2"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 3 "11_mult1,11_mult2"
+ "11_alu_op")
+(define_bypass 3 "11_mult1,11_mult2"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 3 "11_mult1,11_mult2"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 3 "11_mult1,11_mult2"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+
+;; Signed and unsigned multiply long results are available across two cycles;
+;; the less significant word is available one cycle before the more significant
+;; word. Here we conservatively wait until both are available, which is
+;; after three iterations and the memory cycle. The same is also true of
+;; the two multiply-accumulate instructions.
+(define_insn_reservation "11_mult3" 5
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "smull,umull,smlal,umlal"))
+ "e_1*3,e_2,e_3,e_wb*2")
+
+;; The *S variants set the condition flags, which requires three more cycles.
+(define_insn_reservation "11_mult4" 5
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "smulls,umulls,smlals,umlals"))
+ "e_1*3,e_2,e_3,e_wb*2")
+
+(define_bypass 4 "11_mult3,11_mult4"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 4 "11_mult3,11_mult4"
+ "11_alu_op")
+(define_bypass 4 "11_mult3,11_mult4"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 4 "11_mult3,11_mult4"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 4 "11_mult3,11_mult4"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+
+;; Various 16x16->32 multiplies and multiply-accumulates, using combinations
+;; of high and low halves of the argument registers. They take a single
+;; pass through the pipeline and make the result available after three
+;; cycles.
+(define_insn_reservation "11_mult5" 3
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "smulxy,smlaxy,smulwy,smlawy,smuad,smuadx,smlad,smladx,smusd,smusdx,smlsd,smlsdx"))
+ "e_1,e_2,e_3,e_wb")
+
+(define_bypass 2 "11_mult5"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 2 "11_mult5"
+ "11_alu_op")
+(define_bypass 2 "11_mult5"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 2 "11_mult5"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 2 "11_mult5"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+
+;; The same idea, but here the 32-bit result is added to a 64-bit quantity.
+(define_insn_reservation "11_mult6" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "smlalxy"))
+ "e_1*2,e_2,e_3,e_wb*2")
+
+;; Signed 32x32 multiply, then the most significant 32 bits are extracted
+;; and are available after the memory stage.
+(define_insn_reservation "11_mult7" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "insn" "smmul,smmulr"))
+ "e_1*2,e_2,e_3,e_wb")
+
+(define_bypass 3 "11_mult6,11_mult7"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 3 "11_mult6,11_mult7"
+ "11_alu_op")
+(define_bypass 3 "11_mult6,11_mult7"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 3 "11_mult6,11_mult7"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 3 "11_mult6,11_mult7"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Branch Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; These vary greatly depending on their arguments and the results of
+;; branch prediction. Cycle counts range from zero (unconditional branch,
+;; folded dynamic prediction) to seven (incorrect predictions, etc.). We
+;; assume an optimal case for now, because the cost of a cache miss
+;; overwhelms the cost of everything else anyhow.
+
+(define_insn_reservation "11_branches" 0
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "branch"))
+ "nothing")
+
+;; Call latencies are not predictable. A semi-arbitrary very large
+;; number is used as "positive infinity" so that everything should be
+;; finished by the time of return.
+(define_insn_reservation "11_call" 32
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "call"))
+ "nothing")
+
+;; Branches are predicted. A correctly predicted branch costs nothing,
+;; but we are conservative here and use the timings that a late
+;; register operand would give us.
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_branches")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_branches")
+;; APPLE LOCAL 6930582 load latencies
+(define_bypass 3 "11_load1,11_load2"
+ "11_branches")
+;; APPLE LOCAL 6930582 load latencies
+(define_bypass 4 "11_load34"
+ "11_branches")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Load/Store Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The models for load/store instructions do not accurately describe the
+;; difference between operations with and without a base register writeback.
+;; These models assume that all memory references hit in dcache. Also,
+;; if the PC is one of the registers involved, there are additional stalls
+;; not modelled here. Addressing modes are also not modelled.
+
+;; APPLE LOCAL 6930582 load latencies
+(define_insn_reservation "11_load1" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "load1"))
+ "l_a+e_1,l_dc1,l_dc2,l_wb")
+
+;; Load byte results are not available until the writeback stage, where
+;; the correct byte is extracted.
+
+(define_insn_reservation "11_loadb" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "load_byte"))
+ "l_a+e_1,l_dc1,l_dc2,l_wb")
+
+(define_insn_reservation "11_store1" 0
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "store1"))
+ "l_a+e_1,l_dc1,l_dc2,l_wb")
+
+;; Load/store double words into adjacent registers. The timing and
+;; latencies are different depending on whether the address is 64-bit
+;; aligned. This model assumes that it is.
+;; APPLE LOCAL 6930582 load latencies
+(define_insn_reservation "11_load2" 4
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "load2"))
+ "l_a+e_1,l_dc1,l_dc2,l_wb")
+
+(define_insn_reservation "11_store2" 0
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "store2"))
+ "l_a+e_1,l_dc1,l_dc2,l_wb")
+
+;; Load/store multiple registers. Two registers are stored per cycle.
+;; Actual timing depends on how many registers are affected, so we
+;; optimistically schedule a low latency.
+;; APPLE LOCAL 6930582 load latencies
+(define_insn_reservation "11_load34" 5
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "load3,load4"))
+ "l_a+e_1,l_dc1*2,l_dc2,l_wb")
+
+(define_insn_reservation "11_store34" 0
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+ (eq_attr "type" "store3,store4"))
+ "l_a+e_1,l_dc1*2,l_dc2,l_wb")
+
+;; A store can start immediately after an alu op, if that alu op does
+;; not provide part of the address to access.
+(define_bypass 1 "11_alu_op,11_alu_shift_op"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+(define_bypass 2 "11_alu_shift_reg_op"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+
+;; An alu op can start sooner after a load, if that alu op does not
+;; have an early register dependency on the load
+;; APPLE LOCAL begin 6930582 load latencies
+(define_bypass 3 "11_load1"
+ "11_alu_op")
+(define_bypass 3 "11_load1"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 3 "11_load1"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+;; APPLE LOCAL end 6930582 load latencies
+
+(define_bypass 3 "11_loadb"
+ "11_alu_op")
+(define_bypass 3 "11_loadb"
+ "11_alu_shift_op"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 3 "11_loadb"
+ "11_alu_shift_reg_op"
+ "arm_no_early_alu_shift_dep")
+
+;; A mul op can start sooner after a load, if that mul op does not
+;; have an early multiply dependency
+;; APPLE LOCAL 6930582 load latencies
+(define_bypass 3 "11_load1"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+;; APPLE LOCAL 6930582 load latencies
+(define_bypass 4 "11_load34"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+(define_bypass 3 "11_loadb"
+ "11_mult1,11_mult2,11_mult3,11_mult4,11_mult5,11_mult6,11_mult7"
+ "arm_no_early_mul_dep")
+
+;; A store can start sooner after a load, if that load does not
+;; produce part of the address to access
+;; APPLE LOCAL 6930582 load latencies
+(define_bypass 3 "11_load1"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
+(define_bypass 3 "11_loadb"
+ "11_store1"
+ "arm_no_early_store_addr_dep")
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm926ejs.md b/gcc-4.2.1-5666.3/gcc/config/arm/arm926ejs.md
new file mode 100644
index 000000000..244e3a91c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm926ejs.md
@@ -0,0 +1,188 @@
+;; ARM 926EJ-S Pipeline Description
+;; Copyright (C) 2003 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+;; These descriptions are based on the information contained in the
+;; ARM926EJ-S Technical Reference Manual, Copyright (c) 2002 ARM
+;; Limited.
+;;
+
+;; This automaton provides a pipeline description for the ARM
+;; 926EJ-S core.
+;;
+;; The model given here assumes that the condition for all conditional
+;; instructions is "true", i.e., that all of the instructions are
+;; actually executed.
+
+(define_automaton "arm926ejs")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Pipelines
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; There is a single pipeline.
+;;
+;; The ALU pipeline has fetch, decode, execute, memory, and
+;; write stages. We only need to model the execute, memory, and write
+;; stages.
+
+(define_cpu_unit "e,m,w" "arm926ejs")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; ALU Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ALU instructions require three cycles to execute, and use the ALU
+;; pipeline in each of the three stages. The results are available
+;; after the execute stage has finished.
+;;
+;; If the destination register is the PC, the pipelines are stalled
+;; for several cycles. That case is not modeled here.
+
+;; ALU operations with no shifted operand
+(define_insn_reservation "9_alu_op" 1
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "alu,alu_shift"))
+ "e,m,w")
+
+;; ALU operations with a shift-by-register operand
+;; These really stall in the decoder, in order to read
+;; the shift value in a second cycle. Pretend we take two cycles in
+;; the execute stage.
+(define_insn_reservation "9_alu_shift_reg_op" 2
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "alu_shift_reg"))
+ "e*2,m,w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Multiplication Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Multiplication instructions loop in the execute stage until the
+;; instruction has been passed through the multiplier array enough
+;; times. Multiply operations occur in both the execute and memory
+;; stages of the pipeline.
+
+(define_insn_reservation "9_mult1" 3
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "smlalxy,mul,mla"))
+ "e*2,m,w")
+
+(define_insn_reservation "9_mult2" 4
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "muls,mlas"))
+ "e*3,m,w")
+
+(define_insn_reservation "9_mult3" 4
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "umull,umlal,smull,smlal"))
+ "e*3,m,w")
+
+(define_insn_reservation "9_mult4" 5
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "umulls,umlals,smulls,smlals"))
+ "e*4,m,w")
+
+(define_insn_reservation "9_mult5" 2
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "smulxy,smlaxy,smlawx"))
+ "e,m,w")
+
+(define_insn_reservation "9_mult6" 3
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "insn" "smlalxy"))
+ "e*2,m,w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Load/Store Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The models for load/store instructions do not accurately describe
+;; the difference between operations with and without a base register
+;; writeback (such as "ldm!"). These models assume that all memory
+;; references hit in dcache.
+
+;; Loads with a shifted offset take 3 cycles, and we model all loads that
+;; way: (a) they are probably the most common, and (b) the pessimistic
+;; assumption leads to fewer stalls.
+(define_insn_reservation "9_load1_op" 3
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "load1,load_byte"))
+ "e*2,m,w")
+
+(define_insn_reservation "9_store1_op" 0
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "store1"))
+ "e,m,w")
+
+;; multiple word loads and stores
+(define_insn_reservation "9_load2_op" 3
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "load2"))
+ "e,m*2,w")
+
+(define_insn_reservation "9_load3_op" 4
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "load3"))
+ "e,m*3,w")
+
+(define_insn_reservation "9_load4_op" 5
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "load4"))
+ "e,m*4,w")
+
+(define_insn_reservation "9_store2_op" 0
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "store2"))
+ "e,m*2,w")
+
+(define_insn_reservation "9_store3_op" 0
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "store3"))
+ "e,m*3,w")
+
+(define_insn_reservation "9_store4_op" 0
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "store4"))
+ "e,m*4,w")
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Branch and Call Instructions
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; Branch instructions are difficult to model accurately. The ARM
+;; core can predict most branches. If the branch is predicted
+;; correctly, and predicted early enough, the branch can be completely
+;; eliminated from the instruction stream. Some branches can
+;; therefore appear to require zero cycles to execute. We assume that
+;; all branches are predicted correctly, and that the latency is
+;; therefore the minimum value.
+
+(define_insn_reservation "9_branch_op" 0
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "branch"))
+ "nothing")
+
+;; The latency for a call is not predictable. Therefore, we use 32 as
+;; roughly equivalent to positive infinity.
+
+(define_insn_reservation "9_call_op" 32
+ (and (eq_attr "tune" "arm926ejs")
+ (eq_attr "type" "call"))
+ "nothing")
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/arm_neon.h b/gcc-4.2.1-5666.3/gcc/config/arm/arm_neon.h
new file mode 100644
index 000000000..a170b798b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/arm_neon.h
@@ -0,0 +1,12180 @@
+/* APPLE LOCAL file v7 support. Merge from Codesourcery */
+/* ARM NEON intrinsics include file. This file is generated automatically
+ using neon-gen.ml. Please do not edit manually.
+
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+#ifndef _GCC_ARM_NEON_H
+#define _GCC_ARM_NEON_H 1
+
+#ifndef __ARM_NEON__
+#error You must enable NEON instructions (e.g. -mfloat-abi=softfp -mfpu=neon) to use arm_neon.h
+#else
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_di int64x1_t;
+typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_udi uint64x1_t;
+typedef __builtin_neon_qi int8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_hi int16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_si int32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_di int64x2_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_sf float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly8 poly8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly16 poly16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_uqi uint8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_uhi uint16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
+
+typedef __builtin_neon_sf float32_t;
+typedef __builtin_neon_poly8 poly8_t;
+typedef __builtin_neon_poly16 poly16_t;
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+
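+/* The xN struct types above carry the operands and results of the
+   de-interleaving structure loads and stores (vld2/vld3/vld4 and
+   vst2/vst3/vst4) declared later in this header. A minimal sketch
+   (illustrative only):
+
+     uint8x8x2_t p = vld2_u8 (src);   // de-interleave 16 bytes
+     uint8x8_t evens = p.val[0], odds = p.val[1];
+*/
+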
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vadddi (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vaddv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vaddv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
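+
+/* A minimal usage sketch (illustrative only): the vadd/vaddq intrinsics
+   above perform a lane-wise add; e.g. summing two 8-byte buffers with
+   the vld1/vst1 load/store intrinsics declared later in this header:
+
+     void add8 (uint8_t *d, const uint8_t *a, const uint8_t *b)
+     {
+       vst1_u8 (d, vadd_u8 (vld1_u8 (a), vld1_u8 (b)));
+     }
+
+   (The trailing integer constant passed to each __builtin_neon_* call
+   appears to select the element-type flavour, signed/unsigned/poly/float,
+   for the generic builtin; see neon-gen.ml.)  */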
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
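+
+/* A sketch (illustrative only): vaddl_* widen both narrow operands
+   before adding, so the sums cannot wrap, and vaddw_* add a narrow
+   vector into an already-widened accumulator:
+
+     uint16x8_t acc = vaddl_u8 (a, b);   // 8 x u8 + 8 x u8 -> 8 x u16
+     acc = vaddw_u8 (acc, c);            // fold in 8 more bytes
+*/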
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 2);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 2);
+}
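+
+/* vhadd_* compute the halving add (a + b) >> 1 in a wide intermediate,
+   truncating; the vrhadd_* variants round instead, i.e. (a + b + 1) >> 1.
+   A typical use is averaging (illustrative only):
+
+     uint8x8_t avg = vrhadd_u8 (row0, row1);   // rounded byte average
+*/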
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqadddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
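+
+/* vqadd_* saturate rather than wrap: vqadd_u8 clamps 200 + 100 to 255,
+   and vqadd_s8 clamps 100 + 100 to 127. Sketch (illustrative only):
+
+     uint8x8_t sum = vqadd_u8 (pix, gain);   // no wraparound
+*/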
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 2);
+}
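+
+/* vaddhn_* add and then keep only the most significant (narrowing) half
+   of each lane, e.g. (a + b) >> 8 narrowed to 8 bits for 16-bit inputs;
+   vraddhn_* round before narrowing. Sketch (illustrative only):
+
+     uint8x8_t hi = vaddhn_u16 (a, b);   // high halves of eight sums
+*/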
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmulv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmulv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmulv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmulv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmulv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmulv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmulv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmulv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmulv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmulv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmulv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmulv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 4);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 3);
+}
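+
+/* vqdmulh_* are the Q15/Q31 fixed-point multiplies: saturating,
+   doubling, high-half-only, roughly (2 * a * b) >> 16 for 16-bit lanes
+   (the one overflow case, both inputs at INT16_MIN, saturates);
+   vqrdmulh_* additionally round. Sketch (illustrative only):
+
+     int16x4_t q15prod = vqdmulh_s16 (x, coeff);
+*/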
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmullv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmullv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmullv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmullv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
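+/* vmlal: widening multiply-accumulate; the products of __b and __c widen
+   and are added to the already-wide __a.  */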
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
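+/* vqdmlal: signed saturating doubling multiply-accumulate long.  */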
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c, 1);
+}
+
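+/* vmls/vmlsq: multiply-subtract; each lane computes __a - __b * __c.  */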
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
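+/* vmlsl: widening multiply-subtract.  */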
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlslv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlslv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlslv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlslv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlslv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
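+/* vqdmlsl: signed saturating doubling multiply-subtract long.  */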
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c, 1);
+}
+
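+/* vsub/vsubq: lanewise subtraction, __a - __b.  */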
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vsubv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vsubv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
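+/* vsubl: long subtract; both operands widen before subtracting.  */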
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsublv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsublv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsublv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsublv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsublv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsublv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
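+/* vsubw: wide subtract; only __b is widened before subtracting from the
+   already-wide __a.  */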
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
+
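+/* vhsub: halving subtract, (__a - __b) >> 1 per lane.  */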
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
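+/* vqsub/vqsubq: saturating subtract; results clamp at the type's limits
+   instead of wrapping.  */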
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
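+/* vsubhn: subtract and narrow, keeping the high half of each difference.  */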
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
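+/* vrsubhn: rounding variant of vsubhn (hence variant codes 3 and 2).  */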
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 2);
+}
+
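+/* The comparisons below (vceq through vclt) return masks: a lane is
+   all-ones where the predicate holds and all-zeros elsewhere.
+   vceq: compare equal.  */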
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 4);
+}
+
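+/* vcge: compare greater than or equal.  */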
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
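+/* vcle: compare less than or equal, implemented by handing the operands
+   to the vcge builtin in swapped order.  */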
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
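+/* vcgt: compare greater than.  */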
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
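+/* vclt: compare less than, again via operand-swapped vcgt.  */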
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
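+/* vcage/vcagt and friends: absolute comparisons on floats; vcage tests
+   |__a| >= |__b|, vcagt tests |__a| > |__b|, and the "le"/"lt" forms
+   swap the operands.  */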
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a, 5);
+}
+
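+/* vtst: bitwise test; a lane is all-ones when (__a & __b) is non-zero.  */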
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 4);
+}
+
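+/* vabd/vabdq: absolute difference, |__a - __b| per lane.  */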
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vabdv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vabdv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vabdv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vabdv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vabdv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vabdv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vabdv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vabdv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vabdv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vabdv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
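+/* vabdl: widening absolute difference.  */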
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vabdlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vabdlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
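+/* vaba/vabaq: absolute difference and accumulate, __a + |__b - __c|.  */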
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vabav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vabav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vabav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vabav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vabav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vabav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vabav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vabav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
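+/* vabal: widening absolute difference and accumulate.  */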
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vabalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vabalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
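+/* vmax/vmaxq: lanewise maximum.  */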
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmaxv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmaxv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmaxv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmaxv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmaxv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmaxv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmaxv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmaxv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
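+/* vmin/vminq: lanewise minimum.  */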
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vminv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vminv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vminv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vminv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vminv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vminv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vminv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vminv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
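+/* vpadd: pairwise add; adjacent lanes of __a, then of __b, are summed
+   into the result.  */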
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
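+/* vpaddl/vpaddlq: pairwise add long; adjacent lanes sum into lanes of
+   twice the width, halving the lane count.  */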
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vpaddlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vpaddlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vpaddlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vpaddlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vpaddlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vpaddlv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vpaddlv16qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vpaddlv8hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vpaddlv4si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vpaddlv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vpaddlv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vpaddlv4si ((int32x4_t) __a, 0);
+}
+
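+/* vpadal: pairwise add and accumulate long; widened pair sums of __b
+   are added into __a.  */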
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t __a, int8x8_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpadalv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t __a, int16x4_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpadalv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t __a, int32x2_t __b)
+{
+ return (int64x1_t)__builtin_neon_vpadalv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpadalv8qi ((int16x4_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpadalv4hi ((int32x2_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vpadalv2si ((int64x1_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vpadalv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t)__builtin_neon_vpadalv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t)__builtin_neon_vpadalv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vpadalv16qi ((int16x8_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vpadalv8hi ((int32x4_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vpadalv4si ((int64x2_t) __a, (int32x4_t) __b, 0);
+}
+
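+/* vpmax: pairwise maximum (VPMAX).  */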
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpmaxv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
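+/* vpmin: pairwise minimum (VPMIN).  */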
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpminv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
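+/* vrecps[q]: Newton-Raphson reciprocal refinement step (VRECPS);
+   computes 2 - __a * __b per element.  */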
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b, 5);
+}
+
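+/* vrsqrts[q]: reciprocal square root refinement step (VRSQRTS);
+   computes (3 - __a * __b) / 2 per element.  */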
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b, 5);
+}
+
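+/* vshl[q]: shift left by a per-element signed count (VSHL);
+   negative counts in __b shift right.  */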
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 0);
+}
+
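+/* vrshl[q]: rounding variant of vshl (VRSHL); note it reuses the
+   vshl builtins with the rounding magic words 2/3.  */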
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 2);
+}
+
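+/* vqshl[q]: saturating shift left by a per-element signed count
+   (VQSHL).  */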
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 0);
+}
+
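+/* vqrshl[q]: saturating rounding shift left (VQRSHL).  */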
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 2);
+}
+
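+/* vshr_n, vshrq_n: shift right by an immediate (VSHR); __b must be
+   a compile-time constant from 1 to the element width in bits.  */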
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 0);
+}
+
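+/* vrshr_n, vrshrq_n: rounding shift right by immediate (VRSHR).  */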
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 2);
+}
+
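+/* vshrn_n: narrowing shift right by immediate (VSHRN); the result
+   elements are half the width of the source elements.  */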
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
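+/* vrshrn_n: rounding narrowing shift right (VRSHRN).  */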
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 2);
+}
+
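+/* vqshrn_n: saturating narrowing shift right (VQSHRN).  */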
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
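+/* vqrshrn_n: saturating rounding narrowing shift right (VQRSHRN).  */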
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 2);
+}
+
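+/* vqshrun_n: saturating narrowing shift right, signed input to
+   unsigned result (VQSHRUN).  */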
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 1);
+}
+
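+/* vqrshrun_n: rounding variant of vqshrun (VQRSHRUN).  */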
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 3);
+}
+
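+/* vshl_n, vshlq_n: shift left by an immediate from 0 to the element
+   width minus one (VSHL #imm).  */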
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
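+/* vqshl_n, vqshlq_n: saturating shift left by immediate (VQSHL).  */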
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vqshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vqshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vqshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vqshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vqshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
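+/* vqshlu_n, vqshluq_n: saturating shift left by immediate, signed
+   input to unsigned result (VQSHLU).  */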
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b, 1);
+}
+
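+/* vshll_n: widening shift left by immediate (VSHLL); the result
+   elements are twice the width of the source elements.  */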
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshll_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshll_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshll_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshll_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshll_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshll_nv2si ((int32x2_t) __a, __b, 0);
+}
+
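+/* vsra_n, vsraq_n: shift right by immediate and accumulate into
+   __a (VSRA).  */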
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 0);
+}
+
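+/* vrsra_n, vrsraq_n: rounding shift right by immediate and
+   accumulate (VRSRA).  */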
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 2);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 2);
+}
+
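+/* vsri_n, vsriq_n: shift right by immediate and insert (VSRI); the
+   top __c bits of each element of __a are preserved.  These builtins
+   take no magic word, presumably because the operation is
+   type-agnostic at the bit level.  */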
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsri_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsri_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsri_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
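+/* vsli_n, vsliq_n: shift left by immediate and insert (VSLI); the
+   low __c bits of each element of __a are preserved.  */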
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsli_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsli_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsli_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
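+/* vabs[q]: absolute value (VABS).  */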
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vabsv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vabsv2sf (__a, 5);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vabsv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vabsv4sf (__a, 5);
+}
+
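+/* vqabs[q]: saturating absolute value (VQABS); the most negative
+   representable input saturates to the maximum positive value.  */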
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqabsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqabsv4si (__a, 1);
+}
+
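+/* vneg/vnegq: lane-wise negation (VNEG).  */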
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vnegv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vnegv2sf (__a, 5);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vnegv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vnegv4sf (__a, 5);
+}
+
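+/* vqneg/vqnegq: saturating negation (VQNEG).  */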
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqnegv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqnegv4si (__a, 1);
+}
+
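+/* vmvn/vmvnq: bitwise NOT of every lane (VMVN).  */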
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmvnv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmvnv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmvnv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vmvnv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmvnv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmvnv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 4);
+}
+
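+/* vcls/vclsq: count leading sign bits in each lane (VCLS).  */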
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclsv4si (__a, 1);
+}
+
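+/* vclz/vclzq: count leading zero bits in each lane (VCLZ).  */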
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclzv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclzv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclzv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclzv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclzv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclzv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a, 0);
+}
+
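+/* vcnt/vcntq: population count within each byte (VCNT).  */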
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcntv8qi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vcntv16qi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 4);
+}
+
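+/* vrecpe/vrecpeq: approximate reciprocal estimate (VRECPE).  */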
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrecpev2sf (__a, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrecpev4sf (__a, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a, 0);
+}
+
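+/* vrsqrte/vrsqrteq: approximate reciprocal square-root estimate
+   (VRSQRTE).  */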
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a, 5);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a, 0);
+}
+
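+/* vget_lane/vgetq_lane: extract the scalar held in lane __b.  */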
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanedi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanev2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b, 0);
+}
+
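+/* vset_lane/vsetq_lane: return __b with lane __c replaced by the
+   scalar __a.  */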
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vset_lane_s8 (int8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vset_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vset_lanev2sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vset_lane_u8 (uint8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vset_lane_u16 (uint16_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vset_lane_u32 (uint32_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vset_lane_p8 (poly8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vset_lane_p16 (poly16_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vset_lane_s64 (int64_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vset_lane_u64 (uint64_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vset_lanev4sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_p8 (poly8_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_p16 (poly16_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
+}
+
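+/* vcreate: reinterpret the 64-bit pattern __a as a vector.  Lane 0 takes
+   the least significant bits, so for example
+   vcreate_u8 (0x0706050403020100ull) yields lanes { 0, 1, 2, 3, 4, 5, 6, 7 }.  */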
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcreatev2sf ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
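+/* vdup_n/vdupq_n: broadcast the scalar __a into every lane (VDUP).  */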
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
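+/* vmov_n/vmovq_n: synonyms for vdup_n/vdupq_n; they expand to the same
+   vdup builtins.  */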
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
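+/* vdup_lane/vdupq_lane: broadcast lane __b of __a into every lane of the
+   result (VDUP scalar form).  */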
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vdup_lanev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vdup_lanev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vdup_lanev2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vdup_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vdup_lanev8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vdup_lanev4si (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vdup_lanev4sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vdup_lanev4si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
+}
+
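+/* vcombine: concatenate two 64-bit vectors; __a supplies the low half and
+   __b the high half of the 128-bit result.  */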
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t)__builtin_neon_vcombinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t)__builtin_neon_vcombinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t)__builtin_neon_vcombinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t)__builtin_neon_vcombinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcombinev2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vcombinedi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
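+/* vget_high: extract the high 64-bit half of a 128-bit vector.  */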
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_highv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_highv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_highv4si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_highv2di (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_highv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_highv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
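+/* vget_low: extract the low 64-bit half of a 128-bit vector.  */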
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_lowv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_lowv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_lowv4si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_lowv2di (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_lowv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_lowv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
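+/* vcvt/vcvtq: convert between 32-bit float and 32-bit integer lanes;
+   float-to-integer conversion truncates toward zero (VCVT).  */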
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcvtv2sf (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcvtv2sf (__a, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vcvtv4sf (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vcvtv4sf (__a, 0);
+}
+
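+/* vcvt_n/vcvtq_n: fixed-point conversions; __b is the number of fraction
+   bits (1..32), so e.g. vcvt_n_f32_s32 (__a, 16) computes __a / 2^16.  */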
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_n_s32_f32 (float32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_s32 (int32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_n_u32_f32 (float32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 0);
+}
+
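+/* vmovn: narrow each lane to half width, keeping its low half (VMOVN).  */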
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a, 0);
+}
+
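+/* vqmovn: narrow each lane to half width with saturation (VQMOVN).  */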
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovnv2di ((int64x2_t) __a, 0);
+}
+
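+/* vqmovun: saturating narrow from signed input to an unsigned result
+   (VQMOVUN).  */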
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovunv4si (__a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovunv2di (__a, 1);
+}
+
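+/* vmovl: lengthen each lane to double width, sign- or zero-extending
+   according to the element type (VMOVL).  */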
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmovlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmovlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vmovlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmovlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmovlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vmovlv2si ((int32x2_t) __a, 0);
+}
+
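+/* vtbl1-vtbl4: table lookup (VTBL).  Each result byte is the table byte
+   selected by the matching index in the last argument, or 0 when that
+   index is out of range; the unions below repack multi-vector tables into
+   the wide modes the builtins expect.  For example,
+   vtbl1_u8 (__a, vcreate_u8 (0x0001020304050607ull)) reverses the lanes
+   of __a.  */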
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vtbl1v8qi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t __a, uint8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t __a, int8x8_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t __a, uint8x8_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t __a, uint8x8_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t __a, int8x8_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t __a, uint8x8_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t __a, uint8x8_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t __a, int8x8_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t __a, uint8x8_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t __a, uint8x8_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
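+/* vtbx1-vtbx4: table lookup with extension (VTBX); as vtbl, except that an
+   out-of-range index leaves the corresponding byte of __a unchanged rather
+   than zeroing it.  */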
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vtbx1v8qi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t __a, poly8x8_t __b, uint8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t __a, int8x8x2_t __b, int8x8_t __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx2v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t __a, int8x8x3_t __b, int8x8_t __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx3v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t __a, int8x8x4_t __b, int8x8_t __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx4v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
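+/* vmul_lane/vmulq_lane: multiply every lane of __a by lane __c of __b
+   (VMUL by scalar).  */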
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c, 0);
+}
+
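+/* vmla_lane/vmlaq_lane: multiply-accumulate, __a + __b * lane __d of __c
+   (VMLA by scalar).  */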
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
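+/* Widening lane multiply-accumulate: vmlal_lane accumulates the
+   double-width products __b * __c[__d] into __a; the vqdmlal_lane
+   forms below also double and saturate the product.  */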
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
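+/* Lane-indexed multiply-subtract: vmls_lane returns
+   __a - __b * __c[__d], elementwise.  */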
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
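+/* Widening lane multiply-subtract: vmlsl_lane subtracts the
+   double-width products __b * __c[__d] from __a; the vqdmlsl_lane
+   forms below double and saturate the product first.  */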
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
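+/* Long lane multiplies: vmull_lane returns the double-width products
+   __a * __b[__c]; vqdmull_lane doubles and saturates them.  */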
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vmull_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmull_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vmull_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c, 1);
+}
+
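+/* Saturating doubling multiply returning the high half: vqdmulh_lane
+   yields the upper half of the saturated product 2 * __a * __b[__c];
+   the vqrdmulh_lane forms below reuse the same builtin with selector 3,
+   apparently choosing the rounding variant.  */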
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 3);
+}
+
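+/* Multiply by scalar: the _n forms broadcast the scalar __b to every
+   lane before multiplying.  */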
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, __b, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b, 0);
+}
+
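+/* Long multiply by scalar: vmull_n returns double-width products;
+   vqdmull_n doubles and saturates them.  */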
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmull_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmull_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
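+/* Saturating doubling multiply-high by scalar; the vqrdmulh_n forms
+   below select the rounding variant.  */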
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 3);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 3);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 3);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 3);
+}
+
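+/* Multiply-accumulate by scalar: returns __a + __b * __c with the
+   scalar __c broadcast to every lane.  */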
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
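+/* Widening multiply-accumulate by scalar; vqdmlal_n doubles and
+   saturates the product before accumulating into __a.  */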
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
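+/* Multiply-subtract by scalar: returns __a - __b * __c with the
+   scalar __c broadcast to every lane.  */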
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
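+/* Widening multiply-subtract by scalar; vqdmlsl_n doubles and
+   saturates the product before subtracting from __a.  */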
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
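+/* vext: concatenate __a:__b and extract a full vector starting at
+   element __c, e.g. vext_u8 (a, b, 3) yields {a3..a7, b0, b1, b2};
+   __c must be a constant in range for the element count.  */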
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vextv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vext_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vextv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vext_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vextv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vext_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vext_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vext_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vext_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vext_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vextv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vext_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vextdi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vext_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vextv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vextq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vextv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vextq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vextv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vextq_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vextv2di (__a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vextq_f32 (float32x4_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vextv4sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vextq_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vextq_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vextq_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vextv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vextq_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vextq_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vextq_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
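+/* vrev64, and the vrev32/vrev16 families below: reverse the order of
+   the elements inside each 64-, 32- or 16-bit group of the vector
+   (e.g. vrev64_s16 turns {a0,a1,a2,a3} into {a3,a2,a1,a0}).  */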
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev64v8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vrev64v4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vrev64v2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrev64v2sf (__a, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev64v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vrev64v4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrev64v2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev64v8qi ((int8x8_t) __a, 4);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vrev64v4hi ((int16x4_t) __a, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev64v16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vrev64v8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vrev64v4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrev64v4sf (__a, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev64v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vrev64v8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrev64v4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev64v16qi ((int8x16_t) __a, 4);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vrev64v8hi ((int16x8_t) __a, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev32v8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vrev32v4hi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev32v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vrev32v4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev32v8qi ((int8x8_t) __a, 4);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vrev32v4hi ((int16x4_t) __a, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev32v16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vrev32v8hi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev32v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vrev32v8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev32v16qi ((int8x16_t) __a, 4);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vrev32v8hi ((int16x8_t) __a, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev16v8qi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev16v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev16v8qi ((int8x8_t) __a, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev16v16qi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev16v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev16v16qi ((int8x16_t) __a, 4);
+}
+
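+/* vbsl (bitwise select): per bit, result = (__a & __b) | (~__a & __c),
+   so bits set in the mask __a choose from __b and clear bits choose
+   from __c.  Usage sketch: r = vbsl_u8 (mask, if_set, if_clear).  */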
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return (int64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vbslv2sf ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return (uint64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, (int64x1_t) __b, (int64x1_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vbslv4sf ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return (poly8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return (poly16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
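+/* vtrn: transpose 2x2 blocks of elements from two vectors; val[0]
+   collects {a0, b0, a2, b2, ...} and val[1] {a1, b1, a3, b3, ...}.
+   The builtin writes both result vectors through the pointer to
+   __rv.val[0] rather than returning the struct by value.  */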
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vtrnv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vtrnv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vtrnv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vtrnv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vtrnv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vtrnv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
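+/* vzip: interleave two vectors; val[0] is the interleaving of the low
+   halves {a0, b0, a1, b1, ...} and val[1] that of the high halves.  */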
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vzip_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vzipv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vzip_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vzipv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vzip_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vzipv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vzip_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vzipv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vzip_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vzipv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vzip_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vzipv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vzip_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vzipv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vzip_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vzipv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vzip_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vzipv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vzipq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vzipv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vzipq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vzipv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vzipq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vzipv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vzipq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vzipv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vzipq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vzipv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vzipq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vzipv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vzipq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vzipv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vzipq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vzipv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vzipq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vzipv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
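+/* vuzp: de-interleave two vectors; val[0] gathers the even-indexed
+   elements of __a:__b and val[1] the odd-indexed ones.  */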
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vuzp_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vuzp_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vuzp_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vuzpv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vuzp_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vuzpv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vuzp_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vuzp_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vuzp_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vuzpv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vuzp_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vuzp_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vuzpv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vuzpv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vuzpv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
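+/* vld1: load one vector of elements from __a.  The casts to the
+   internal __builtin_neon_qi/hi/si/di typedefs only adapt the pointer
+   type for the builtin; they do not change the access performed.  */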
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1v2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1v4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
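+/* vld1{,q}_lane_* load a single element from __a into lane __c of the
+   existing vector __b, leaving all other lanes untouched.  __c must be
+   a compile-time constant lane index.  */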
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_lane_s16 (const int16_t * __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vld1_lanev2sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_lane_u8 (const uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_lane_u16 (const uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_lane_u32 (const uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_lane_p8 (const poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_lane_u64 (const uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_s8 (const int8_t * __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_s16 (const int16_t * __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_s32 (const int32_t * __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vld1_lanev4sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_u8 (const uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_u16 (const uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_u32 (const uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_p8 (const poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_u64 (const uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
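+/* vld1{,q}_dup_* load one element from __a and replicate it across
+   every lane of the result vector.  */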
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1_dupv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1_dupv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
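+/* vst1_* store a 64-bit vector to __a.  Unsigned and polynomial
+   vectors are cast to the signed vector type the builtin expects; the
+   casts are bit-for-bit reinterpretations, not value conversions.  */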
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t * __a, int8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t * __a, int16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t * __a, int32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t * __a, int64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t * __a, float32x2_t __b)
+{
+ __builtin_neon_vst1v2sf (__a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t * __a, uint8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t * __a, uint16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t * __a, uint32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t * __a, uint64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t * __a, poly8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t * __a, poly16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
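+/* vst1q_* are the corresponding 128-bit stores.  */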
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t * __a, int8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t * __a, int16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t * __a, int32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t * __a, int64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t * __a, float32x4_t __b)
+{
+ __builtin_neon_vst1v4sf (__a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t * __a, uint8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t * __a, uint16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t * __a, uint32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, (int32x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t * __a, uint64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t * __a, poly8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t * __a, poly16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
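+/* vst1{,q}_lane_* store only lane __c of __b to __a.  */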
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s8 (int8_t * __a, int8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s16 (int16_t * __a, int16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s32 (int32_t * __a, int32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2sf (__a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u8 (uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u16 (uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u32 (uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p8 (poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u64 (uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s8 (int8_t * __a, int8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s16 (int16_t * __a, int16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s32 (int32_t * __a, int32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4sf (__a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u8 (uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u16 (uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u32 (uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p8 (poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u64 (uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
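+/* vld2_* load 2-element structures and de-interleave them into a pair
+   of vectors (VLD2).  The builtins return opaque wide integer modes
+   (__builtin_neon_ti, 16 bytes, for the doubleword forms and
+   __builtin_neon_oi, 32 bytes, for the quadword forms), so each
+   wrapper reinterprets the result through a union as the public
+   two-vector structure type.  */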
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
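+/* vld2{,q}_lane_* load one 2-element structure into lane __c of the
+   vector pair __b; the union converts between the structure type and
+   the builtin's opaque mode in both directions.  The quadword lane
+   forms exist only for 16-bit and 32-bit elements.  */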
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_s8 (const int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_s16 (const int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_s32 (const int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_f32 (const float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_u8 (const uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_u16 (const uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_u32 (const uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_p8 (const poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_p16 (const poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s16 (const int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s32 (const int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_f32 (const float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u16 (const uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u32 (const uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_p16 (const poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
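+/* vld2_dup_* load one 2-element structure and replicate it across all
+   lanes of both result vectors.  */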
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
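+/* vst2_* interleave the two vectors of __b and store them to __a as
+   2-element structures (VST2).  */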
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s8 (int8_t * __a, int8x8x2_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t __b)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t __b)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t __b)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t __b)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t __b)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s64 (int64_t * __a, int64x1x2_t __b)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t __b)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t __b)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t __b)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t __b)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t __b)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t __b)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t __b)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t __b)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t __b)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t __b)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
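+/* vst2{,q}_lane_* store only the 2-element structure in lane __c.  */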
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s8 (int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s16 (int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s32 (int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u8 (uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u16 (uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u32 (uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p8 (poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p16 (poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s16 (int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s32 (int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u16 (uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u32 (uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_p16 (poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
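+/* vld3_* load 3-element structures into three vectors (VLD3).  The
+   opaque modes widen accordingly: __builtin_neon_ei (24 bytes) for the
+   doubleword forms, __builtin_neon_ci (48 bytes) for the quadword
+   forms.  */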
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
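+/* vld3{,q}_lane_* load one 3-element structure into lane __c of __b;
+   the quadword lane forms again cover only 16-bit and 32-bit
+   elements.  */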
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_s8 (const int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_s16 (const int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_s32 (const int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_f32 (const float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_u8 (const uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_u16 (const uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_u32 (const uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_p8 (const poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_p16 (const poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s16 (const int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s32 (const int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_f32 (const float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u16 (const uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u32 (const uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_p16 (const poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
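+/* vld3_dup_* load one 3-element structure and replicate it across all
+   lanes of the three result vectors.  */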
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
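+/* vst3 family: interleave the three vectors in __b and store them to
+   __a as consecutive 3-element structures (VST3).  The q-suffixed
+   forms further below do the same for quadword (128-bit) vectors.  */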
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t __b)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t __b)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t __b)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t __b)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t __b)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t __b)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t __b)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
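+/* Quadword (128-bit vector) forms of vst3.  */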
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t __b)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t __b)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t __b)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t __b)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t __b)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t __b)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t __b)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t __b)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t __b)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
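+/* vst3_lane family: store the single 3-element structure formed from
+   lane __c of each vector in __b (VST3, single structure from one
+   lane).  __c must be a constant lane index.  */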
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s8 (int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s16 (int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s32 (int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u8 (uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u16 (uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u32 (uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p8 (poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p16 (poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s16 (int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s32 (int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u16 (uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u32 (uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_p16 (poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
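+/* vld4 family: load consecutive 4-element structures from __a,
+   de-interleaving them into the four result vectors (VLD4).  */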
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
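+/* Quadword forms of vld4; the four 128-bit vectors are marshalled
+   through the 512-bit __builtin_neon_xi mode.  */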
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
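+/* vld4_lane family: load one 4-element structure from __a into lane
+   __c of each vector; all other lanes are taken unchanged from __b.  */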
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_s8 (const int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_s16 (const int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_s32 (const int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_f32 (const float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_u8 (const uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_u16 (const uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_u32 (const uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_p8 (const poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_p16 (const poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s16 (const int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s32 (const int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_f32 (const float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4sf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u16 (const uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u32 (const uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_p16 (const poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
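+/* vld4_dup family: load a single 4-element structure and replicate
+   each element across all lanes of the corresponding result vector.  */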
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2sf (__a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
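+/* vst4 family: interleave the four vectors in __b and store them to
+   __a as consecutive 4-element structures (VST4).  */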
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t __b)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t __b)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t __b)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t __b)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t __b)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t __b)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t __b)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
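+/* Quadword forms of vst4.  */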
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t __b)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t __b)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t __b)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t __b)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4sf (__a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t __b)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t __b)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t __b)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t __b)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t __b)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
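+/* vst4_lane family: store the single 4-element structure formed from
+   lane __c of each vector in __b.  */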
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s8 (int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s16 (int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s32 (int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u8 (uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u16 (uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u32 (uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p8 (poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p16 (poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s16 (int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s32 (int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4sf (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u16 (uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u32 (uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_p16 (poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
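+/* Lane-wise bitwise operations.  The trailing constant passed to each
+   builtin encodes element signedness: the signed variants pass 1, the
+   unsigned variants pass 0.  vand is a bitwise AND of corresponding
+   lanes; as an illustrative sketch (vdup_n_u8 is defined earlier in
+   this header):  uint8x8_t lo = vand_u8 (x, vdup_n_u8 (0x0f));  keeps
+   the low nibble of every byte lane of x.  */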
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vandv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vandv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vandv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vanddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vandv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vandv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vandv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vanddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vandv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vandv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vandv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vandv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vandv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vandv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vandv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vandv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
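+/* vorr: bitwise inclusive OR of corresponding lanes.  */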
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vorrv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vorrv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vorrv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorrdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vorrv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vorrv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vorrv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorrdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vorrv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vorrv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vorrv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vorrv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vorrv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vorrv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vorrv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vorrv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
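+/* veor: bitwise exclusive OR (XOR) of corresponding lanes.  */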
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_veorv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_veorv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_veorv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_veordi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_veorv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_veorv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_veorv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_veordi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_veorv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_veorv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_veorv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_veorv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_veorv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_veorv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_veorv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_veorv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
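+/* vbic (bit clear): computes __a & ~__b lane-wise.  */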
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vbicv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vbicv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vbicv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vbicdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vbicv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vbicv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vbicv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vbicdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vbicv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vbicv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vbicv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vbicv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vbicv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vbicv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vbicv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vbicv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
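+/* vorn (OR NOT): computes __a | ~__b lane-wise.  */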
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vornv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vornv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vornv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vornv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vornv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vornv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorndi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vornv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vornv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vornv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vornv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vornv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vornv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vornv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vornv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
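+/* vreinterpret family: reinterpret the contents of a vector as a
+   vector of a different element type with the same overall size.
+   The bit pattern is unchanged.  */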
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+#endif
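
The hunk above completes arm_neon.h's vreinterpret family: each intrinsic is a pure bit-pattern cast between vector types of the same width, so it compiles to no instructions. A minimal usage sketch, assuming GCC targeting ARM with -mfpu=neon:

    #include <arm_neon.h>

    /* View a 64-bit vector of eight bytes as two 32-bit words;
       only the static type changes, not the data.  */
    uint32x2_t
    bytes_as_words (uint8x8_t b)
    {
      return vreinterpret_u32_u8 (b);
    }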
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.S b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.S
new file mode 100644
index 000000000..f1f61886e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.S
@@ -0,0 +1,120 @@
+/* Miscellaneous BPABI functions.
+
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#ifdef __ARMEB__
+#define xxh r0
+#define xxl r1
+#define yyh r2
+#define yyl r3
+#else
+#define xxh r1
+#define xxl r0
+#define yyh r3
+#define yyl r2
+#endif
+
+#ifdef L_aeabi_lcmp
+
+ARM_FUNC_START aeabi_lcmp
+ subs ip, xxl, yyl
+ sbcs ip, xxh, yyh
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq
+ COND(sub,s,eq) ip, xxl, yyl
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ mov r0, ip
+ RET
+ FUNC_END aeabi_lcmp
+
+#endif /* L_aeabi_lcmp */
+
+#ifdef L_aeabi_ulcmp
+
+ARM_FUNC_START aeabi_ulcmp
+ cmp xxh, yyh
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it lo
+ movlo r0, #-1
+ do_it hi
+ movhi r0, #1
+ do_it ne
+ RETc(ne)
+ cmp xxl, yyl
+ do_it lo
+ movlo r0, #-1
+ do_it hi
+ movhi r0, #1
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ moveq r0, #0
+ RET
+ FUNC_END aeabi_ulcmp
+
+#endif /* L_aeabi_ulcmp */
+
+#ifdef L_aeabi_ldivmod
+
+ARM_FUNC_START aeabi_ldivmod
+ sub sp, sp, #8
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ mov ip, sp
+ push {ip, lr}
+#else
+ do_push {sp, lr}
+#endif
+ bl SYM(__gnu_ldivmod_helper) __PLT__
+ ldr lr, [sp, #4]
+ add sp, sp, #8
+ do_pop {r2, r3}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ RET
+
+#endif /* L_aeabi_ldivmod */
+
+#ifdef L_aeabi_uldivmod
+
+ARM_FUNC_START aeabi_uldivmod
+ sub sp, sp, #8
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ mov ip, sp
+ push {ip, lr}
+#else
+ do_push {sp, lr}
+#endif
+ bl SYM(__gnu_uldivmod_helper) __PLT__
+ ldr lr, [sp, #4]
+ add sp, sp, #8
+ do_pop {r2, r3}
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ RET
+
+#endif /* L_aeabi_uldivmod */
+
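The two comparison stubs above implement the AEABI three-way compares: __aeabi_lcmp and __aeabi_ulcmp return a negative, zero, or positive word for less-than, equal, and greater-than. A minimal C model of that contract (a sketch of the observable behavior, not the library source):

    /* Three-way compares; any result with the right sign meets the
       AEABI contract, so -1/0/1 is used here for concreteness.  */
    int
    lcmp_model (long long a, long long b)
    {
      return (a > b) - (a < b);
    }

    int
    ulcmp_model (unsigned long long a, unsigned long long b)
    {
      return (a > b) - (a < b);
    }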
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.c b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.c
new file mode 100644
index 000000000..69f6e4ede
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.c
@@ -0,0 +1,61 @@
+/* Miscellaneous BPABI functions.
+
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+extern long long __divdi3 (long long, long long);
+extern unsigned long long __udivdi3 (unsigned long long,
+ unsigned long long);
+extern long long __gnu_ldivmod_helper (long long, long long, long long *);
+extern unsigned long long __gnu_uldivmod_helper (unsigned long long,
+ unsigned long long,
+ unsigned long long *);
+
+
+long long
+__gnu_ldivmod_helper (long long a,
+ long long b,
+ long long *remainder)
+{
+ long long quotient;
+
+ quotient = __divdi3 (a, b);
+ *remainder = a - b * quotient;
+ return quotient;
+}
+
+unsigned long long
+__gnu_uldivmod_helper (unsigned long long a,
+ unsigned long long b,
+ unsigned long long *remainder)
+{
+ unsigned long long quotient;
+
+ quotient = __udivdi3 (a, b);
+ *remainder = a - b * quotient;
+ return quotient;
+}
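
These helpers pair with the __aeabi_{l,ul}divmod stubs in bpabi.S: each stub passes a stack slot whose contents are popped into r2/r3 afterwards, so EABI callers receive the quotient in r0/r1 and the remainder in r2/r3. A small sanity check of the remainder identity computed above, assuming the helper is linked in from libgcc:

    #include <assert.h>

    extern long long __gnu_ldivmod_helper (long long, long long,
                                           long long *);

    static void
    check_divmod (void)
    {
      long long r, q = __gnu_ldivmod_helper (-7, 3, &r);
      /* C division truncates toward zero: q == -2, r == -1.  */
      assert (q * 3 + r == -7);
    }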
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.h b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.h
new file mode 100644
index 000000000..4c730885e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/bpabi.h
@@ -0,0 +1,118 @@
+/* Configuration file for ARM BPABI targets.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Use the AAPCS ABI by default. */
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS
+
+/* Assume that AAPCS ABIs should adhere to the full BPABI. */
+#define TARGET_BPABI (TARGET_AAPCS_BASED)
+
+/* BPABI targets use EABI frame unwinding tables. */
+#define TARGET_UNWIND_INFO 1
+
+/* Section 4.1 of the AAPCS requires the use of VFP format. */
+#undef FPUTYPE_DEFAULT
+#define FPUTYPE_DEFAULT FPUTYPE_VFP
+
+/* EABI targets should enable interworking by default. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_INTERWORK
+
+/* The ARM BPABI functions return a boolean; they use no special
+ calling convention. */
+#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) TARGET_BPABI
+
+/* The BPABI integer comparison routines return { -1, 0, 1 }. */
+#define TARGET_LIB_INT_CMP_BIASED !TARGET_BPABI
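
The biased/unbiased distinction matters because libgcc's generic __cmpdi2-style routines return 0, 1, or 2, while the BPABI routines return -1, 0, or 1; the bias is therefore disabled exactly when TARGET_BPABI holds. A sketch of the two encodings, with the biased form assumed from GCC's internals documentation:

    int
    biased_cmp (long long a, long long b)    /* 0, 1, 2 */
    {
      return (a > b) - (a < b) + 1;
    }

    int
    unbiased_cmp (long long a, long long b)  /* -1, 0, 1 */
    {
      return (a > b) - (a < b);
    }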
+
+/* Tell the assembler to build BPABI binaries. */
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=4}"
+
+/* The generic link spec in elf.h does not support shared libraries. */
+#undef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} " \
+ "%{static:-Bstatic} %{shared:-shared} %{symbolic:-Bsymbolic} " \
+ "-X"
+
+#if defined (__thumb__)
+#define RENAME_LIBRARY_SET ".thumb_set"
+#else
+#define RENAME_LIBRARY_SET ".set"
+#endif
+
+/* Make __aeabi_AEABI_NAME an alias for __GCC_NAME. */
+#define RENAME_LIBRARY(GCC_NAME, AEABI_NAME) \
+ __asm__ (".globl\t__aeabi_" #AEABI_NAME "\n" \
+ RENAME_LIBRARY_SET "\t__aeabi_" #AEABI_NAME \
+ ", __" #GCC_NAME "\n");
+
+/* Give some libgcc functions an additional __aeabi name. */
+#ifdef L_muldi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, lmul)
+#endif
+#ifdef L_fixdfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, d2lz)
+#endif
+#ifdef L_fixunsdfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, d2ulz)
+#endif
+#ifdef L_fixsfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, f2lz)
+#endif
+#ifdef L_fixunssfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, f2ulz)
+#endif
+#ifdef L_floatdidf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, l2d)
+#endif
+#ifdef L_floatdisf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, l2f)
+#endif
+
+/* The BPABI requires that we always use an out-of-line implementation
+ of RTTI comparison, even if the target supports weak symbols,
+ because the same object file might be used on a target that does
+ not support merging symbols across DLL boundaries. This macro is
+ broken out separately so that it can be used within
+ TARGET_OS_CPP_BUILTINS in configuration files for systems based on
+ the BPABI. */
+#define TARGET_BPABI_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__GXX_MERGED_TYPEINFO_NAMES=0"); \
+ } \
+ while (false)
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ TARGET_BPABI_CPP_BUILTINS()
+
+/* The BPABI specifies the use of .{init,fini}_array. Therefore, we
+ do not want GCC to put anything into the .{init,fini} sections. */
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP ARM_EABI_CTORS_SECTION_OP
+#define FINI_ARRAY_SECTION_ASM_OP ARM_EABI_DTORS_SECTION_OP
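
In practice the four macro lines above mean a global constructor is registered as a function pointer rather than as code spliced into .init. A sketch of the source-level effect, assuming the usual toolchain handling of .init_array:

    /* With .init_array in use, this function's address is placed in
       that section and called by the startup code before main.  */
    __attribute__ ((__constructor__))
    static void
    setup (void)
    {
    }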
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/cirrus.md b/gcc-4.2.1-5666.3/gcc/config/arm/cirrus.md
new file mode 100644
index 000000000..39cc319eb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/cirrus.md
@@ -0,0 +1,604 @@
+;; Cirrus EP9312 "Maverick" ARM floating point co-processor description.
+;; Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+;; Written by Aldy Hernandez (aldyh@redhat.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+
+; Cirrus types for invalid insn combinations
+; not Not a cirrus insn
+; normal Any Cirrus insn not covered by the special cases below
+; double cfldrd, cfldr64, cfstrd, cfstr64
+; compare cfcmps, cfcmpd, cfcmp32, cfcmp64
+; move cfmvdlr, cfmvdhr, cfmvsr, cfmv64lr, cfmv64hr
+(define_attr "cirrus" "not,normal,double,compare,move" (const_string "not"))
+
+
+(define_insn "cirrus_adddi3"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (plus:DI (match_operand:DI 1 "cirrus_fp_register" "v")
+ (match_operand:DI 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfadd64%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_addsi3"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (plus:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfadd32%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_addsf3"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (plus:SF (match_operand:SF 1 "cirrus_fp_register" "v")
+ (match_operand:SF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfadds%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_adddf3"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (plus:DF (match_operand:DF 1 "cirrus_fp_register" "v")
+ (match_operand:DF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfaddd%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_subdi3"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (minus:DI (match_operand:DI 1 "cirrus_fp_register" "v")
+ (match_operand:DI 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfsub64%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_subsi3_insn"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (minus:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfsub32%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_subsf3"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (minus:SF (match_operand:SF 1 "cirrus_fp_register" "v")
+ (match_operand:SF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfsubs%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_subdf3"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (minus:DF (match_operand:DF 1 "cirrus_fp_register" "v")
+ (match_operand:DF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfsubd%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_mulsi3"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (mult:SI (match_operand:SI 2 "cirrus_fp_register" "v")
+ (match_operand:SI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfmul32%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+;; APPLE LOCAL DImode multiply enhancement
+(define_insn "cirrus_muldi3"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (mult:DI (match_operand:DI 2 "cirrus_fp_register" "v")
+ (match_operand:DI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmul64%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_dmult")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_mulsi3addsi"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (plus:SI
+ (mult:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_fp_register" "v"))
+ (match_operand:SI 3 "cirrus_fp_register" "0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfmac32%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+;; Cirrus SI multiply-subtract
+(define_insn "*cirrus_mulsi3subsi"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (minus:SI
+ (match_operand:SI 1 "cirrus_fp_register" "0")
+ (mult:SI (match_operand:SI 2 "cirrus_fp_register" "v")
+ (match_operand:SI 3 "cirrus_fp_register" "v"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "0 && TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmsc32%?\\t%V0, %V2, %V3"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_mulsf3"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (mult:SF (match_operand:SF 1 "cirrus_fp_register" "v")
+ (match_operand:SF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmuls%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_farith")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_muldf3"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (mult:DF (match_operand:DF 1 "cirrus_fp_register" "v")
+ (match_operand:DF 2 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmuld%?\\t%V0, %V1, %V2"
+ [(set_attr "type" "mav_dmult")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_ashl_const"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (ashift:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_shift_const" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfsh32%?\\t%V0, %V1, #%s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_ashiftrt_const"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (ashiftrt:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_shift_const" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfsh32%?\\t%V0, %V1, #-%s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_ashlsi3"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (ashift:SI (match_operand:SI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfrshl32%?\\t%V1, %V0, %s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "ashldi3_cirrus"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (ashift:DI (match_operand:DI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfrshl64%?\\t%V1, %V0, %s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_ashldi_const"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (ashift:DI (match_operand:DI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_shift_const" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfsh64%?\\t%V0, %V1, #%s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_ashiftrtdi_const"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (ashiftrt:DI (match_operand:DI 1 "cirrus_fp_register" "v")
+ (match_operand:SI 2 "cirrus_shift_const" "")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfsh64%?\\t%V0, %V1, #-%s2"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_absdi2"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (abs:DI (match_operand:DI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfabs64%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+;; This doesn't really clobber ``cc''. Fixme: aldyh.
+(define_insn "*cirrus_negdi2"
+ [(set (match_operand:DI 0 "cirrus_fp_register" "=v")
+ (neg:DI (match_operand:DI 1 "cirrus_fp_register" "v")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfneg64%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_negsi2"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (neg:SI (match_operand:SI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfneg32%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_negsf2"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (neg:SF (match_operand:SF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfnegs%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_negdf2"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (neg:DF (match_operand:DF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfnegd%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+;; This doesn't really clobber the condition codes either.
+(define_insn "*cirrus_abssi2"
+ [(set (match_operand:SI 0 "cirrus_fp_register" "=v")
+ (abs:SI (match_operand:SI 1 "cirrus_fp_register" "v")))
+ (clobber (reg:CC CC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0"
+ "cfabs32%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_abssf2"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (abs:SF (match_operand:SF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfabss%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_absdf2"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (abs:DF (match_operand:DF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfabsd%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+;; Convert Cirrus-SI to Cirrus-SF
+(define_insn "cirrus_floatsisf2"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))
+ (clobber (match_scratch:DF 2 "=v"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmv64lr%?\\t%Z2, %1\;cfcvt32s%?\\t%V0, %Y2"
+ [(set_attr "length" "8")
+ (set_attr "cirrus" "move")]
+)
+
+(define_insn "cirrus_floatsidf2"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))
+ (clobber (match_scratch:DF 2 "=v"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfmv64lr%?\\t%Z2, %1\;cfcvt32d%?\\t%V0, %Y2"
+ [(set_attr "length" "8")
+ (set_attr "cirrus" "move")]
+)
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (float:SF (match_operand:DI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcvt64s%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (float:DF (match_operand:DI 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcvt64d%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")])
+
+(define_insn "cirrus_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (fix:SF (match_operand:SF 1 "cirrus_fp_register" "v"))))
+ (clobber (match_scratch:DF 2 "=v"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cftruncs32%?\\t%Y2, %V1\;cfmvr64l%?\\t%0, %Z2"
+ [(set_attr "length" "8")
+ (set_attr "cirrus" "normal")]
+)
+
+(define_insn "cirrus_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (fix:DF (match_operand:DF 1 "cirrus_fp_register" "v"))))
+ (clobber (match_scratch:DF 2 "=v"))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cftruncd32%?\\t%Y2, %V1\;cfmvr64l%?\\t%0, %Z2"
+ [(set_attr "length" "8")]
+)
+
+(define_insn "*cirrus_truncdfsf2"
+ [(set (match_operand:SF 0 "cirrus_fp_register" "=v")
+ (float_truncate:SF
+ (match_operand:DF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcvtds%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_extendsfdf2"
+ [(set (match_operand:DF 0 "cirrus_fp_register" "=v")
+ (float_extend:DF (match_operand:SF 1 "cirrus_fp_register" "v")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "cfcvtsd%?\\t%V0, %V1"
+ [(set_attr "cirrus" "normal")]
+)
+
+(define_insn "*cirrus_arm_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r,r,o<>,v,r,v,m,v")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,r,v,mi,v,v"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"#\";
+ case 1:
+ case 2:
+ return output_move_double (operands);
+
+ case 3: return \"cfmv64lr%?\\t%V0, %Q1\;cfmv64hr%?\\t%V0, %R1\";
+ case 4: return \"cfmvr64l%?\\t%Q0, %V1\;cfmvr64h%?\\t%R0, %V1\";
+
+ case 5: return \"cfldr64%?\\t%V0, %1\";
+ case 6: return \"cfstr64%?\\t%V1, %0\";
+
+ /* Shifting by 0 will just copy %1 into %0. */
+ case 7: return \"cfsh64%?\\t%V0, %V1, #0\";
+
+ default: gcc_unreachable ();
+ }
+ }"
+ [(set_attr "length" " 8, 8, 8, 8, 8, 4, 4, 4")
+ (set_attr "type" " *,load2,store2, *, *, load2,store2, *")
+ (set_attr "pool_range" " *,1020, *, *, *, 1020, *, *")
+ (set_attr "neg_pool_range" " *,1012, *, *, *, 1008, *, *")
+ (set_attr "cirrus" "not, not, not,move,normal,double,double,normal")]
+)
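
A note on the output templates above, since several patterns below reuse the same syntax: %? expands to the ARM condition-code suffix for predicated execution, \; separates two assembler instructions emitted by one pattern (which is why those alternatives have length 8), %Q1/%R1 print the low and high core registers of a 64-bit register pair, and %V prints a Cirrus MV register. For alternative 3 (the core-to-Cirrus move), the pattern would emit something like the following; this is an illustrative expansion, not output captured from a real build:

    cfmv64lr    mv0, r0
    cfmv64hr    mv0, r1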
+
+;; Cirrus SI values have been outlawed. Look in arm.h for the comment
+;; on HARD_REGNO_MODE_OK.
+
+(define_insn "*cirrus_movsf_hard_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=v,v,v,r,m,r,r,m")
+ (match_operand:SF 1 "general_operand" "v,mE,r,v,v,r,mE,r"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ cfcpys%?\\t%V0, %V1
+ cfldrs%?\\t%V0, %1
+ cfmvsr%?\\t%V0, %1
+ cfmvrs%?\\t%0, %V1
+ cfstrs%?\\t%V1, %0
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "length" " *, *, *, *, *, 4, 4, 4")
+ (set_attr "type" " *, load1, *, *,store1, *,load1,store1")
+ (set_attr "pool_range" " *, 1020, *, *, *, *,4096, *")
+ (set_attr "neg_pool_range" " *, 1008, *, *, *, *,4084, *")
+ (set_attr "cirrus" "normal,normal,move,normal,normal,not, not, not")]
+)
+
+(define_insn "*cirrus_movdf_hard_insn"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Q,r,m,r,v,v,v,r,m")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,v,mF,r,v,v"))]
+ "TARGET_ARM
+ && TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: return \"#\";
+ case 3: case 4: return output_move_double (operands);
+ case 5: return \"cfcpyd%?\\t%V0, %V1\";
+ case 6: return \"cfldrd%?\\t%V0, %1\";
+ case 7: return \"cfmvdlr\\t%V0, %Q1\;cfmvdhr%?\\t%V0, %R1\";
+ case 8: return \"cfmvrdl%?\\t%Q0, %V1\;cfmvrdh%?\\t%R0, %V1\";
+ case 9: return \"cfstrd%?\\t%V1, %0\";
+ default: gcc_unreachable ();
+ }
+ }"
+ [(set_attr "type" "load1,store2, *,store2,load1, *, load1, *, *,store2")
+ (set_attr "length" " 4, 4, 8, 8, 8, 4, 4, 8, 8, 4")
+ (set_attr "pool_range" " *, *, *, *, 252, *, 1020, *, *, *")
+ (set_attr "neg_pool_range" " *, *, *, *, 244, *, 1008, *, *, *")
+ (set_attr "cirrus" " not, not,not, not, not,normal,double,move,normal,double")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*cirrus_thumb2_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r,r,o<>,v,r,v,m,v")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,r,v,mi,v,v"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_MAVERICK"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return (output_move_double (operands));
+
+ case 3: return \"cfmv64lr%?\\t%V0, %Q1\;cfmv64hr%?\\t%V0, %R1\";
+ case 4: return \"cfmvr64l%?\\t%Q0, %V1\;cfmvr64h%?\\t%R0, %V1\";
+
+ case 5: return \"cfldr64%?\\t%V0, %1\";
+ case 6: return \"cfstr64%?\\t%V1, %0\";
+
+ /* Shifting by 0 will just copy %1 into %0. */
+ case 7: return \"cfsh64%?\\t%V0, %V1, #0\";
+
+ default: abort ();
+ }
+ }"
+ [(set_attr "length" " 8, 8, 8, 8, 8, 4, 4, 4")
+ (set_attr "type" " *,load2,store2, *, *, load2,store2, *")
+ (set_attr "pool_range" " *,4096, *, *, *, 1020, *, *")
+ (set_attr "neg_pool_range" " *, 0, *, *, *, 1008, *, *")
+ (set_attr "cirrus" "not, not, not,move,normal,double,double,normal")]
+)
+
+;; Cirrus SI values have been outlawed. Look in arm.h for the comment
+;; on HARD_REGNO_MODE_OK.
+
+(define_insn "*cirrus_thumb2_movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m,*v,r,*v,T,*v")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r,r,*v,T,*v,*v"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_MAVERICK && 0
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0
+ cfmv64lr%?\\t%Z0, %1
+ cfmvr64l%?\\t%0, %Z1
+ cfldr32%?\\t%V0, %1
+ cfstr32%?\\t%V1, %0
+ cfsh32%?\\t%V0, %V1, #0"
+ [(set_attr "type" "*, *, load1,store1, *, *, load1,store1, *")
+ (set_attr "pool_range" "*, *, 4096, *, *, *, 1024, *, *")
+ (set_attr "neg_pool_range" "*, *, 0, *, *, *, 1012, *, *")
+ (set_attr "cirrus" "not,not, not, not,move,normal,normal,normal,normal")]
+)
+
+(define_insn "*thumb2_cirrus_movsf_hard_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=v,v,v,r,m,r,r,m")
+ (match_operand:SF 1 "general_operand" "v,mE,r,v,v,r,mE,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ cfcpys%?\\t%V0, %V1
+ cfldrs%?\\t%V0, %1
+ cfmvsr%?\\t%V0, %1
+ cfmvrs%?\\t%0, %V1
+ cfstrs%?\\t%V1, %0
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "length" " *, *, *, *, *, 4, 4, 4")
+ (set_attr "type" " *, load1, *, *,store1, *,load1,store1")
+ (set_attr "pool_range" " *, 1020, *, *, *, *,4096, *")
+ (set_attr "neg_pool_range" " *, 1008, *, *, *, *, 0, *")
+ (set_attr "cirrus" "normal,normal,move,normal,normal,not, not, not")]
+)
+
+(define_insn "*thumb2_cirrus_movdf_hard_insn"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Q,r,m,r,v,v,v,r,m")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,v,mF,r,v,v"))]
+ "TARGET_THUMB2
+ && TARGET_HARD_FLOAT && TARGET_MAVERICK
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"cfcpyd%?\\t%V0, %V1\";
+ case 6: return \"cfldrd%?\\t%V0, %1\";
+ case 7: return \"cfmvdlr\\t%V0, %Q1\;cfmvdhr%?\\t%V0, %R1\";
+ case 8: return \"cfmvrdl%?\\t%Q0, %V1\;cfmvrdh%?\\t%R0, %V1\";
+ case 9: return \"cfstrd%?\\t%V1, %0\";
+ default: abort ();
+ }
+ }"
+ [(set_attr "type" "load1,store2, *,store2,load1, *, load1, *, *,store2")
+ (set_attr "length" " 4, 4, 8, 8, 8, 4, 4, 8, 8, 4")
+ (set_attr "pool_range" " *, *, *, *,4092, *, 1020, *, *, *")
+ (set_attr "neg_pool_range" " *, *, *, *, 0, *, 1008, *, *, *")
+ (set_attr "cirrus" " not, not,not, not, not,normal,double,move,normal,double")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/coff.h b/gcc-4.2.1-5666.3/gcc/config/arm/coff.h
new file mode 100644
index 000000000..04c0936d2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/coff.h
@@ -0,0 +1,88 @@
+/* Definitions of target machine for GNU compiler.
+ For ARM with COFF object format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Doug Evans (devans@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Note - it is important that this definition matches the one in tcoff.h. */
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/coff)", stderr)
+
+#undef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "marm", "mlittle-endian", "msoft-float", "mno-thumb-interwork" }
+#endif
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO 1
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+
+#define TARGET_ASM_FILE_START_APP_OFF true
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_coff_asm_named_section
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* We put ARM and Thumb-2 jump tables in the text section, because it makes
+ the code more efficient, but for Thumb-1 it's better to put them out of
+ band. */
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_32BIT)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP "\t.section .rdata"
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* Support the ctors/dtors sections for g++. */
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define SUPPORTS_INIT_PRIORITY 0
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/constraints.md b/gcc-4.2.1-5666.3/gcc/config/arm/constraints.md
new file mode 100644
index 000000000..3fd2a17e0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/constraints.md
@@ -0,0 +1,281 @@
+;; Constraint definitions for ARM and Thumb
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; The following register constraints have been used:
+;; - in ARM/Thumb-2 state: f, t, v, w, x, y, z
+;; - in Thumb state: h, k, b
+;; - in both states: l, c
+;; In ARM state, 'l' is an alias for 'r'.
+
+;; The following normal constraints have been used:
+;; in ARM/Thumb-2 state: G, H, I, J, K, L, M
+;; in Thumb-1 state: I, J, K, L, M, N, O
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+;; The following multi-letter normal constraints have been used:
+;; APPLE LOCAL 5831562 long long constants
+;; in ARM/Thumb-2 state: Da, Db, Dc, Dd, Dn, Dl, DL, Dv
+
+;; The following memory constraints have been used:
+;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Us
+;; in ARM state: Uq
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+
+(define_register_constraint "f" "TARGET_ARM ? FPA_REGS : NO_REGS"
+ "Legacy FPA registers @code{f0}-@code{f7}.")
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_register_constraint "t" "TARGET_32BIT ? VFP_LO_REGS : NO_REGS"
+ "The VFP registers @code{s0}-@code{s31}.")
+
+;; APPLE LOCAL end v7 support. Merge from mainline
+(define_register_constraint "v" "TARGET_ARM ? CIRRUS_REGS : NO_REGS"
+ "The Cirrus Maverick co-processor registers.")
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_register_constraint "w"
+ "TARGET_32BIT ? (TARGET_VFP3 ? VFP_REGS : VFP_LO_REGS) : NO_REGS"
+ "The VFP registers @code{d0}-@code{d15}, or @code{d0}-@code{d31} for VFPv3.")
+
+(define_register_constraint "x" "TARGET_32BIT ? VFP_D0_D7_REGS : NO_REGS"
+ "The VFP registers @code{d0}-@code{d7}.")
+
+;; APPLE LOCAL end v7 support. Merge from mainline
+(define_register_constraint "y" "TARGET_REALLY_IWMMXT ? IWMMXT_REGS : NO_REGS"
+ "The Intel iWMMX co-processor registers.")
+
+(define_register_constraint "z"
+ "TARGET_REALLY_IWMMXT ? IWMMXT_GR_REGS : NO_REGS"
+ "The Intel iWMMX GR registers.")
+
+(define_register_constraint "l" "TARGET_THUMB ? LO_REGS : GENERAL_REGS"
+ "In Thumb state the core registers @code{r0}-@code{r7}.")
+
+(define_register_constraint "h" "TARGET_THUMB ? HI_REGS : NO_REGS"
+ "In Thumb state the core registers @code{r8}-@code{r15}.")
+
+(define_register_constraint "k" "TARGET_THUMB ? STACK_REG : NO_REGS"
+ "@internal
+ Thumb only. The stack register.")
+
+(define_register_constraint "b" "TARGET_THUMB ? BASE_REGS : NO_REGS"
+ "@internal
+ Thumb only. The union of the low registers and the stack register.")
+
+(define_register_constraint "c" "CC_REG"
+ "@internal The condition code register.")
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_constraint "I"
+ "In ARM/Thumb-2 state a constant that can be used as an immediate value in a
+ Data Processing instruction. In Thumb-1 state a constant in the range
+ 0-255."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? const_ok_for_arm (ival)
+ : ival >= 0 && ival <= 255")))
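
The const_ok_for_arm test above encodes the classic ARM rule that a data-processing immediate is an 8-bit value rotated right by an even number of bit positions. A minimal stand-alone sketch of that rule, as a hypothetical helper rather than the actual arm.c implementation:

    /* Hypothetical helper illustrating the rule const_ok_for_arm checks:
       an ARM data-processing immediate is an 8-bit value rotated right
       by an even number of bit positions.  */
    static int
    arm_dp_immediate_p (unsigned int val)
    {
      int rot;
      for (rot = 0; rot < 32; rot += 2)
        {
          /* Rotating VAL left by ROT undoes a right-rotation of the
             8-bit field; check whether the result fits in 8 bits.  */
          unsigned int v = rot ? (val << rot) | (val >> (32 - rot)) : val;
          if ((v & ~0xffu) == 0)
            return 1;
        }
      return 0;
    }

For example, arm_dp_immediate_p (0xff) and arm_dp_immediate_p (0xff000000) both return 1 (the latter is 0xff rotated right by 8), while arm_dp_immediate_p (0x101) returns 0 because its set bits span nine positions.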
+
+(define_constraint "J"
+ "In ARM/Thumb-2 state a constant in the range @minus{}4095-4095. In Thumb-1
+ state a constant in the range @minus{}255-@minus{}1."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? (ival >= -4095 && ival <= 4095)
+ : (ival >= -255 && ival <= -1)")))
+
+(define_constraint "K"
+ "In ARM/Thumb-2 state a constant that satisfies the @code{I} constraint if
+ inverted. In Thumb-1 state a constant that satisfies the @code{I}
+ constraint multiplied by any power of 2."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? const_ok_for_arm (~ival)
+ : thumb_shiftable_const (ival)")))
+
+(define_constraint "L"
+ "In ARM/Thumb-2 state a constant that satisfies the @code{I} constraint if
+ negated. In Thumb-1 state a constant in the range @minus{}7-7."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? const_ok_for_arm (-ival)
+ : (ival >= -7 && ival <= 7)")))
+
+;; The ARM state version is internal...
+;; @internal In ARM/Thumb-2 state a constant in the range 0-32 or any
+;; power of 2.
+(define_constraint "M"
+ "In Thumb-1 state a constant that is a multiple of 4 in the range 0-1020."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? ((ival >= 0 && ival <= 32)
+ || ((ival & (ival - 1)) == 0))
+ : ((ival >= 0 && ival <= 1020) && ((ival & 3) == 0))")))
+
+(define_constraint "N"
+ "In ARM/Thumb-2 state a constant suitable for a MOVW instruction.
+ In Thumb-1 state a constant in the range 0-31."
+ (and (match_code "const_int")
+ (match_test "TARGET_32BIT ? arm_arch_thumb2 && ((ival & 0xffff0000) == 0)
+ : (ival >= 0 && ival <= 31)")))
+
+(define_constraint "O"
+ "In Thumb-1 state a constant that is a multiple of 4 in the range
+ @minus{}508-508."
+ (and (match_code "const_int")
+ (match_test "TARGET_THUMB1 && ival >= -508 && ival <= 508
+ && ((ival & 3) == 0)")))
+
+(define_constraint "G"
+ "In ARM/Thumb-2 state a valid FPA immediate constant."
+ (and (match_code "const_double")
+ (match_test "TARGET_32BIT && arm_const_double_rtx (op)")))
+
+(define_constraint "H"
+ "In ARM/Thumb-2 state a valid FPA immediate constant when negated."
+ (and (match_code "const_double")
+ (match_test "TARGET_32BIT && neg_const_double_rtx_ok_for_fpa (op)")))
+
+(define_constraint "Da"
+ "@internal
+ In ARM/Thumb-2 state a const_int, const_double or const_vector that can
+ be generated with two Data Processing insns."
+ (and (match_code "const_double,const_int,const_vector")
+ (match_test "TARGET_32BIT && arm_const_double_inline_cost (op) == 2")))
+
+(define_constraint "Db"
+ "@internal
+ In ARM/Thumb-2 state a const_int, const_double or const_vector that can
+ be generated with three Data Processing insns."
+ (and (match_code "const_double,const_int,const_vector")
+ (match_test "TARGET_32BIT && arm_const_double_inline_cost (op) == 3")))
+
+(define_constraint "Dc"
+ "@internal
+ In ARM/Thumb-2 state a const_int, const_double or const_vector that can
+ be generated with four Data Processing insns. This pattern is disabled
+ if optimizing for space or when we have load-delay slots to fill."
+ (and (match_code "const_double,const_int,const_vector")
+ (match_test "TARGET_32BIT && arm_const_double_inline_cost (op) == 4
+ && !(optimize_size || arm_ld_sched)")))
+;; APPLE LOCAL begin 5831562 long long constants
+(define_constraint "Dd"
+ "@internal
+ In ARM state a const_int, const_double or const_vector that can be
+ used directly in arithmetic instructions as two 32-bit immediates."
+ (and (match_code "const_double,const_int,const_vector")
+ (match_test "TARGET_32BIT && const64_ok_for_arm_immediate (op)")))
+;; APPLE LOCAL end 5831562 long long constants
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_constraint "Dn"
+ "@internal
+ In ARM/Thumb-2 state a const_vector which can be loaded with a Neon vmov
+ immediate instruction."
+ (and (match_code "const_vector")
+ (match_test "TARGET_32BIT
+ && imm_for_neon_mov_operand (op, GET_MODE (op))")))
+
+(define_constraint "Dl"
+ "@internal
+ In ARM/Thumb-2 state a const_vector which can be used with a Neon vorr or
+ vbic instruction."
+ (and (match_code "const_vector")
+ (match_test "TARGET_32BIT
+ && imm_for_neon_logic_operand (op, GET_MODE (op))")))
+
+(define_constraint "DL"
+ "@internal
+ In ARM/Thumb-2 state a const_vector which can be used with a Neon vorn or
+ vand instruction."
+ (and (match_code "const_vector")
+ (match_test "TARGET_32BIT
+ && imm_for_neon_inv_logic_operand (op, GET_MODE (op))")))
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; APPLE LOCAL begin v7 support. Merge from mainline
+
+(define_constraint "Dv"
+ "@internal
+ In ARM/Thumb-2 state a const_double which can be used with a VFP fconsts
+ or fconstd instruction."
+ (and (match_code "const_double")
+ (match_test "TARGET_32BIT && vfp3_const_double_rtx (op)")))
+;; APPLE LOCAL end v7 support. Merge from mainline
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+
+(define_memory_constraint "Ut"
+ "@internal
+ In ARM/Thumb-2 state an address valid for loading/storing opaque structure
+ types wider than TImode."
+ (and (match_code "mem")
+ (match_test "TARGET_32BIT && neon_struct_mem_operand (op)")))
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_memory_constraint "Uv"
+ "@internal
+ In ARM/Thumb-2 state a valid VFP load/store address."
+ (and (match_code "mem")
+ (match_test "TARGET_32BIT && arm_coproc_mem_operand (op, FALSE)")))
+
+(define_memory_constraint "Uy"
+ "@internal
+ In ARM/Thumb-2 state a valid iWMMX load/store address."
+ (and (match_code "mem")
+ (match_test "TARGET_32BIT && arm_coproc_mem_operand (op, TRUE)")))
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_memory_constraint "Un"
+ "@internal
+ In ARM/Thumb-2 state a valid address for Neon element and structure
+ load/store instructions."
+ (and (match_code "mem")
+ (match_test "TARGET_32BIT && neon_vector_mem_operand (op, FALSE)")))
+
+(define_memory_constraint "Us"
+ "@internal
+ In ARM/Thumb-2 state a valid address for non-offset loads/stores of
+ quad-word values in four ARM registers."
+ (and (match_code "mem")
+ (match_test "TARGET_32BIT && neon_vector_mem_operand (op, TRUE)")))
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+(define_memory_constraint "Uq"
+ "@internal
+ In ARM state an address valid in ldrsb instructions."
+ (and (match_code "mem")
+ (match_test "TARGET_ARM
+ && arm_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
+ SIGN_EXTEND, 0)")))
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_memory_constraint "Q"
+ "@internal
+ In ARM/Thumb-2 state an address that is a single base register."
+ (and (match_code "mem")
+ (match_test "REG_P (XEXP (op, 0))")))
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; We used to have constraint letters for S and R in ARM state, but
+;; all uses of these now appear to have been removed.
+
+;; Additionally, we used to have a Q constraint in Thumb state, but
+;; this wasn't really a valid memory constraint. Again, all uses of
+;; this now seem to have been removed.
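
Although these letters exist primarily for the machine description, they are also accepted in the constraint strings of user-level inline asm, which is a convenient way to see them in action. A sketch, assuming a VFP-enabled build (e.g. -mfpu=vfp) and using the "t" constraint defined above for the single-precision registers s0-s31; the function name is hypothetical:

    /* Add two floats in VFP registers via the "t" constraint.  */
    float
    vfp_adds (float a, float b)
    {
      float r;
      __asm__ ("fadds\t%0, %1, %2" : "=t" (r) : "t" (a), "t" (b));
      return r;
    }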
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8-neon.md b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8-neon.md
new file mode 100644
index 000000000..d0718eddd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8-neon.md
@@ -0,0 +1,1308 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM Cortex-A8 NEON scheduling description.
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+
+;; This file is part of GCC.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_automaton "cortex_a8_neon")
+
+;; Only one load, store, permute, MCR or MRC instruction can be issued
+;; per cycle.
+(define_cpu_unit "cortex_a8_neon_issue_perm" "cortex_a8_neon")
+
+;; Only one data-processing instruction can be issued per cycle.
+(define_cpu_unit "cortex_a8_neon_issue_dp" "cortex_a8_neon")
+
+;; The VFPLite unit (non-pipelined).
+(define_cpu_unit "cortex_a8_vfplite" "cortex_a8_neon")
+
+;; We need a special mutual exclusion (to be used in addition to
+;; cortex_a8_neon_issue_dp) for the case when an instruction such as
+;; vmla.f is forwarded from E5 of the floating-point multiply pipeline to
+;; E2 of the floating-point add pipeline. On the cycle previous to that
+;; forward we must prevent issue of any instruction to the floating-point
+;; add pipeline, but still allow issue of a data-processing instruction
+;; to any of the other pipelines.
+(define_cpu_unit "cortex_a8_neon_issue_fadd" "cortex_a8_neon")
+
+;; Patterns of reservation.
+;; We model the NEON issue units as running in parallel with the core ones.
+;; We assume that multi-cycle NEON instructions get decomposed into
+;; micro-ops as they are issued into the NEON pipeline, and not as they
+;; are issued into the ARM pipeline. Dual issue may not occur except
+;; upon the first and last cycles of a multi-cycle instruction, but it
+;; is unclear whether two multi-cycle instructions can issue together (in
+;; this model they cannot). It is also unclear whether a pair of
+;; a multi-cycle and single-cycle instructions, that could potentially
+;; issue together, only do so if (say) the single-cycle one precedes
+;; the other.
+
+(define_reservation "cortex_a8_neon_dp"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp")
+(define_reservation "cortex_a8_neon_dp_2"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp,\
+ cortex_a8_neon_issue_dp")
+(define_reservation "cortex_a8_neon_dp_4"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp")
+
+(define_reservation "cortex_a8_neon_fadd"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp+\
+ cortex_a8_neon_issue_fadd")
+(define_reservation "cortex_a8_neon_fadd_2"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp+\
+ cortex_a8_neon_issue_fadd,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_fadd")
+
+(define_reservation "cortex_a8_neon_perm"
+ "(cortex_a8_alu0|cortex_a8_alu1)+\
+ cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_perm_2"
+ "(cortex_a8_alu0|cortex_a8_alu1)+\
+ cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_perm_3"
+ "(cortex_a8_alu0|cortex_a8_alu1)+\
+ cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+
+(define_reservation "cortex_a8_neon_ls"
+ "cortex_a8_issue_ls+cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_ls_2"
+ "cortex_a8_issue_ls+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_ls_3"
+ "cortex_a8_issue_ls+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_ls_4"
+ "cortex_a8_issue_ls+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+(define_reservation "cortex_a8_neon_ls_5"
+ "cortex_a8_issue_ls+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_dp+cortex_a8_neon_issue_perm,\
+ cortex_a8_neon_issue_perm")
+
+(define_reservation "cortex_a8_neon_fmul_then_fadd"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp,\
+ nothing*3,\
+ cortex_a8_neon_issue_fadd")
+(define_reservation "cortex_a8_neon_fmul_then_fadd_2"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp,\
+ cortex_a8_neon_issue_dp,\
+ nothing*2,\
+ cortex_a8_neon_issue_fadd,\
+ cortex_a8_neon_issue_fadd")
+
+;; VFP instructions can only be single-issued into the NEON pipeline.
+(define_reservation "cortex_a8_vfp"
+ "(cortex_a8_alu0|cortex_a8_alu1)+cortex_a8_neon_issue_dp+\
+ cortex_a8_neon_issue_perm+cortex_a8_vfplite")
+
+;; VFP instructions.
+;; The VFPLite unit that executes these isn't pipelined; we give the
+;; worst-case latencies (and choose the double-precision ones where we
+;; do not distinguish on precision). We assume RunFast mode is not
+;; enabled and therefore do not model the possible VFP instruction
+;; execution in the NEON floating point pipelines, nor additional
+;; latencies for the processing of subnormals.
+;;
+;; TODO: RunFast mode could potentially be enabled when -ffast-math
+;; is specified.
+
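To make the non-pipelined modeling concrete: each reservation below holds cortex_a8_vfplite for the instruction's entire latency. cortex_a8_vfp_add_sub, for instance, produces its result after 10 cycles and keeps the unit reserved for those same 10 cycles (1 + 9 in the reservation string), so even independent VFP adds issue at best one per 10 cycles, and a chain of N dependent adds costs roughly 10N cycles.
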
+(define_insn_reservation "cortex_a8_vfp_add_sub" 10
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "farith"))
+ "cortex_a8_vfp,cortex_a8_vfplite*9")
+
+(define_insn_reservation "cortex_a8_vfp_muls" 12
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fmuls"))
+ "cortex_a8_vfp,cortex_a8_vfplite*11")
+
+(define_insn_reservation "cortex_a8_vfp_muld" 17
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fmuld"))
+ "cortex_a8_vfp,cortex_a8_vfplite*16")
+
+(define_insn_reservation "cortex_a8_vfp_macs" 21
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fmacs"))
+ "cortex_a8_vfp,cortex_a8_vfplite*20")
+
+(define_insn_reservation "cortex_a8_vfp_macd" 26
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fmacd"))
+ "cortex_a8_vfp,cortex_a8_vfplite*25")
+
+(define_insn_reservation "cortex_a8_vfp_divs" 37
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fdivs"))
+ "cortex_a8_vfp,cortex_a8_vfplite*36")
+
+(define_insn_reservation "cortex_a8_vfp_divd" 65
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "fdivd"))
+ "cortex_a8_vfp,cortex_a8_vfplite*64")
+
+;; Comparisons can actually take 7 cycles sometimes instead of four,
+;; but since all the other instructions lumped into type=ffarith take
+;; four cycles, we pick that latency.
+(define_insn_reservation "cortex_a8_vfp_farith" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "ffarith"))
+ "cortex_a8_vfp,cortex_a8_vfplite*3")
+
+(define_insn_reservation "cortex_a8_vfp_cvt" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "f_cvt"))
+ "cortex_a8_vfp,cortex_a8_vfplite*6")
+
+;; NEON -> core transfers.
+
+(define_insn_reservation "neon_mrc" 20
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mrc"))
+ "cortex_a8_neon_ls")
+
+(define_insn_reservation "neon_mrrc" 21
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mrrc"))
+ "cortex_a8_neon_ls_2")
+
+;; The remainder of this file is auto-generated by neon-schedgen.
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N3.
+(define_insn_reservation "neon_int_1" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_int_1"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)m operands at N1,
+;; their (D|Q)n operands at N2, and produce a result at N3.
+(define_insn_reservation "neon_int_2" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_int_2"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N3.
+(define_insn_reservation "neon_int_3" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_int_3"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N4.
+(define_insn_reservation "neon_int_4" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_int_4"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)m operands at N1,
+;; their (D|Q)n operands at N2, and produce a result at N4.
+(define_insn_reservation "neon_int_5" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_int_5"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N4.
+(define_insn_reservation "neon_vqneg_vqabs" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vqneg_vqabs"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation produce a result at N3.
+(define_insn_reservation "neon_vmov" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vmov"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N6.
+(define_insn_reservation "neon_vaba" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vaba"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N6 on cycle 2.
+(define_insn_reservation "neon_vaba_qqq" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vaba_qqq"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)m operands at N1,
+;; their (D|Q)d operands at N3, and produce a result at N6.
+(define_insn_reservation "neon_vsma" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vsma"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N6.
+(define_insn_reservation "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N6 on cycle 2.
+(define_insn_reservation "neon_mul_qqq_8_16_32_ddd_32" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mul_qqq_8_16_32_ddd_32"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, and produce a result at N6 on cycle 2.
+(define_insn_reservation "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N2, their (D|Q)d operands at N3, and
+;; produce a result at N6.
+(define_insn_reservation "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N2, their (D|Q)d operands at N3, and
+;; produce a result at N6 on cycle 2.
+(define_insn_reservation "neon_mla_qqq_8_16" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mla_qqq_8_16"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N6 on cycle 2.
+(define_insn_reservation "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N6 on cycle 4.
+(define_insn_reservation "neon_mla_qqq_32_qqd_32_scalar" 9
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mla_qqq_32_qqd_32_scalar"))
+ "cortex_a8_neon_dp_4")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, and produce a result at N6.
+(define_insn_reservation "neon_mul_ddd_16_scalar_32_16_long_scalar" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mul_ddd_16_scalar_32_16_long_scalar"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, and produce a result at N6 on cycle 4.
+(define_insn_reservation "neon_mul_qqd_32_scalar" 9
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mul_qqd_32_scalar"))
+ "cortex_a8_neon_dp_4")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N6.
+(define_insn_reservation "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N3.
+(define_insn_reservation "neon_shift_1" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_shift_1"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N4.
+(define_insn_reservation "neon_shift_2" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_shift_2"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N3 on cycle 2.
+(define_insn_reservation "neon_shift_3" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_shift_3"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N1.
+(define_insn_reservation "neon_vshl_ddd" 1
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vshl_ddd"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N4 on cycle 2.
+(define_insn_reservation "neon_vqshl_vrshl_vqrshl_qqq" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vqshl_vrshl_vqrshl_qqq"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)m operands at N1,
+;; their (D|Q)d operands at N3, and produce a result at N6.
+(define_insn_reservation "neon_vsra_vrsra" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vsra_vrsra"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N5.
+(define_insn_reservation "neon_fp_vadd_ddd_vabs_dd" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vadd_ddd_vabs_dd"))
+ "cortex_a8_neon_fadd")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N5 on cycle 2.
+(define_insn_reservation "neon_fp_vadd_qqq_vabs_qq" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vadd_qqq_vabs_qq"))
+ "cortex_a8_neon_fadd_2")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N5.
+(define_insn_reservation "neon_fp_vsum" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vsum"))
+ "cortex_a8_neon_fadd")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, and produce a result at N5.
+(define_insn_reservation "neon_fp_vmul_ddd" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmul_ddd"))
+ "cortex_a8_neon_dp")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, and produce a result at N5 on cycle 2.
+(define_insn_reservation "neon_fp_vmul_qqd" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmul_qqd"))
+ "cortex_a8_neon_dp_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N2, their (D|Q)d operands at N3, and
+;; produce a result at N9.
+(define_insn_reservation "neon_fp_vmla_ddd" 9
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmla_ddd"))
+ "cortex_a8_neon_fmul_then_fadd")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N2, their (D|Q)d operands at N3, and
+;; produce a result at N9 on cycle 2.
+(define_insn_reservation "neon_fp_vmla_qqq" 10
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmla_qqq"))
+ "cortex_a8_neon_fmul_then_fadd_2")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N9.
+(define_insn_reservation "neon_fp_vmla_ddd_scalar" 9
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmla_ddd_scalar"))
+ "cortex_a8_neon_fmul_then_fadd")
+
+;; Instructions using this reservation read their (D|Q)n operands at N2,
+;; their (D|Q)m operands at N1, their (D|Q)d operands at N3, and
+;; produce a result at N9 on cycle 2.
+(define_insn_reservation "neon_fp_vmla_qqq_scalar" 10
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vmla_qqq_scalar"))
+ "cortex_a8_neon_fmul_then_fadd_2")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N9.
+(define_insn_reservation "neon_fp_vrecps_vrsqrts_ddd" 9
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vrecps_vrsqrts_ddd"))
+ "cortex_a8_neon_fmul_then_fadd")
+
+;; Instructions using this reservation read their source operands at N2, and
+;; produce a result at N9 on cycle 2.
+(define_insn_reservation "neon_fp_vrecps_vrsqrts_qqq" 10
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_fp_vrecps_vrsqrts_qqq"))
+ "cortex_a8_neon_fmul_then_fadd_2")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N2.
+(define_insn_reservation "neon_bp_simple" 2
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_bp_simple"))
+ "cortex_a8_neon_perm")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N2 on cycle 2.
+(define_insn_reservation "neon_bp_2cycle" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_bp_2cycle"))
+ "cortex_a8_neon_perm_2")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N2 on cycle 3.
+(define_insn_reservation "neon_bp_3cycle" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_bp_3cycle"))
+ "cortex_a8_neon_perm_3")
+
+;; Instructions using this reservation produce a result at N1.
+(define_insn_reservation "neon_ldr" 1
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_ldr"))
+ "cortex_a8_neon_ls")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_str" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_str"))
+ "cortex_a8_neon_ls")
+
+;; Instructions using this reservation produce a result at N1 on cycle 2.
+(define_insn_reservation "neon_vld1_1_2_regs" 2
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld1_1_2_regs"))
+ "cortex_a8_neon_ls_2")
+
+;; Instructions using this reservation produce a result at N1 on cycle 3.
+(define_insn_reservation "neon_vld1_3_4_regs" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld1_3_4_regs"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation produce a result at N2 on cycle 2.
+(define_insn_reservation "neon_vld2_2_regs_vld1_vld2_all_lanes" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld2_2_regs_vld1_vld2_all_lanes"))
+ "cortex_a8_neon_ls_2")
+
+;; Instructions using this reservation produce a result at N2 on cycle 3.
+(define_insn_reservation "neon_vld2_4_regs" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld2_4_regs"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation produce a result at N2 on cycle 4.
+(define_insn_reservation "neon_vld3_vld4" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld3_vld4"))
+ "cortex_a8_neon_ls_4")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst1_1_2_regs_vst2_2_regs" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs"))
+ "cortex_a8_neon_ls_2")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst1_3_4_regs" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst1_3_4_regs"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst2_4_regs_vst3_vst4" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst2_4_regs_vst3_vst4"))
+ "cortex_a8_neon_ls_4")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst3_vst4" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst3_vst4"))
+ "cortex_a8_neon_ls_4")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N2 on cycle 3.
+(define_insn_reservation "neon_vld1_vld2_lane" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld1_vld2_lane"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation read their source operands at N1, and
+;; produce a result at N2 on cycle 5.
+(define_insn_reservation "neon_vld3_vld4_lane" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld3_vld4_lane"))
+ "cortex_a8_neon_ls_5")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst1_vst2_lane" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst1_vst2_lane"))
+ "cortex_a8_neon_ls_2")
+
+;; Instructions using this reservation read their source operands at N1.
+(define_insn_reservation "neon_vst3_vst4_lane" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vst3_vst4_lane"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation produce a result at N2 on cycle 2.
+(define_insn_reservation "neon_vld3_vld4_all_lanes" 3
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_vld3_vld4_all_lanes"))
+ "cortex_a8_neon_ls_3")
+
+;; Instructions using this reservation produce a result at N2.
+(define_insn_reservation "neon_mcr" 2
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mcr"))
+ "cortex_a8_neon_perm")
+
+;; Instructions using this reservation produce a result at N2.
+(define_insn_reservation "neon_mcr_2_mcrr" 2
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "neon_type" "neon_mcr_2_mcrr"))
+ "cortex_a8_neon_perm_2")
+
+;; Exceptions to the default latencies.
+
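The bypasses below appear to follow one pattern: each producer's bypass value is its default latency minus one, and the consumer list is the set of reservations whose instructions read all their source operands at N2 or later. In other words, a producer such as neon_fp_vadd_ddd_vabs_dd (default latency 5) forwards one cycle earlier, at latency 4, to a consumer like neon_int_1 that does not need its operands until N2.
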
+(define_bypass 1 "neon_mcr_2_mcrr"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 1 "neon_mcr"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_vld3_vld4_all_lanes"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_vld3_vld4_lane"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_vld1_vld2_lane"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 4 "neon_vld3_vld4"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_vld2_4_regs"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_vld2_2_regs_vld1_vld2_all_lanes"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_vld1_3_4_regs"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 1 "neon_vld1_1_2_regs"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 0 "neon_ldr"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_bp_3cycle"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_bp_2cycle"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 1 "neon_bp_simple"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 9 "neon_fp_vrecps_vrsqrts_qqq"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 8 "neon_fp_vrecps_vrsqrts_ddd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 9 "neon_fp_vmla_qqq_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 8 "neon_fp_vmla_ddd_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 9 "neon_fp_vmla_qqq"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 8 "neon_fp_vmla_ddd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_fp_vmul_qqd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 4 "neon_fp_vmul_ddd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 4 "neon_fp_vsum"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_fp_vadd_qqq_vabs_qq"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 4 "neon_fp_vadd_ddd_vabs_dd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_vsra_vrsra"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 4 "neon_vqshl_vrshl_vqrshl_qqq"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 0 "neon_vshl_ddd"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_shift_3"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_shift_2"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_shift_1"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 8 "neon_mul_qqd_32_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_mul_ddd_16_scalar_32_16_long_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 8 "neon_mla_qqq_32_qqd_32_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 6 "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 6 "neon_mla_qqq_8_16"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 6 "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 6 "neon_mul_qqq_8_16_32_ddd_32"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_vsma"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 6 "neon_vaba_qqq"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 5 "neon_vaba"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_vmov"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_vqneg_vqabs"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_int_5"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 3 "neon_int_4"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_int_3"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_int_2"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
+(define_bypass 2 "neon_int_1"
+ "neon_int_1,\
+ neon_int_4,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8.md b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8.md
new file mode 100644
index 000000000..c311f6587
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-a8.md
@@ -0,0 +1,275 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM Cortex-A8 scheduling description.
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+
+;; This file is part of GCC.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_automaton "cortex_a8")
+
+;; Only one load/store instruction can be issued per cycle
+;; (although reservation of this unit is only required for single
+;; loads and stores -- see below).
+(define_cpu_unit "cortex_a8_issue_ls" "cortex_a8")
+
+;; Only one branch instruction can be issued per cycle.
+(define_cpu_unit "cortex_a8_issue_branch" "cortex_a8")
+
+;; The two ALU pipelines.
+(define_cpu_unit "cortex_a8_alu0" "cortex_a8")
+(define_cpu_unit "cortex_a8_alu1" "cortex_a8")
+
+;; The usual flow of an instruction through the pipelines.
+(define_reservation "cortex_a8_default"
+ "cortex_a8_alu0|cortex_a8_alu1")
+
+;; The flow of a branch instruction through the pipelines.
+(define_reservation "cortex_a8_branch"
+ "(cortex_a8_alu0+cortex_a8_issue_branch)|\
+ (cortex_a8_alu1+cortex_a8_issue_branch)")
+
+;; The flow of a load or store instruction through the pipeline in
+;; the case where that instruction consists of only one micro-op...
+(define_reservation "cortex_a8_load_store_1"
+ "(cortex_a8_alu0+cortex_a8_issue_ls)|\
+ (cortex_a8_alu1+cortex_a8_issue_ls)")
+
+;; ...and in the case of two micro-ops. We don't need to reserve
+;; cortex_a8_issue_ls here because dual issue is altogether forbidden
+;; during the issue cycle of the first micro-op. (Rather than modelling
+;; a separate issue unit, we reserve alu0 and alu1 to prevent any other
+;; instruction from being issued in that first cycle.)
+;; Even though the load/store pipeline is usually available in either
+;; ALU pipe, multi-cycle instructions always issue in pipeline 0. This
+;; reservation is therefore the same as cortex_a8_multiply_2 below.
+(define_reservation "cortex_a8_load_store_2"
+ "cortex_a8_alu0+cortex_a8_alu1,\
+ cortex_a8_alu0")
+
+;; The flow of a single-cycle multiplication.
+(define_reservation "cortex_a8_multiply"
+ "cortex_a8_alu0")
+
+;; The flow of a multiplication instruction that gets decomposed into
+;; two micro-ops. The two micro-ops will be issued to pipeline 0 on
+;; successive cycles. Dual issue cannot happen at the same time as the
+;; first of the micro-ops.
+(define_reservation "cortex_a8_multiply_2"
+ "cortex_a8_alu0+cortex_a8_alu1,\
+ cortex_a8_alu0")
+
+;; Similarly, the flow of a multiplication instruction that gets
+;; decomposed into three micro-ops. Dual issue cannot occur except on
+;; the cycle upon which the third micro-op is issued.
+(define_reservation "cortex_a8_multiply_3"
+ "cortex_a8_alu0+cortex_a8_alu1,\
+ cortex_a8_alu0+cortex_a8_alu1,\
+ cortex_a8_alu0")
+
+;; The model given here assumes that all instructions are unconditional.
+
+;; Data processing instructions, but not move instructions.
+
+;; We include CLZ with these since it has the same execution pattern
+;; (source read in E2 and destination available at the end of that cycle).
+(define_insn_reservation "cortex_a8_alu" 2
+ (and (eq_attr "tune" "cortexa8")
+ (ior (and (eq_attr "type" "alu")
+ (not (eq_attr "insn" "mov,mvn")))
+ (eq_attr "insn" "clz")))
+ "cortex_a8_default")
+
+(define_insn_reservation "cortex_a8_alu_shift" 2
+ (and (eq_attr "tune" "cortexa8")
+ (and (eq_attr "type" "alu_shift")
+ (not (eq_attr "insn" "mov,mvn"))))
+ "cortex_a8_default")
+
+(define_insn_reservation "cortex_a8_alu_shift_reg" 2
+ (and (eq_attr "tune" "cortexa8")
+ (and (eq_attr "type" "alu_shift_reg")
+ (not (eq_attr "insn" "mov,mvn"))))
+ "cortex_a8_default")
+
+;; Move instructions.
+
+(define_insn_reservation "cortex_a8_mov" 1
+ (and (eq_attr "tune" "cortexa8")
+ (and (eq_attr "type" "alu,alu_shift,alu_shift_reg")
+ (eq_attr "insn" "mov,mvn")))
+ "cortex_a8_default")
+
+;; Exceptions to the default latencies for data processing instructions.
+
+;; A move followed by an ALU instruction with no early dep.
+;; (Such a pair can be issued in parallel, hence latency zero.)
+(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu")
+(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 0 "cortex_a8_mov" "cortex_a8_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; An ALU instruction followed by an ALU instruction with no early dep.
+(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
+ "cortex_a8_alu")
+(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
+ "cortex_a8_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 1 "cortex_a8_alu,cortex_a8_alu_shift,cortex_a8_alu_shift_reg"
+ "cortex_a8_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; Multiplication instructions. These are categorized according to their
+;; reservation behaviour and the need below to distinguish certain
+;; varieties for bypasses. Results are available at the E5 stage
+;; (but some of these are multi-cycle instructions which explains the
+;; latencies below).
+
+(define_insn_reservation "cortex_a8_mul" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "insn" "mul,smulxy,smmul"))
+ "cortex_a8_multiply_2")
+
+(define_insn_reservation "cortex_a8_mla" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "insn" "mla,smlaxy,smlawy,smmla,smlad,smlsd"))
+ "cortex_a8_multiply_2")
+
+(define_insn_reservation "cortex_a8_mull" 7
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "insn" "smull,umull,smlal,umlal,umaal,smlalxy"))
+ "cortex_a8_multiply_3")
+
+(define_insn_reservation "cortex_a8_smulwy" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "insn" "smulwy,smuad,smusd"))
+ "cortex_a8_multiply")
+
+;; smlald and smlsld are multiply-accumulate instructions but do not
+;; receive bypassed data from other multiplication results; thus, they
+;; cannot go in cortex_a8_mla above. (See below for bypass details.)
+(define_insn_reservation "cortex_a8_smlald" 6
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "insn" "smlald,smlsld"))
+ "cortex_a8_multiply_2")
+
+;; A multiply with a single-register result or an MLA, followed by an
+;; MLA with an accumulator dependency, has its result forwarded so two
+;; such instructions can issue back-to-back.
+(define_bypass 1 "cortex_a8_mul,cortex_a8_mla,cortex_a8_smulwy"
+ "cortex_a8_mla"
+ "arm_mac_accumulator_is_mul_result")
+
+;; A multiply followed by an ALU instruction needing the multiply
+;; result only at E2 has lower latency than one needing it at E1.
+(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
+ cortex_a8_smulwy,cortex_a8_smlald"
+ "cortex_a8_alu")
+(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
+ cortex_a8_smulwy,cortex_a8_smlald"
+ "cortex_a8_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 4 "cortex_a8_mul,cortex_a8_mla,cortex_a8_mull,\
+ cortex_a8_smulwy,cortex_a8_smlald"
+ "cortex_a8_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; Load instructions.
+;; The presence of any register writeback is ignored here.
+
+;; APPLE LOCAL begin 6930582 load latencies
+;; A load result has latency 4 unless the dependent instruction has
+;; no early dep, in which case it is only latency 3.
+;; We assume 64-bit alignment for doubleword loads.
+(define_insn_reservation "cortex_a8_load1_2" 4
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "load1,load2,load_byte"))
+ "cortex_a8_load_store_1")
+
+(define_bypass 3 "cortex_a8_load1_2"
+ "cortex_a8_alu")
+(define_bypass 3 "cortex_a8_load1_2"
+ "cortex_a8_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 3 "cortex_a8_load1_2"
+ "cortex_a8_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+;; APPLE LOCAL end 6930582 load latencies
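+
+;; For instance (an illustrative sequence, not from the original): in
+;; "ldr r0, [r1] ; add r2, r0, r3" the add has no early dependence, so
+;; the bypass above gives latency 3; if the add instead shifted r0, the
+;; load's full latency of 4 would apply.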
+
+;; We do not currently model the fact that loads with scaled register
+;; offsets that are not LSL #2 have an extra cycle latency (they issue
+;; as two micro-ops).
+
+;; A load multiple of three registers is usually issued as two micro-ops.
+;; The first register will be available at E3 of the first iteration,
+;; the second at E3 of the second iteration, and the third at E4 of
+;; the second iteration. A load multiple of four registers is usually
+;; issued as two micro-ops.
+(define_insn_reservation "cortex_a8_load3_4" 5
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "load3,load4"))
+ "cortex_a8_load_store_2")
+
+(define_bypass 4 "cortex_a8_load3_4"
+ "cortex_a8_alu")
+(define_bypass 4 "cortex_a8_load3_4"
+ "cortex_a8_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 4 "cortex_a8_load3_4"
+ "cortex_a8_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; Store instructions.
+;; Writeback is again ignored.
+
+(define_insn_reservation "cortex_a8_store1_2" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "store1,store2"))
+ "cortex_a8_load_store_1")
+
+(define_insn_reservation "cortex_a8_store3_4" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "store3,store4"))
+ "cortex_a8_load_store_2")
+
+;; An ALU instruction acting as a producer for a store instruction
+;; that only uses the result as the value to be stored (as opposed to
+;; using it to calculate the address) has latency zero; the store
+;; reads the value to be stored at the start of E3 and the ALU insn
+;; writes it at the end of E2. Move instructions actually produce the
+;; result at the end of E1, but since we don't have delay slots, the
+;; scheduling behaviour will be the same.
+(define_bypass 0 "cortex_a8_alu,cortex_a8_alu_shift,\
+ cortex_a8_alu_shift_reg,cortex_a8_mov"
+ "cortex_a8_store1_2,cortex_a8_store3_4"
+ "arm_no_early_store_addr_dep")
+
+;; Branch instructions
+
+(define_insn_reservation "cortex_a8_branch" 0
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "branch"))
+ "cortex_a8_branch")
+
+;; Call latencies are not predictable. A semi-arbitrary very large
+;; number is used as "positive infinity" so that everything should be
+;; finished by the time of return.
+(define_insn_reservation "cortex_a8_call" 32
+ (and (eq_attr "tune" "cortexa8")
+ (eq_attr "type" "call"))
+ "cortex_a8_issue_branch")
+
+;; NEON (including VFP) instructions.
+
+(include "cortex-a8-neon.md")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/cortex-r4.md b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-r4.md
new file mode 100644
index 000000000..5f3152409
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/cortex-r4.md
@@ -0,0 +1,289 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM Cortex-R4 scheduling description.
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+
+;; This file is part of GCC.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_automaton "cortex_r4")
+
+;; We approximate the dual-issue constraints of this core using four
+;; "issue units" and a reservation matrix as follows. The numbers indicate
+;; the instruction groups' preferences in order. Multiple entries for
+;; the same numbered preference indicate units that must be reserved
+;; together.
+;;
+;; Issue unit:          A     B     C     ALU
+;;
+;; ALU w/o reg shift    1st   2nd         1st and 2nd
+;; ALU w/ reg shift     1st   2nd   2nd   1st and 2nd
+;; Moves                1st   2nd         2nd
+;; Multiplication       1st               1st
+;; Division             1st               1st
+;; Load/store single    1st         1st
+;; Other load/store     1st   1st
+;; Branches                   1st
+
+(define_cpu_unit "cortex_r4_issue_a" "cortex_r4")
+(define_cpu_unit "cortex_r4_issue_b" "cortex_r4")
+(define_cpu_unit "cortex_r4_issue_c" "cortex_r4")
+(define_cpu_unit "cortex_r4_issue_alu" "cortex_r4")
+
+(define_reservation "cortex_r4_alu"
+ "(cortex_r4_issue_a+cortex_r4_issue_alu)|\
+ (cortex_r4_issue_b+cortex_r4_issue_alu)")
+(define_reservation "cortex_r4_alu_shift_reg"
+ "(cortex_r4_issue_a+cortex_r4_issue_alu)|\
+ (cortex_r4_issue_b+cortex_r4_issue_c+\
+ cortex_r4_issue_alu)")
+(define_reservation "cortex_r4_mov"
+ "cortex_r4_issue_a|(cortex_r4_issue_b+\
+ cortex_r4_issue_alu)")
+(define_reservation "cortex_r4_mul" "cortex_r4_issue_a+cortex_r4_issue_alu")
+(define_reservation "cortex_r4_mul_2"
+ "(cortex_r4_issue_a+cortex_r4_issue_alu)*2")
+;; Division instructions execute out-of-order with respect to the
+;; rest of the pipeline and only require reservations on their first and
+;; final cycles.
+(define_reservation "cortex_r4_div_9"
+ "cortex_r4_issue_a+cortex_r4_issue_alu,\
+ nothing*7,\
+ cortex_r4_issue_a+cortex_r4_issue_alu")
+(define_reservation "cortex_r4_div_10"
+ "cortex_r4_issue_a+cortex_r4_issue_alu,\
+ nothing*8,\
+ cortex_r4_issue_a+cortex_r4_issue_alu")
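+
+;; As a sketch of how cortex_r4_div_9 above expands (an illustrative
+;; gloss, not from the original): it occupies issue unit A and the ALU
+;; unit on the first cycle, leaves the next seven cycles free for other
+;; instructions ("nothing*7"), and reserves A and ALU again on the
+;; ninth cycle when the result is written back.
+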
+(define_reservation "cortex_r4_load_store"
+ "cortex_r4_issue_a+cortex_r4_issue_c")
+(define_reservation "cortex_r4_load_store_2"
+ "(cortex_r4_issue_a+cortex_r4_issue_b)*2")
+(define_reservation "cortex_r4_branch" "cortex_r4_issue_b")
+
+;; We assume that all instructions are unconditional.
+
+;; Data processing instructions. Moves without shifts are kept separate
+;; for the purposes of the dual-issue constraints above.
+(define_insn_reservation "cortex_r4_alu" 2
+ (and (eq_attr "tune" "cortexr4")
+ (and (eq_attr "type" "alu")
+ (not (eq_attr "insn" "mov"))))
+ "cortex_r4_alu")
+
+(define_insn_reservation "cortex_r4_mov" 2
+ (and (eq_attr "tune" "cortexr4")
+ (and (eq_attr "type" "alu")
+ (eq_attr "insn" "mov")))
+ "cortex_r4_mov")
+
+(define_insn_reservation "cortex_r4_alu_shift" 2
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "alu_shift"))
+ "cortex_r4_alu")
+
+(define_insn_reservation "cortex_r4_alu_shift_reg" 2
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "alu_shift_reg"))
+ "cortex_r4_alu_shift_reg")
+
+;; An ALU instruction followed by an ALU instruction with no early dep.
+(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\
+ cortex_r4_mov"
+ "cortex_r4_alu")
+(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\
+ cortex_r4_mov"
+ "cortex_r4_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\
+ cortex_r4_mov"
+ "cortex_r4_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; In terms of availabilities, a consumer mov could theoretically be
+;; issued together with a producer ALU instruction, without stalls.
+;; In practice this cannot happen because mov;add (in that order) is not
+;; eligible for dual issue and furthermore dual issue is not permitted
+;; when a dependency is involved. We therefore note it as latency one.
+;; A mov followed by another of the same is also latency one.
+(define_bypass 1 "cortex_r4_alu,cortex_r4_alu_shift,cortex_r4_alu_shift_reg,\
+ cortex_r4_mov"
+ "cortex_r4_mov")
+
+;; qadd, qdadd, qsub and qdsub are not currently emitted, and neither are
+;; media data processing instructions nor sad instructions.
+
+;; Multiplication instructions.
+
+(define_insn_reservation "cortex_r4_mul_4" 4
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "mul,smmul"))
+ "cortex_r4_mul_2")
+
+(define_insn_reservation "cortex_r4_mul_3" 3
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "smulxy,smulwy,smuad,smusd"))
+ "cortex_r4_mul")
+
+(define_insn_reservation "cortex_r4_mla_4" 4
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "mla,smmla,smmls"))
+ "cortex_r4_mul_2")
+
+(define_insn_reservation "cortex_r4_mla_3" 3
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "smlaxy,smlawy,smlad,smlsd"))
+ "cortex_r4_mul")
+
+(define_insn_reservation "cortex_r4_smlald" 3
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "smlald,smlsld"))
+ "cortex_r4_mul")
+
+(define_insn_reservation "cortex_r4_mull" 4
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "smull,umull,umlal,umaal"))
+ "cortex_r4_mul_2")
+
+;; A multiply or an MLA with a single-register result, followed by an
+;; MLA with an accumulator dependency, has its result forwarded.
+(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3"
+ "cortex_r4_mla_3,cortex_r4_mla_4"
+ "arm_mac_accumulator_is_mul_result")
+
+(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4"
+ "cortex_r4_mla_3,cortex_r4_mla_4"
+ "arm_mac_accumulator_is_mul_result")
+
+;; A multiply followed by an ALU instruction needing the multiply
+;; result only at ALU has lower latency than one needing it at Shift.
+(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald"
+ "cortex_r4_alu")
+(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald"
+ "cortex_r4_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 2 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald"
+ "cortex_r4_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull"
+ "cortex_r4_alu")
+(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull"
+ "cortex_r4_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 3 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull"
+ "cortex_r4_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; A multiply followed by a mov has one cycle lower latency again.
+(define_bypass 1 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald"
+ "cortex_r4_mov")
+(define_bypass 2 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull"
+ "cortex_r4_mov")
+
+;; We guess that division of A/B using sdiv or udiv, on average,
+;; is performed with B having ten more leading zeros than A.
+;; This gives a latency of nine for udiv and ten for sdiv.
+(define_insn_reservation "cortex_r4_udiv" 9
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "udiv"))
+ "cortex_r4_div_9")
+
+(define_insn_reservation "cortex_r4_sdiv" 10
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "insn" "sdiv"))
+ "cortex_r4_div_10")
+
+;; Branches. We assume correct prediction.
+
+(define_insn_reservation "cortex_r4_branch" 0
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "branch"))
+ "cortex_r4_branch")
+
+;; Call latencies are not predictable. A semi-arbitrary very large
+;; number is used as "positive infinity" so that everything should be
+;; finished by the time of return.
+(define_insn_reservation "cortex_r4_call" 32
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "call"))
+ "nothing")
+
+;; Status register access instructions are not currently emitted.
+
+;; Load instructions.
+;; We do not model the "addr_md_3cycle" cases and assume that the
+;; accesses that follow are correctly aligned.
+
+(define_insn_reservation "cortex_r4_load_1_2" 3
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "load1,load2"))
+ "cortex_r4_load_store")
+
+(define_insn_reservation "cortex_r4_load_3_4" 4
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "load3,load4"))
+ "cortex_r4_load_store_2")
+
+;; If a producing load is followed by an instruction consuming only
+;; as a Normal Reg, there is one fewer cycle of latency.
+
+(define_bypass 2 "cortex_r4_load_1_2"
+ "cortex_r4_alu")
+(define_bypass 2 "cortex_r4_load_1_2"
+ "cortex_r4_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 2 "cortex_r4_load_1_2"
+ "cortex_r4_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+(define_bypass 3 "cortex_r4_load_3_4"
+ "cortex_r4_alu")
+(define_bypass 3 "cortex_r4_load_3_4"
+ "cortex_r4_alu_shift"
+ "arm_no_early_alu_shift_dep")
+(define_bypass 3 "cortex_r4_load_3_4"
+ "cortex_r4_alu_shift_reg"
+ "arm_no_early_alu_shift_value_dep")
+
+;; If a producing load is followed by an instruction consuming only
+;; as a Late Reg, there are two fewer cycles of latency. Such consumer
+;; instructions are moves and stores.
+
+(define_bypass 1 "cortex_r4_load_1_2"
+ "cortex_r4_mov,cortex_r4_store_1_2,cortex_r4_store_3_4")
+(define_bypass 2 "cortex_r4_load_3_4"
+ "cortex_r4_mov,cortex_r4_store_1_2,cortex_r4_store_3_4")
+
+;; If a producer's result is required as the base or offset of a load,
+;; there is an extra cycle of latency.
+
+(define_bypass 3 "cortex_r4_alu,cortex_r4_mov,cortex_r4_alu_shift,\
+ cortex_r4_alu_shift_reg"
+ "cortex_r4_load_1_2,cortex_r4_load_3_4")
+
+(define_bypass 4 "cortex_r4_mul_3,cortex_r4_mla_3,cortex_r4_smlald"
+ "cortex_r4_load_1_2,cortex_r4_load_3_4")
+
+(define_bypass 5 "cortex_r4_mul_4,cortex_r4_mla_4,cortex_r4_mull"
+ "cortex_r4_load_1_2,cortex_r4_load_3_4")
+
+;; Store instructions.
+
+(define_insn_reservation "cortex_r4_store_1_2" 0
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "store1,store2"))
+ "cortex_r4_load_store")
+
+(define_insn_reservation "cortex_r4_store_3_4" 0
+ (and (eq_attr "tune" "cortexr4")
+ (eq_attr "type" "store3,store4"))
+ "cortex_r4_load_store_2")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/crti.asm b/gcc-4.2.1-5666.3/gcc/config/arm/crti.asm
new file mode 100644
index 000000000..166a3ce34
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/crti.asm
@@ -0,0 +1,84 @@
+# Copyright (C) 2001 Free Software Foundation, Inc.
+# Written By Nick Clifton
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+#ifdef __ELF__
+#define TYPE(x) .type x,function
+#else
+#define TYPE(x)
+#endif
+
+ # Note - this macro is complemented by the FUNC_END macro
+ # in crtn.asm. If you change this macro you must also change
+ # that macro to match.
+.macro FUNC_START
+#ifdef __thumb__
+ .thumb
+
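+ # Save the call-preserved low registers plus lr. r3 is included
+ # only to keep the stack eight-byte aligned (see the matching
+ # note in crtn.asm).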
+ push {r3, r4, r5, r6, r7, lr}
+#else
+ .arm
+ # Create a stack frame and save any call-preserved registers
+ mov ip, sp
+ stmdb sp!, {r3, r4, r5, r6, r7, r8, r9, sl, fp, ip, lr, pc}
+ sub fp, ip, #4
+#endif
+.endm
+
+ .file "crti.asm"
+
+ .section ".init"
+ .align 2
+ .global _init
+#ifdef __thumb__
+ .thumb_func
+#endif
+ TYPE(_init)
+_init:
+ FUNC_START
+
+
+ .section ".fini"
+ .align 2
+ .global _fini
+#ifdef __thumb__
+ .thumb_func
+#endif
+ TYPE(_fini)
+_fini:
+ FUNC_START
+
+# end of crti.asm
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/crtn.asm b/gcc-4.2.1-5666.3/gcc/config/arm/crtn.asm
new file mode 100644
index 000000000..360afae97
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/crtn.asm
@@ -0,0 +1,79 @@
+# Copyright (C) 2001, 2004 Free Software Foundation, Inc.
+# Written By Nick Clifton
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ # Note - this macro is complemented by the FUNC_START macro
+ # in crti.asm. If you change this macro you must also change
+ # that macro to match.
+ #
+ # Note - we do not try any fancy optimizations of the return
+ # sequences here; it is just not worth it. Instead keep things
+ # simple. Restore all the saved registers, including the link
+ # register and then perform the correct function return instruction.
+ # We also save/restore r3 to ensure stack alignment.
+.macro FUNC_END
+#ifdef __thumb__
+ .thumb
+
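+ # Restore r4-r7 (the r3 slot is only alignment padding), then
+ # fetch the saved lr through r3 so the common return sequence
+ # below can use it.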
+ pop {r3, r4, r5, r6, r7}
+ pop {r3}
+ mov lr, r3
+#else
+ .arm
+
+ sub sp, fp, #40
+ ldmfd sp, {r4, r5, r6, r7, r8, r9, sl, fp, sp, lr}
+#endif
+
+#if defined __THUMB_INTERWORK__ || defined __thumb__
+ bx lr
+#else
+ mov pc, lr
+#endif
+.endm
+
+
+ .file "crtn.asm"
+
+ .section ".init"
+ ;;
+ FUNC_END
+
+ .section ".fini"
+ ;;
+ FUNC_END
+
+# end of crtn.asm
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/darwin.h b/gcc-4.2.1-5666.3/gcc/config/arm/darwin.h
new file mode 100644
index 000000000..ecf00c7b6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/darwin.h
@@ -0,0 +1,384 @@
+/* APPLE LOCAL file ARM darwin target */
+
+/* Size of the Obj-C jump buffer. */
+#define OBJC_JBLEN 28
+
+#define SUBTARGET_CPU_DEFAULT arm920
+
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC ""
+
+#define DEFAULT_TARGET_ARCH "arm"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (arm Darwin)");
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__arm__") ; \
+ builtin_define ("__arm"); \
+ builtin_define ("__LITTLE_ENDIAN__"); \
+ darwin_cpp_builtins (pfile); \
+ } \
+ while (0)
+
+/* We want -fPIC by default, unless we're using -static to compile for
+ the kernel or some such. */
+
+#undef CC1_SPEC
+#define CC1_SPEC "%<faltivec %<mcpu=G4 %<mcpu=G5 \
+%{!mmacosx-version-min=*: %{!miphoneos-version-min=*: %(darwin_cc1_minversion)}} \
+%{static: %{Zdynamic: %e conflicting code gen style switches are used}} \
+%{static: %{mdynamic-no-pic: %e conflicting code gen style switches are used}} \
+%{!static:%{!mdynamic-no-pic:-fPIC}} \
+%{!fbuiltin-strcat:-fno-builtin-strcat} \
+%{!fbuiltin-strcpy:-fno-builtin-strcpy} \
+%<fbuiltin-strcat \
+%<fbuiltin-strcpy \
+%<pg"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:-lSystem}"
+
+/* Use the following macro for any Darwin/ARM-specific command-line option
+ translation. */
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "", "" }
+
+#define REGISTER_PREFIX ""
+
+/* The assembler's names for the registers. Note that the ?xx registers
+ * are there so that VFPv3/NEON registers D16-D31 have the same spacing
+ * as D0-D15 (each of which is overlaid on two S registers), although
+ * there are no actual single-precision registers which correspond to
+ * D16-D31. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10","r11","ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp", \
+ "mv0", "mv1", "mv2", "mv3", \
+ "mv4", "mv5", "mv6", "mv7", \
+ "mv8", "mv9", "mv10", "mv11", \
+ "mv12", "mv13", "mv14", "mv15", \
+ "wcgr0", "wcgr1", "wcgr2", "wcgr3", \
+ "wr0", "wr1", "wr2", "wr3", \
+ "wr4", "wr5", "wr6", "wr7", \
+ "wr8", "wr9", "wr10", "wr11", \
+ "wr12", "wr13", "wr14", "wr15", \
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", \
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", \
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", \
+ "d16", "?16", "d17", "?17", "d18", "?18", "d19", "?19", \
+ "d20", "?20", "d21", "?21", "d22", "?22", "d23", "?23", \
+ "d24", "?24", "d25", "?25", "d26", "?26", "d27", "?27", \
+ "d28", "?28", "d29", "?29", "d30", "?30", "d31", "?31", \
+ "vfpcc" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"rfp", 9}, /* Gcc used to call it this */ \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"sl", 10}, /* sl */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15}, /* pc */ \
+ {"mvf0", 27}, \
+ {"mvf1", 28}, \
+ {"mvf2", 29}, \
+ {"mvf3", 30}, \
+ {"mvf4", 31}, \
+ {"mvf5", 32}, \
+ {"mvf6", 33}, \
+ {"mvf7", 34}, \
+ {"mvf8", 35}, \
+ {"mvf9", 36}, \
+ {"mvf10", 37}, \
+ {"mvf11", 38}, \
+ {"mvf12", 39}, \
+ {"mvf13", 40}, \
+ {"mvf14", 41}, \
+ {"mvf15", 42}, \
+ {"mvd0", 27}, \
+ {"mvd1", 28}, \
+ {"mvd2", 29}, \
+ {"mvd3", 30}, \
+ {"mvd4", 31}, \
+ {"mvd5", 32}, \
+ {"mvd6", 33}, \
+ {"mvd7", 34}, \
+ {"mvd8", 35}, \
+ {"mvd9", 36}, \
+ {"mvd10", 37}, \
+ {"mvd11", 38}, \
+ {"mvd12", 39}, \
+ {"mvd13", 40}, \
+ {"mvd14", 41}, \
+ {"mvd15", 42}, \
+ {"mvfx0", 27}, \
+ {"mvfx1", 28}, \
+ {"mvfx2", 29}, \
+ {"mvfx3", 30}, \
+ {"mvfx4", 31}, \
+ {"mvfx5", 32}, \
+ {"mvfx6", 33}, \
+ {"mvfx7", 34}, \
+ {"mvfx8", 35}, \
+ {"mvfx9", 36}, \
+ {"mvfx10", 37}, \
+ {"mvfx11", 38}, \
+ {"mvfx12", 39}, \
+ {"mvfx13", 40}, \
+ {"mvfx14", 41}, \
+ {"mvfx15", 42}, \
+ {"mvdx0", 27}, \
+ {"mvdx1", 28}, \
+ {"mvdx2", 29}, \
+ {"mvdx3", 30}, \
+ {"mvdx4", 31}, \
+ {"mvdx5", 32}, \
+ {"mvdx6", 33}, \
+ {"mvdx7", 34}, \
+ {"mvdx8", 35}, \
+ {"mvdx9", 36}, \
+ {"mvdx10", 37}, \
+ {"mvdx11", 38}, \
+ {"mvdx12", 39}, \
+ {"mvdx13", 40}, \
+ {"mvdx14", 41}, \
+ {"mvdx15", 42}, \
+ {"d0", 63}, {"q0", 63}, \
+ {"d1", 65}, \
+ {"d2", 67}, {"q1", 67}, \
+ {"d3", 69}, \
+ {"d4", 71}, {"q2", 71}, \
+ {"d5", 73}, \
+ {"d6", 75}, {"q3", 75}, \
+ {"d7", 77}, \
+ {"d8", 79}, {"q4", 79}, \
+ {"d9", 81}, \
+ {"d10", 83}, {"q5", 83}, \
+ {"d11", 85}, \
+ {"d12", 87}, {"q6", 87}, \
+ {"d13", 89}, \
+ {"d14", 91}, {"q7", 91}, \
+ {"d15", 93}, \
+ {"q8", 95}, \
+ {"q9", 99}, \
+ {"q10", 103}, \
+ {"q11", 107}, \
+ {"q12", 111}, \
+ {"q13", 115}, \
+ {"q14", 119}, \
+ {"q15", 123} \
+}
+#endif
+
+#define DBX_DEBUGGING_INFO 1
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ ARM_DECLARE_FUNCTION_SIZE (FILE, FNAME, DECL); \
+ } \
+ while (0)
+
+#define DARWIN_SUBARCH_SPEC " \
+ %{march=armv6k:armv6; \
+ march=armv5tej:armv5; \
+ march=xscale:xscale; \
+ march=armv4t:armv4t; \
+ march=armv7:armv7; \
+ march=armv7-a:armv7; \
+ march=armv7-r:armv7; \
+ march=armv7-m:armv7; \
+ march=armv7a:armv7; \
+ march=armv7r:armv7; \
+ march=armv7m:armv7; \
+ mcpu=arm10tdmi:armv5; \
+ mcpu=arm1020t:armv5; \
+ mcpu=arm9e:armv5; \
+ mcpu=arm946e-s:armv5; \
+ mcpu=arm966e-s:armv5; \
+ mcpu=arm968e-s:armv5; \
+ mcpu=arm10e:armv5; \
+ mcpu=arm1020e:armv5; \
+ mcpu=arm1022e:armv5; \
+ mcpu=arm926ej-s:armv5; \
+ mcpu=arm1026ej-s:armv5; \
+ mcpu=xscale:xscale; \
+ mcpu=arm1136j-s:armv6; \
+ mcpu=arm1136jf-s:armv6; \
+ mcpu=arm1176jz-s:armv6; \
+ mcpu=arm1176jzf-s:armv6; \
+ mcpu=cortex-a8:armv7; \
+ mcpu=cortex-r4:armv7; \
+ mcpu=cortex-m3:armv7; \
+ :arm -force_cpusubtype_ALL}"
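+
+/* (Gloss, not part of the original header:) the spec above uses the
+   %{a:x;b:y;:z} multi-alternative form, which substitutes the first
+   matching alternative; the final ":arm -force_cpusubtype_ALL" branch
+   is the default taken when no -march or -mcpu option matches. */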
+
+#define DARWIN_MINVERSION_SPEC "3.0"
+
+/* Default cc1 option for specifying minimum version number. */
+#define DARWIN_CC1_MINVERSION_SPEC "-miphoneos-version-min=%(darwin_minversion)"
+
+/* Default ld option for specifying minimum version number. */
+#define DARWIN_LD_MINVERSION_SPEC "-iphoneos_version_min %(darwin_minversion)"
+
+/* Use iPhone OS version numbers by default. */
+#define DARWIN_DEFAULT_VERSION_TYPE DARWIN_VERSION_IPHONEOS
+
+#define DARWIN_IPHONEOS_LIBGCC_SPEC "-lgcc_s.1 -lgcc"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ DARWIN_EXTRA_SPECS \
+ { "darwin_arch", DARWIN_SUBARCH_SPEC }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC }
+
+/* This can go away once we can feature test the assembler correctly. */
+#define ASM_DEBUG_SPEC ""
+
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (1) \
+ { \
+ if (!darwin_macosx_version_min && !darwin_iphoneos_version_min) \
+ darwin_iphoneos_version_min = "3.0"; \
+ if (MACHO_DYNAMIC_NO_PIC_P) \
+ { \
+ if (flag_pic) \
+ warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC"); \
+ flag_pic = 0; \
+ } \
+ else if (flag_pic == 1) \
+ { \
+ /* Darwin doesn't support -fpic. */ \
+ warning (0, "-fpic is not supported; -fPIC assumed"); \
+ flag_pic = 2; \
+ } \
+ /* Remove when ld64 generates stubs for us. */ \
+ darwin_stubs = true; \
+ if (profile_flag) \
+ error ("function profiling not supported on this target"); \
+ /* Use -mlongcalls for kexts */ \
+ if (flag_mkernel || flag_apple_kext) \
+ target_flags |= MASK_LONG_CALLS; \
+ /* GCC 4.2+ only works with SDK 3.0+ */ \
+ if (darwin_iphoneos_version_min && \
+ strverscmp (darwin_iphoneos_version_min, "3.0") < 0) \
+ darwin_reserve_r9_on_v6 = 1; \
+ } \
+} while(0)
+
+/* APPLE LOCAL begin 5571707 Allow R9 as caller-saved register */
+#undef SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#define SUBTARGET_CONDITIONAL_REGISTER_USAGE \
+ arm_darwin_subtarget_conditional_register_usage();
+/* APPLE LOCAL end 5571707 Allow R9 as caller-saved register */
+
+#undef TARGET_MACHO
+#define TARGET_MACHO 1
+
+#undef DEFAULT_STRUCTURE_SIZE_BOUNDARY
+#define DEFAULT_STRUCTURE_SIZE_BOUNDARY 8
+
+#undef DOT_WORD
+#define DOT_WORD ".long"
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ asm_fprintf (STREAM, "\t" DOT_WORD "\t%LL%d\n", VALUE)
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ fputs (".comm ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", _new_size); \
+ } while (0)
+
+#undef SUBTARGET_ASM_DECLARE_FUNCTION_NAME
+#define SUBTARGET_ASM_DECLARE_FUNCTION_NAME ARM_DECLARE_FUNCTION_NAME
+
+/* APPLE LOCAL begin 6093388 -mfpu=neon default for v7a */
+/* We default to VFP for v6, NEON for v7 */
+#define FPUTYPE_DEFAULT (arm_arch7a ? FPUTYPE_NEON : FPUTYPE_VFP)
+
+#undef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ((arm_arch6 || arm_arch7a) ? ARM_FLOAT_ABI_SOFTFP : ARM_FLOAT_ABI_SOFT)
+/* APPLE LOCAL end 6093388 -mfpu=neon default for v7a */
+#undef REGISTER_TARGET_PRAGMAS
+#define REGISTER_TARGET_PRAGMAS DARWIN_REGISTER_TARGET_PRAGMAS
+
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR (1)
+
+#undef GLOBAL_ASM_OP
+#define GLOBAL_ASM_OP "\t.globl "
+
+#define TARGET_64BIT 0
+
+#define C_COMMON_OVERRIDE_OPTIONS do { \
+ SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* The xxxvfp comparisons return 0 or 1. */
+#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, OPERATOR) \
+ ((TARGET_MACHO && TARGET_THUMB && !TARGET_SOFT_FLOAT \
+ && (flag_pic || MACHO_DYNAMIC_NO_PIC_P) \
+ && (MODE == SFmode || MODE == DFmode)) ? 1 : 0)
+
+/* Adjust inlining parameters. */
+#undef SUBTARGET_OPTIMIZATION_OPTIONS
+#define SUBTARGET_OPTIMIZATION_OPTIONS \
+ if (optimize_size) \
+ { \
+ set_param_value ("max-inline-insns-single", 9); \
+ set_param_value ("max-inline-insns-auto", 4); \
+ set_param_value ("inline-call-cost", 2); \
+ }
+
+#undef ARM_DECLARE_FUNCTION_SIZE
+#define ARM_DECLARE_FUNCTION_SIZE(STREAM, NAME, DECL) \
+ if (!TARGET_LONG_CALLS || ! DECL_SECTION_NAME (DECL)) \
+ arm_encode_call_attribute (DECL, SYMBOL_SHORT_CALL)
+
+/* Remove limit for -Os */
+#undef MAX_CONDITIONAL_EXECUTE
+#define MAX_CONDITIONAL_EXECUTE (optimize_size ? INT_MAX : (BRANCH_COST + 1))
+
+#undef TARGET_IASM_OP_CONSTRAINT
+#define TARGET_IASM_OP_CONSTRAINT \
+ { "ldr", 2, "m" },
+
+#define OBJC_TARGET_FLAG_OBJC_ABI \
+ do { \
+ if (flag_objc_abi == -1) \
+ flag_objc_abi = 2; \
+ if (flag_objc_legacy_dispatch == -1) \
+ flag_objc_legacy_dispatch = 1; \
+ } while (0)
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/darwin.opt b/gcc-4.2.1-5666.3/gcc/config/arm/darwin.opt
new file mode 100644
index 000000000..de4a4eaca
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/darwin.opt
@@ -0,0 +1,9 @@
+; APPLE LOCAL file ARM Macintosh alignment
+malign-mac68k
+Target Report Mask(ALIGN_MAC68K) Var(darwin_alignment_flags)
+Align structs and unions according to mac68k rules
+
+malign-natural
+Target Report Mask(ALIGN_NATURAL) Var(darwin_alignment_flags)
+Align structs and unions according to natural rules
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/ecos-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/ecos-elf.h
new file mode 100644
index 000000000..22eefe497
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/ecos-elf.h
@@ -0,0 +1,28 @@
+/* Definitions for ecos based ARM systems using ELF
+ Copyright (C) 1998, 2001 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF Ecos)", stderr);
+
+#define HAS_INIT_SECTION
+
+#undef INVOKE_main
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/elf.h
new file mode 100644
index 000000000..36a76e533
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/elf.h
@@ -0,0 +1,159 @@
+/* Definitions of target machine for GNU compiler.
+ For ARM with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org> and
+ Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#ifndef OBJECT_FORMAT_ELF
+ #error elf.h included before elfos.h
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__ELF__"
+#endif
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "subtarget_extra_asm_spec", SUBTARGET_EXTRA_ASM_SPEC }, \
+ { "subtarget_asm_float_spec", SUBTARGET_ASM_FLOAT_SPEC },
+#endif
+
+#ifndef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC ""
+#endif
+
+#ifndef SUBTARGET_ASM_FLOAT_SPEC
+#define SUBTARGET_ASM_FLOAT_SPEC "\
+%{mapcs-float:-mfloat}"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mlittle-endian:-EL} \
+%{mcpu=*:-mcpu=%*} \
+%{march=*:-march=%*} \
+%{mapcs-*:-mapcs-%*} \
+%(subtarget_asm_float_spec) \
+%{mthumb-interwork:-mthumb-interwork} \
+%{msoft-float:-mfloat-abi=soft} %{mhard-float:-mfloat-abi=hard} \
+%{mfloat-abi=*} %{mfpu=*} \
+%(subtarget_extra_asm_spec)"
+#endif
+
+/* The ARM uses @ as a comment character so we need to redefine
+ TYPE_OPERAND_FMT. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+/* We might need an ARM-specific header for function declarations. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ARM_DECLARE_FUNCTION_NAME (FILE, NAME, DECL); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ARM_OUTPUT_FN_UNWIND (FILE, TRUE); \
+ } \
+ while (0)
+
+/* We might need an ARM-specific trailer for function declarations. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ ARM_OUTPUT_FN_UNWIND (FILE, FALSE); \
+ ARM_DECLARE_FUNCTION_SIZE (FILE, FNAME, DECL); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+/* We put ARM and Thumb-2 jump tables in the text section, because it makes
+ the code more efficient, but for Thumb-1 it's better to put them out of
+ band. */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* APPLE LOCAL begin ARM compact switch tables */
+/* The above is no longer true. */
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_EITHER)
+/* APPLE LOCAL end ARM compact switch tables */
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
+#endif
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/elf)", stderr)
+#endif
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "marm", "mlittle-endian", "msoft-float", "mno-thumb-interwork", "fno-leading-underscore" }
+#endif
+
+#define TARGET_ASM_FILE_START_APP_OFF true
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+
+/* Output an element in the static constructor array. */
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR arm_elf_asm_constructor
+
+/* For PIC code we need to explicitly specify (PLT) and (GOT) relocs. */
+#define NEED_PLT_RELOC flag_pic
+#define NEED_GOT_RELOC flag_pic
+
+/* The ELF assembler handles GOT addressing differently to NetBSD. */
+#define GOT_PCREL 0
+
+/* Biggest alignment supported by the object file format of this
+ machine. Use this macro to limit the alignment which can be
+ specified using the `__attribute__ ((aligned (N)))' construct. If
+ not defined, the default value is `BIGGEST_ALIGNMENT'. */
+#define MAX_OFILE_ALIGNMENT (32768 * 8)
+
+/* Align output to a power of two. Note ".align 0" is redundant,
+ and also GAS will treat it as ".align 2" which we do not want. */
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ if ((POWER) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", POWER); \
+ } \
+ while (0)
+
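+/* A worked example (not part of the original header): with POWER == 3
+   this prints "\t.align\t3", which GAS on ARM/ELF reads as a power of
+   two, i.e. 8-byte alignment; with POWER == 0 nothing is printed, per
+   the note above. */
+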
+/* The EABI doesn't provide a way of implementing init_priority. */
+#define SUPPORTS_INIT_PRIORITY (!TARGET_AAPCS_BASED)
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/fpa.md b/gcc-4.2.1-5666.3/gcc/config/arm/fpa.md
new file mode 100644
index 000000000..f314ca291
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/fpa.md
@@ -0,0 +1,945 @@
+;;- Machine description for FPA co-processor for ARM cpus.
+;; Copyright 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+;; 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; Some FPA mnemonics are ambiguous between conditional infixes and
+;; conditional suffixes. All instructions use a conditional infix,
+;; even in unified assembly mode.
+
+;; APPLE LOCAL end v7 support. Merge from mainline
+;; FPA automaton.
+(define_automaton "armfp")
+
+;; Floating point unit (FPA)
+(define_cpu_unit "fpa" "armfp")
+
+; The fpa10 doesn't really have a memory read unit, but it can start
+; to speculatively execute the instruction in the pipeline, provided
+; the data is already loaded, so pretend reads have a delay of 2 (and
+; that the pipeline is infinite).
+(define_cpu_unit "fpa_mem" "arm")
+
+(define_insn_reservation "fdivx" 71
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx"))
+ "core+fpa*69")
+
+(define_insn_reservation "fdivd" 59
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd"))
+ "core+fpa*57")
+
+(define_insn_reservation "fdivs" 31
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs"))
+ "core+fpa*29")
+
+(define_insn_reservation "fmul" 9
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul"))
+ "core+fpa*7")
+
+(define_insn_reservation "ffmul" 6
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul"))
+ "core+fpa*4")
+
+(define_insn_reservation "farith" 4
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith"))
+ "core+fpa*2")
+
+(define_insn_reservation "ffarith" 2
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith"))
+ "core+fpa*2")
+
+(define_insn_reservation "r_2_f" 5
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f"))
+ "core+fpa*3")
+
+(define_insn_reservation "f_2_r" 1
+ (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r"))
+ "core+fpa*2")
+
+(define_insn_reservation "f_load" 3
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load"))
+ "fpa_mem+core*3")
+
+(define_insn_reservation "f_store" 4
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store"))
+ "core*4")
+
+(define_insn_reservation "r_mem_f" 6
+ (and (eq_attr "model_wbuf" "no")
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")))
+ "core*6")
+
+(define_insn_reservation "f_mem_r" 7
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r"))
+ "core*7")
+
+
+(define_insn "*addsf3_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "%f,f")
+ (match_operand:SF 2 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*adddf3_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "%f,f")
+ (match_operand:DF 2 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*adddf_esfdf_df_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*adddf_df_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "adf%?d\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*adddf_esfdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "adf%?d\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*subsf3_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "arm_float_rhs_operand" "f,G")
+ (match_operand:SF 2 "arm_float_rhs_operand" "fG,f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+ [(set_attr "type" "farith")]
+)
+
+(define_insn "*subdf3_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "arm_float_rhs_operand" "f,G")
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG,f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*subdf_esfdf_df_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "suf%?d\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*subdf_df_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "arm_float_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*subdf_esfdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "suf%?d\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*mulsf3_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "fml%?s\\t%0, %1, %2"
+ [(set_attr "type" "ffmul")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*muldf3_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "muf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fmul")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*muldf_esfdf_df_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "muf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fmul")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*muldf_df_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "muf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fmul")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*muldf_esfdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "muf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fmul")
+ (set_attr "predicable" "yes")]
+)
+
+;; Division insns
+
+(define_insn "*divsf3_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "arm_float_rhs_operand" "f,G")
+ (match_operand:SF 2 "arm_float_rhs_operand" "fG,f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+ [(set_attr "type" "fdivs")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*divdf3_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "arm_float_rhs_operand" "f,G")
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG,f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*divdf_esfdf_df_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "dvf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*divdf_df_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "arm_float_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rdf%?d\\t%0, %2, %1"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*divdf_esfdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "dvf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*modsf3_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rmf%?s\\t%0, %1, %2"
+ [(set_attr "type" "fdivs")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*moddf3_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rmf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*moddf_esfdf_df_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "arm_float_rhs_operand" "fG")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rmf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*moddf_df_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rmf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*moddf_esfdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "rmf%?d\\t%0, %1, %2"
+ [(set_attr "type" "fdivd")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*negsf2_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "mnf%?s\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*negdf2_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "mnf%?d\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*negdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "mnf%?d\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*abssf2_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "abs%?s\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*absdf2_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "abs%?d\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*absdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "abs%?d\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*sqrtsf2_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "sqt%?s\\t%0, %1"
+ [(set_attr "type" "float_em")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*sqrtdf2_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "sqt%?d\\t%0, %1"
+ [(set_attr "type" "float_em")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*sqrtdf_esfdf_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "sqt%?d\\t%0, %1"
+ [(set_attr "type" "float_em")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*floatsisf2_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "flt%?s\\t%0, %1"
+ [(set_attr "type" "r_2_f")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*floatsidf2_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "flt%?d\\t%0, %1"
+ [(set_attr "type" "r_2_f")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*fix_truncsfsi2_fpa"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "fix%?z\\t%0, %1"
+ [(set_attr "type" "f_2_r")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*fix_truncdfsi2_fpa"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "fix%?z\\t%0, %1"
+ [(set_attr "type" "f_2_r")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*truncdfsf2_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "mvf%?s\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*extendsfdf2_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "mvf%?d\\t%0, %1"
+ [(set_attr "type" "ffarith")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*movsf_fpa"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f, m,f,r,r,r, m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_ARM
+ && TARGET_HARD_FLOAT && TARGET_FPA
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "predicable" "yes")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load1,store1")
+ (set_attr "pool_range" "*,*,1024,*,*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,1012,*,*,*,*,4084,*")]
+)
+
+(define_insn "*movdf_fpa"
+ [(set (match_operand:DF 0 "nonimmediate_operand"
+ "=r,Q,r,m,r, f, f,f, m,!f,!r")
+ (match_operand:DF 1 "general_operand"
+ "Q, r,r,r,mF,fG,H,mF,f,r, f"))]
+ "TARGET_ARM
+ && TARGET_HARD_FLOAT && TARGET_FPA
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "*
+ {
+ switch (which_alternative)
+ {
+ default:
+ case 0: return \"ldm%(ia%)\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%(ia%)\\t%m0, %M1\\t%@ double\";
+ case 2: return \"#\";
+ case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpa_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpa (operands);
+ }
+ }
+ "
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "predicable" "yes")
+ (set_attr "type"
+ "load1,store2,*,store2,load1,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")
+ (set_attr "pool_range" "*,*,*,*,1020,*,*,1024,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,*,1008,*,*,1008,*,*,*")]
+)
+
+;; We treat XFmode as meaning 'internal format'. It's the right size and we
+;; don't use it for anything else. We only support moving between FPA
+;; registers and moving an FPA register to/from memory.
+(define_insn "*movxf_fpa"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,f,m")
+ (match_operand:XF 1 "general_operand" "f,m,f"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_FPA
+ && (register_operand (operands[0], XFmode)
+ || register_operand (operands[1], XFmode))"
+ "*
+ switch (which_alternative)
+ {
+ default:
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ return \"ldf%?e\\t%0, %1\";
+ return \"lfm%?\\t%0, 1, %1\";
+ case 2: if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
+ return \"stf%?e\\t%1, %0\";
+ return \"sfm%?\\t%1, 1, %0\";
+ }
+ "
+ [(set_attr "length" "4,4,4")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "ffarith,f_load,f_store")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; stfs/ldfs always use a conditional infix.  This works around the
+;; ambiguity between "stf pl s" and "stfp ls".
+(define_insn "*thumb2_movsf_fpa"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f, m,f,r,r,r, m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_THUMB2
+ && TARGET_HARD_FLOAT && TARGET_FPA
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1 @bar
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "ce_count" "1,1,1,1,2,2,1,1,1")
+ (set_attr "predicable" "yes")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load1,store1")
+ (set_attr "pool_range" "*,*,1024,*,*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,1012,*,*,*,*,0,*")]
+)
+
+;; Not predicable because we don't know the number of instructions.
+(define_insn "*thumb2_movdf_fpa"
+ [(set (match_operand:DF 0 "nonimmediate_operand"
+ "=r,Q,r,m,r, f, f,f, m,!f,!r")
+ (match_operand:DF 1 "general_operand"
+ "Q, r,r,r,mF,fG,H,mF,f,r, f"))]
+ "TARGET_THUMB2
+ && TARGET_HARD_FLOAT && TARGET_FPA
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ default:
+ case 0: return \"ldm%(ia%)\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%(ia%)\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpa_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpa (operands);
+ }
+ }
+ "
+ [(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+ "load1,store2,*,store2,load1,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")
+ (set_attr "pool_range" "*,*,*,*,4092,*,*,1024,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,*,0,*,*,1020,*,*,*")]
+)
+
+;; Saving and restoring the floating point registers in the prologue should
+;; be done in XFmode, even though we don't support that for anything else
+;; (Well, strictly it's 'internal representation', but that's effectively
+;; XFmode).
+;; Not predicable because we don't know the number of instructions.
+
+(define_insn "*thumb2_movxf_fpa"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_FPA && reload_completed"
+ "*
+ switch (which_alternative)
+ {
+ default:
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpa_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpa (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+ "
+ [(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")
+ (set_attr "pool_range" "*,*,1024,*,*,*,*")
+ (set_attr "neg_pool_range" "*,*,1004,*,*,*,*")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*cmpsf_fpa"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmpdf_fpa"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmpesfdf_df_fpa"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmpdf_esfdf_fpa"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "cmf%?\\t%0, %1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmpsf_trap_fpa"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmpdf_trap_fpa"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmp_esfdf_df_trap_fpa"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "arm_float_add_operand" "fG,H")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*cmp_df_esfdf_trap_fpa"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FPA"
+ "cmf%?e\\t%0, %1"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")]
+)
+
+(define_insn "*movsfcc_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "arm_float_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "arm_float_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*movdfcc_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "arm_float_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "arm_float_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")]
+)
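+
+;; A minimal C sketch of the kind of source these conditional-move
+;; patterns can cover once if-conversion has built an if_then_else
+;; (illustrative only; whether it actually matches depends on the
+;; optimizers and operands):
+;;
+;;   double pick (int c, double a, double b) { return c ? a : b; }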
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb2_movsfcc_fpa"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "arm_float_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "arm_float_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ it\\t%D3\;mvf%D3s\\t%0, %2
+ it\\t%D3\;mnf%D3s\\t%0, #%N2
+ it\\t%d3\;mvf%d3s\\t%0, %1
+ it\\t%d3\;mnf%d3s\\t%0, #%N1
+ ite\\t%d3\;mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ ite\\t%d3\;mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ ite\\t%d3\;mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ ite\\t%d3\;mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "6,6,6,6,10,10,10,10")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*thumb2_movdfcc_fpa"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "arm_float_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "arm_float_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_FPA"
+ "@
+ it\\t%D3\;mvf%D3d\\t%0, %2
+ it\\t%D3\;mnf%D3d\\t%0, #%N2
+ it\\t%d3\;mvf%d3d\\t%0, %1
+ it\\t%d3\;mnf%d3d\\t%0, #%N1
+ ite\\t%d3\;mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ ite\\t%d3\;mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ ite\\t%d3\;mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ ite\\t%d3\;mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "6,6,6,6,10,10,10,10")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/freebsd.h b/gcc-4.2.1-5666.3/gcc/config/arm/freebsd.h
new file mode 100644
index 000000000..6bae83def
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/freebsd.h
@@ -0,0 +1,68 @@
+/* Definitions for StrongARM running FreeBSD using the ELF format
+ Copyright (C) 2001, 2004 Free Software Foundation, Inc.
+ Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER }
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC FBSD_CPP_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC " \
+ %{p:%nconsider using `-pg' instead of `-p' with gprof(1) } \
+ %{v:-V} \
+ %{assert*} %{R*} %{rpath*} %{defsym*} \
+ %{shared:-Bshareable %{h*} %{soname*}} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker %(fbsd_dynamic_linker) }} \
+ %{static:-Bstatic}} \
+ %{symbolic:-Bsymbolic}"
+
+
+/************************[ Target stuff ]***********************************/
+
+/* Define the actual types of some ANSI-mandated types.
+ Needs to agree with <machine/ansi.h>. GCC defaults come from c-decl.c,
+ c-common.c, and config/<arch>/<arch>.h. */
+
+/* arm.h gets this wrong for FreeBSD. We use the GCC defaults instead. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* We use the GCC defaults here. */
+#undef WCHAR_TYPE
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
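+
+/* A quick C sketch of the resulting type sizes (C11 _Static_assert,
+   illustrative only -- this compiler itself predates it):
+
+     _Static_assert (sizeof (size_t) == 4, "size_t is unsigned int");
+     _Static_assert (sizeof (wchar_t) == 4, "wchar_t is 32 bits");  */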
+
+#undef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_strongarm
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (FreeBSD/StrongARM ELF)");
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/gentune.sh b/gcc-4.2.1-5666.3/gcc/config/arm/gentune.sh
new file mode 100755
index 000000000..40c054110
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/gentune.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Generate arm-tune.md, a file containing the tune attribute from the list of
+# CPUs in arm-cores.def
+
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically by gentune.sh from arm-cores.def"
+
+allcores=`awk -F'[(, ]+' '/^ARM_CORE/ { cores = cores$3"," } END { print cores } ' $1`
+
+echo "(define_attr \"tune\""
+echo " \"$allcores\"" | sed -e 's/,"$/"/'
+echo " (const (symbol_ref \"arm_tune\")))"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/host-arm-darwin.c b/gcc-4.2.1-5666.3/gcc/config/arm/host-arm-darwin.c
new file mode 100644
index 000000000..a74503af4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/host-arm-darwin.c
@@ -0,0 +1,33 @@
+/* APPLE LOCAL file ARM native compiler support */
+/* arm-darwin host-specific hook definitions.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "config/host-darwin.h"
+
+/* Darwin doesn't do anything special for arm hosts; this file exists just
+ to include config/host-darwin.h. */
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/hwdiv.md b/gcc-4.2.1-5666.3/gcc/config/arm/hwdiv.md
new file mode 100644
index 000000000..3b940f2f8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/hwdiv.md
@@ -0,0 +1,42 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM instruction patterns for hardware division
+;; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (div:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "arm_arch_hwdiv"
+ "sdiv%?\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "insn" "sdiv")]
+)
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "arm_arch_hwdiv"
+ "udiv%?\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "insn" "udiv")]
+)
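+
+;; A minimal C sketch of code these patterns can match when the selected
+;; architecture sets arm_arch_hwdiv (illustrative only):
+;;
+;;   int quot (int a, int b) { return a / b; }                 /* sdiv */
+;;   unsigned uquot (unsigned a, unsigned b) { return a / b; } /* udiv */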
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-df.S b/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-df.S
new file mode 100644
index 000000000..6df9fbef0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-df.S
@@ -0,0 +1,1869 @@
+/* ieee754-df.S double-precision floating point support for ARM
+
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Nicolas Pitre (nico@cam.org)
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/*
+ * Notes:
+ *
+ * The goal of this code is to be as fast as possible.  This is
+ * not meant to be easy to understand for the casual reader.
+ * For slightly simpler code please see the single precision version
+ * of this file.
+ *
+ * Only the default rounding mode is supported, in the interest of
+ * performance.  Exceptions aren't supported yet, but they could be
+ * added quite easily if necessary without hurting performance.
+ */
+
+
+@ For FPA, float words are always big-endian.
+@ For VFP, float words follow the memory system mode.
+#if defined(__VFP_FP__) && !defined(__ARMEB__)
+#define xl r0
+#define xh r1
+#define yl r2
+#define yh r3
+#else
+#define xh r0
+#define xl r1
+#define yh r2
+#define yl r3
+#endif
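+
+@ A minimal C sketch of the same word mapping, assuming a little-endian
+@ VFP target (the union is illustrative only):
+@
+@   union dwords { double d; struct { unsigned int xl, xh; } w; };
+@   /* for -1.0: w.xh == 0xbff00000, w.xl == 0 */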
+
+
+#ifdef L_negdf2
+
+ARM_FUNC_START negdf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_dneg, negdf2)
+
+ @ flip sign bit
+ eor xh, xh, #0x80000000
+ RET
+
+ FUNC_END aeabi_dneg
+ FUNC_END negdf2
+
+#endif
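+
+@ A C equivalent sketch of the negation above (illustrative only):
+@
+@   union { double d; unsigned long long b; } u = { x };
+@   u.b ^= 0x8000000000000000ULL;   /* flip only the sign bit */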
+
+#ifdef L_addsubdf3
+
+ARM_FUNC_START aeabi_drsub
+
+ eor xh, xh, #0x80000000 @ flip sign bit of first arg
+ b 1f
+
+ARM_FUNC_START subdf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_dsub, subdf3)
+
+ eor yh, yh, #0x80000000 @ flip sign bit of second arg
+#if defined(__INTERWORKING_STUBS__)
+ b 1f @ Skip Thumb-code prologue
+#endif
+
+ARM_FUNC_START adddf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_dadd, adddf3)
+
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+1: do_push {r4, r5, lr}
+
+ @ Look for zeroes, equal values, INF, or NAN.
+ shift1 lsl, r4, xh, #1
+ shift1 lsl, r5, yh, #1
+ teq r4, r5
+ do_it eq
+ teqeq xl, yl
+ do_it ne, ttt
+ COND(orr,s,ne) ip, r4, xl
+ COND(orr,s,ne) ip, r5, yl
+ COND(mvn,s,ne) ip, r4, asr #21
+ COND(mvn,s,ne) ip, r5, asr #21
+ beq LSYM(Lad_s)
+
+ @ Compute exponent difference. Make largest exponent in r4,
+ @ corresponding arg in xh-xl, and positive exponent difference in r5.
+ shift1 lsr, r4, r4, #21
+ rsbs r5, r4, r5, lsr #21
+ do_it lt
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ rsblt r5, r5, #0
+ ble 1f
+ add r4, r4, r5
+ eor yl, xl, yl
+ eor yh, xh, yh
+ eor xl, yl, xl
+ eor xh, yh, xh
+ eor yl, xl, yl
+ eor yh, xh, yh
+1:
+ @ If exponent difference is too large, return largest argument
+ @ already in xh-xl. We need up to 54 bits to handle proper rounding
+ @ of 0x1p54 - 1.1.
+ cmp r5, #54
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hi
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(hi, r4, r5)
+
+ @ Convert mantissa to signed integer.
+ tst xh, #0x80000000
+ mov xh, xh, lsl #12
+ mov ip, #0x00100000
+ orr xh, ip, xh, lsr #12
+ beq 1f
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ negs xl, xl
+ sbc xh, xh, xh, lsl #1
+#else
+ rsbs xl, xl, #0
+ rsc xh, xh, #0
+#endif
+1:
+ tst yh, #0x80000000
+ mov yh, yh, lsl #12
+ orr yh, ip, yh, lsr #12
+ beq 1f
+#if defined(__thumb2__)
+ negs yl, yl
+ sbc yh, yh, yh, lsl #1
+#else
+ rsbs yl, yl, #0
+ rsc yh, yh, #0
+#endif
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+1:
+ @ If exponent == difference, one or both args were denormalized.
+ @ Since this is not a common case, rescale them out of line.
+ teq r4, r5
+ beq LSYM(Lad_d)
+LSYM(Lad_x):
+
+ @ Compensate for the exponent overlapping the mantissa MSB added later
+ sub r4, r4, #1
+
+ @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
+ rsbs lr, r5, #32
+ blt 1f
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsl, ip, yl, lr
+ shiftop adds, xl, xl, yl, lsr, r5, yl
+ adc xh, xh, #0
+ shiftop adds, xl, xl, yh, lsl, lr, yl
+ shiftop adcs, xh, xh, yh, asr, r5, yh
+ b 2f
+1: sub r5, r5, #32
+ add lr, lr, #32
+ cmp yl, #1
+ shift1 lsl,ip, yh, lr
+ do_it cs
+ orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
+ shiftop adds, xl, xl, yh, asr, r5, yh
+ adcs xh, xh, yh, asr #31
+2:
+ @ We now have a result in xh-xl-ip.
+ @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
+ and r5, xh, #0x80000000
+ bpl LSYM(Lad_p)
+#if defined(__thumb2__)
+ mov lr, #0
+ negs ip, ip
+ sbcs xl, lr, xl
+ sbc xh, lr, xh
+#else
+ rsbs ip, ip, #0
+ rscs xl, xl, #0
+ rsc xh, xh, #0
+#endif
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ @ Determine how to normalize the result.
+LSYM(Lad_p):
+ cmp xh, #0x00100000
+ bcc LSYM(Lad_a)
+ cmp xh, #0x00200000
+ bcc LSYM(Lad_e)
+
+ @ Result needs to be shifted right.
+ movs xh, xh, lsr #1
+ movs xl, xl, rrx
+ mov ip, ip, rrx
+ add r4, r4, #1
+
+ @ Make sure we did not bust our exponent.
+ mov r2, r4, lsl #21
+ cmn r2, #(2 << 21)
+ bcs LSYM(Lad_o)
+
+ @ Our result is now properly aligned into xh-xl, remaining bits in ip.
+ @ Round with MSB of ip. If halfway between two numbers, round towards
+ @ LSB of xl = 0.
+ @ Pack final result together.
+LSYM(Lad_e):
+ cmp ip, #0x80000000
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq
+ COND(mov,s,eq) ip, xl, lsr #1
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ orr xh, xh, r5
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ @ Result must be shifted left and exponent adjusted.
+LSYM(Lad_a):
+ movs ip, ip, lsl #1
+ adcs xl, xl, xl
+ adc xh, xh, xh
+ tst xh, #0x00100000
+ sub r4, r4, #1
+ bne LSYM(Lad_e)
+
+ @ No rounding necessary since ip will always be 0 at this point.
+LSYM(Lad_l):
+
+#if __ARM_ARCH__ < 5
+
+ teq xh, #0
+ movne r3, #20
+ moveq r3, #52
+ moveq xh, xl
+ moveq xl, #0
+ mov r2, xh
+ cmp r2, #(1 << 16)
+ movhs r2, r2, lsr #16
+ subhs r3, r3, #16
+ cmp r2, #(1 << 8)
+ movhs r2, r2, lsr #8
+ subhs r3, r3, #8
+ cmp r2, #(1 << 4)
+ movhs r2, r2, lsr #4
+ subhs r3, r3, #4
+ cmp r2, #(1 << 2)
+ subhs r3, r3, #2
+ sublo r3, r3, r2, lsr #1
+ sub r3, r3, r2, lsr #3
+
+#else
+
+ teq xh, #0
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, t
+ moveq xh, xl
+ moveq xl, #0
+ clz r3, xh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ addeq r3, r3, #32
+ sub r3, r3, #11
+
+#endif
+
+ @ determine how to shift the value.
+ subs r2, r3, #32
+ bge 2f
+ adds r2, r2, #12
+ ble 1f
+
+ @ shift value left 21 to 31 bits, or actually right 11 to 1 bits
+ @ since a register switch happened above.
+ add ip, r2, #20
+ rsb r2, r2, #12
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsl, xl, xh, ip
+ shift1 lsr, xh, xh, r2
+ b 3f
+
+ @ actually shift value left 1 to 20 bits, which might also represent
+ @ 32 to 52 bits if counting the register switch that happened earlier.
+1: add r2, r2, #20
+2: do_it le
+ rsble ip, r2, #32
+ shift1 lsl, xh, xh, r2
+#if defined(__thumb2__)
+ lsr ip, xl, ip
+ itt le
+ orrle xh, xh, ip
+ lslle xl, xl, r2
+#else
+ orrle xh, xh, xl, lsr ip
+ movle xl, xl, lsl r2
+#endif
+
+ @ adjust exponent accordingly.
+3: subs r4, r4, r3
+ do_it ge, tt
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ addge xh, xh, r4, lsl #20
+ orrge xh, xh, r5
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(ge, r4, r5)
+
+ @ Exponent too small, denormalize result.
+ @ Find out proper shift value.
+ mvn r4, r4
+ subs r4, r4, #31
+ bge 2f
+ adds r4, r4, #12
+ bgt 1f
+
+ @ shift result right of 1 to 20 bits, sign is in r5.
+ add r4, r4, #20
+ rsb r2, r4, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsr, xl, xl, r4
+ shiftop orr, xl, xl, xh, lsl, r2, yh
+ shiftop orr, xh, r5, xh, lsr, r4, yh
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ @ shift result right of 21 to 31 bits, or left 11 to 1 bits after
+ @ a register switch from xh to xl.
+1: rsb r4, r4, #12
+ rsb r2, r4, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsr, xl, xl, r2
+ shiftop orr, xl, xl, xh, lsl, r4, yh
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ mov xh, r5
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
+ @ from xh to xl.
+ /* APPLE LOCAL v7 support. Merge from mainline */
+2: shift1 lsr, xl, xh, r4
+ mov xh, r5
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ @ Adjust exponents for denormalized arguments.
+ @ Note that r4 must not remain equal to 0.
+LSYM(Lad_d):
+ teq r4, #0
+ eor yh, yh, #0x00100000
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, te
+ eoreq xh, xh, #0x00100000
+ addeq r4, r4, #1
+ subne r5, r5, #1
+ b LSYM(Lad_x)
+
+
+LSYM(Lad_s):
+ mvns ip, r4, asr #21
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(mvn,s,ne) ip, r5, asr #21
+ beq LSYM(Lad_i)
+
+ teq r4, r5
+ do_it eq
+ teqeq xl, yl
+ beq 1f
+
+ @ Result is x + 0.0 = x or 0.0 + y = y.
+ orrs ip, r4, xl
+ do_it eq, t
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ moveq xh, yh
+ moveq xl, yl
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+1: teq xh, yh
+
+ @ Result is x - x = 0.
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne, tt
+ movne xh, #0
+ movne xl, #0
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(ne, r4, r5)
+
+ @ Result is x + x = 2x.
+ movs ip, r4, lsr #21
+ bne 2f
+ movs xl, xl, lsl #1
+ adcs xh, xh, xh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs
+ orrcs xh, xh, #0x80000000
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+2: adds r4, r4, #(2 << 21)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, t
+ addcc xh, xh, #(1 << 20)
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(cc, r4, r5)
+ and r5, xh, #0x80000000
+
+ @ Overflow: return INF.
+LSYM(Lad_o):
+ orr xh, r5, #0x7f000000
+ orr xh, xh, #0x00f00000
+ mov xl, #0
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ @ At least one of x or y is INF/NAN.
+ @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
+ @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
+ @ if either is NAN: return NAN
+ @ if opposite sign: return NAN
+ @ otherwise return xh-xl (which is INF or -INF)
+LSYM(Lad_i):
+ mvns ip, r4, asr #21
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, te
+ movne xh, yh
+ movne xl, yl
+ COND(mvn,s,eq) ip, r5, asr #21
+ do_it ne, t
+ movne yh, xh
+ movne yl, xl
+ orrs r4, xl, xh, lsl #12
+ do_it eq, te
+ COND(orr,s,eq) r5, yl, yh, lsl #12
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ teqeq xh, yh
+ orrne xh, xh, #0x00080000 @ quiet NAN
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5)
+
+ FUNC_END aeabi_dsub
+ FUNC_END subdf3
+ FUNC_END aeabi_dadd
+ FUNC_END adddf3
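+
+@ The argument reordering in the prologue above uses the classic
+@ three-XOR trick to swap two values without a scratch register; in C
+@ terms (illustrative only):
+@
+@   x ^= y; y ^= x; x ^= y;   /* x and y are now exchanged */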
+
+ARM_FUNC_START floatunsidf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_ui2d,floatunsidf)
+
+ teq r0, #0
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, t
+ moveq r1, #0
+ RETc(eq)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, lr}
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+ mov r5, #0 @ sign bit is 0
+ /* APPLE LOCAL begin ARM MACH assembler */
+#if !defined(__VFP_FP__) || defined(__ARMEB__)
+ mov xl, r0
+#endif
+ /* APPLE LOCAL end ARM MACH assembler */
+ mov xh, #0
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_ui2d
+ FUNC_END floatunsidf
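+
+@ For comparison, a branch-free C sketch of unsigned-to-double using the
+@ classic 2^52 bias trick (illustrative only; the routine above instead
+@ seeds the exponent and reuses the normalization code at Lad_l):
+@
+@   union { double d; unsigned long long b; } u;
+@   u.b = 0x4330000000000000ULL | n;   /* exactly 0x1p52 + n */
+@   double r = u.d - 0x1.0p52;         /* == (double) n */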
+
+ARM_FUNC_START floatsidf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_i2d,floatsidf)
+
+ teq r0, #0
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, t
+ moveq r1, #0
+ RETc(eq)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, lr}
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+ ands r5, r0, #0x80000000 @ sign bit in r5
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it mi
+ rsbmi r0, r0, #0 @ absolute value
+ /* APPLE LOCAL begin ARM MACH assembler */
+#if !defined(__VFP_FP__) || defined(__ARMEB__)
+ mov xl, r0
+#endif
+ /* APPLE LOCAL end ARM MACH assembler */
+ mov xh, #0
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_i2d
+ FUNC_END floatsidf
+
+ARM_FUNC_START extendsfdf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_f2d,extendsfdf2)
+
+ movs r2, r0, lsl #1 @ toss sign bit
+ mov xh, r2, asr #3 @ stretch exponent
+ mov xh, xh, rrx @ retrieve sign bit
+ mov xl, r2, lsl #28 @ retrieve remaining bits
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, ttt
+ COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent
+ teqne r3, #0xff000000 @ if not 0, check if INF or NAN
+ eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
+ RETc(ne) @ and return it.
+
+ teq r2, #0 @ if actually 0
+ do_it ne, e
+ teqne r3, #0xff000000 @ or INF or NAN
+ RETc(eq) @ we are done already.
+
+ @ value was denormalized. We can normalize it now.
+ do_push {r4, r5, lr}
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ mov r4, #0x380 @ setup corresponding exponent
+ and r5, xh, #0x80000000 @ move sign bit in r5
+ bic xh, xh, #0x80000000
+ b LSYM(Lad_l)
+
+ FUNC_END aeabi_f2d
+ FUNC_END extendsfdf2
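+
+@ A C bit-level sketch of the normal-number case above, showing where
+@ the 0x380 rebias constant comes from (1023 - 127 = 896 = 0x380; f
+@ holds the float's bit pattern; zeros, denormals, INF and NAN need the
+@ extra paths; illustrative only):
+@
+@   unsigned long long s = f >> 31, e = (f >> 23) & 0xff, m = f & 0x7fffff;
+@   unsigned long long d = (s << 63) | ((e + 0x380) << 52) | (m << 29);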
+
+ARM_FUNC_START floatundidf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_ul2d,floatundidf)
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, t
+ mvfeqd f0, #0.0
+#else
+ do_it eq
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+#endif
+ RETc(eq)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0/r1 for backwards
+ @ compatibility.
+ adr ip, LSYM(f0_ret)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, ip, lr}
+#else
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, lr}
+#endif
+
+ mov r5, #0
+ b 2f
+
+ARM_FUNC_START floatdidf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_l2d,floatdidf)
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, t
+ mvfeqd f0, #0.0
+#else
+ do_it eq
+#endif
+ RETc(eq)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0/r1 for backwards
+ @ compatibility.
+ adr ip, LSYM(f0_ret)
+ do_push {r4, r5, ip, lr}
+#else
+ do_push {r4, r5, lr}
+#endif
+
+ ands r5, ah, #0x80000000 @ sign bit in r5
+ bpl 2f
+#if defined(__thumb2__)
+ negs al, al
+ sbc ah, ah, ah, lsl #1
+#else
+ rsbs al, al, #0
+ rsc ah, ah, #0
+#endif
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+2:
+ mov r4, #0x400 @ initial exponent
+ add r4, r4, #(52-1 - 1)
+
+ @ FPA little-endian: must swap the word order.
+ /* APPLE LOCAL begin ARM MACH assembler */
+#if !defined(__VFP_FP__) && !defined(__ARMEB__)
+ mov ip, al
+ mov xh, ah
+ mov xl, ip
+#endif
+ /* APPLE LOCAL end ARM MACH assembler */
+
+ movs ip, xh, lsr #22
+ beq LSYM(Lad_p)
+
+ @ The value is too big. Scale it down a bit...
+ mov r2, #3
+ movs ip, ip, lsr #3
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ addne r2, r2, #3
+ movs ip, ip, lsr #3
+ do_it ne
+ addne r2, r2, #3
+ add r2, r2, ip, lsr #3
+
+ rsb r3, r2, #32
+ shift1 lsl, ip, xl, r3
+ shift1 lsr, xl, xl, r2
+ shiftop orr, xl, xl, xh, lsl, r3, lr
+ shift1 lsr, xh, xh, r2
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ add r4, r4, r2
+ b LSYM(Lad_p)
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+
+ @ Legacy code expects the result to be returned in f0. Copy it
+ @ there as well.
+LSYM(f0_ret):
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r0, r1}
+ ldfd f0, [sp], #8
+ RETLDM
+
+#endif
+
+ FUNC_END floatdidf
+ FUNC_END aeabi_l2d
+ FUNC_END floatundidf
+ FUNC_END aeabi_ul2d
+
+#endif /* L_addsubdf3 */
+
+#ifdef L_muldivdf3
+
+ARM_FUNC_START muldf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_dmul,muldf3)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, r6, lr}
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ orr ip, ip, #0x700
+ ands r4, ip, xh, lsr #20
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, tte
+ COND(and,s,ne) r5, ip, yh, lsr #20
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ teqne r4, ip
+ teqne r5, ip
+ bleq LSYM(Lml_s)
+
+ @ Add exponents together
+ add r4, r4, r5
+
+ @ Determine final sign.
+ eor r6, xh, yh
+
+ @ Convert mantissa to unsigned integer.
+ @ If power of two, branch to a separate path.
+ bic xh, xh, ip, lsl #21
+ bic yh, yh, ip, lsl #21
+ orrs r5, xl, xh, lsl #12
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(orr,s,ne) r5, yl, yh, lsl #12
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ orr xh, xh, #0x00100000
+ orr yh, yh, #0x00100000
+ beq LSYM(Lml_1)
+
+#if __ARM_ARCH__ < 4
+
+ @ Put sign bit in r6, which will be restored in yl later.
+ and r6, r6, #0x80000000
+
+ @ Well, no way to make it shorter without the umull instruction.
+ stmfd sp!, {r6, r7, r8, r9, sl, fp}
+ mov r7, xl, lsr #16
+ mov r8, yl, lsr #16
+ mov r9, xh, lsr #16
+ mov sl, yh, lsr #16
+ bic xl, xl, r7, lsl #16
+ bic yl, yl, r8, lsl #16
+ bic xh, xh, r9, lsl #16
+ bic yh, yh, sl, lsl #16
+ mul ip, xl, yl
+ mul fp, xl, r8
+ mov lr, #0
+ adds ip, ip, fp, lsl #16
+ adc lr, lr, fp, lsr #16
+ mul fp, r7, yl
+ adds ip, ip, fp, lsl #16
+ adc lr, lr, fp, lsr #16
+ mul fp, xl, sl
+ mov r5, #0
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, r7, yh
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, xh, r8
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, r9, yl
+ adds lr, lr, fp, lsl #16
+ adc r5, r5, fp, lsr #16
+ mul fp, xh, sl
+ mul r6, r9, sl
+ adds r5, r5, fp, lsl #16
+ adc r6, r6, fp, lsr #16
+ mul fp, r9, yh
+ adds r5, r5, fp, lsl #16
+ adc r6, r6, fp, lsr #16
+ mul fp, xl, yh
+ adds lr, lr, fp
+ mul fp, r7, sl
+ adcs r5, r5, fp
+ mul fp, xh, yl
+ adc r6, r6, #0
+ adds lr, lr, fp
+ mul fp, r9, r8
+ adcs r5, r5, fp
+ mul fp, r7, r8
+ adc r6, r6, #0
+ adds lr, lr, fp
+ mul fp, xh, yh
+ adcs r5, r5, fp
+ adc r6, r6, #0
+ ldmfd sp!, {yl, r7, r8, r9, sl, fp}
+
+#else
+
+ @ Here is the actual multiplication.
+ umull ip, lr, xl, yl
+ mov r5, #0
+ umlal lr, r5, xh, yl
+ and yl, r6, #0x80000000
+ umlal lr, r5, xl, yh
+ mov r6, #0
+ umlal r5, r6, xh, yh
+
+#endif
+
+ @ The LSBs in ip are only significant for the final rounding.
+ @ Fold them into lr.
+ teq ip, #0
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ orrne lr, lr, #1
+
+ @ Adjust result upon the MSB position.
+ sub r4, r4, #0xff
+ cmp r6, #(1 << (20-11))
+ sbc r4, r4, #0x300
+ bcs 1f
+ movs lr, lr, lsl #1
+ adcs r5, r5, r5
+ adc r6, r6, r6
+1:
+ @ Shift to final position, add sign to result.
+ orr xh, yl, r6, lsl #11
+ orr xh, xh, r5, lsr #21
+ mov xl, r5, lsl #11
+ orr xl, xl, lr, lsr #21
+ mov lr, lr, lsl #11
+
+ @ Check exponent range for under/overflow.
+ subs ip, r4, #(254 - 1)
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it hi
+ cmphi ip, #0x700
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ cmp lr, #0x80000000
+ do_it eq
+ COND(mov,s,eq) lr, xl, lsr #1
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
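+
+ @ The rounding sequence just above (compare the leftover bits against
+ @ 0x80000000, add the carry, force the LSB even on a tie) implements
+ @ round-to-nearest, ties-to-even. A C sketch of the same step (the
+ @ helper name round_ne is illustrative only):
+ @
+ @   static unsigned round_ne (unsigned value, unsigned leftover)
+ @   {
+ @     unsigned r = value + (leftover >= 0x80000000u); /* carry = guard */
+ @     if (leftover == 0x80000000u)
+ @       r &= ~1u;                                     /* tie: force even */
+ @     return r;
+ @   }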
+
+ @ Multiplication by 0x1p*: let's shortcut a lot of code.
+LSYM(Lml_1):
+ and r6, r6, #0x80000000
+ orr xh, r6, xh
+ orr xl, xl, yl
+ eor xh, xh, yh
+ subs r4, r4, ip, lsr #1
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it gt, tt
+ COND(rsb,s,gt) r5, r4, ip
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ orrgt xh, xh, r4, lsl #20
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(gt, r4, r5, r6)
+
+ @ Under/overflow: fix things up for the code below.
+ orr xh, xh, #0x00100000
+ mov lr, #0
+ subs r4, r4, #1
+
+LSYM(Lml_u):
+ @ Overflow?
+ bgt LSYM(Lml_o)
+
+ @ Check if denormalized result is possible, otherwise return signed 0.
+ cmn r4, #(53 + 1)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it le, tt
+ movle xl, #0
+ bicle xh, xh, #0x7fffffff
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(le, r4, r5, r6)
+
+ @ Find out proper shift value.
+ rsb r4, r4, #0
+ subs r4, r4, #32
+ bge 2f
+ adds r4, r4, #12
+ bgt 1f
+
+ @ shift result right by 1 to 20 bits, preserve sign bit, round, etc.
+ add r4, r4, #20
+ rsb r5, r4, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsl, r3, xl, r5
+ shift1 lsr, xl, xl, r4
+ shiftop orr, xl, xl, xh, lsl, r5, r2
+ and r2, xh, #0x80000000
+ bic xh, xh, #0x80000000
+ adds xl, xl, r3, lsr #31
+ shiftop adc, xh, r2, xh, lsr, r4, r6
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ biceq xl, xl, r3, lsr #31
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ @ shift result right by 21 to 31 bits, or left by 11 to 1 bits after
+ @ a register switch from xh to xl. Then round.
+1: rsb r4, r4, #12
+ rsb r5, r4, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsl, r3, xl, r4
+ shift1 lsr, xl, xl, r5
+ shiftop orr, xl, xl, xh, lsl, r4, r2
+ bic xh, xh, #0x7fffffff
+ adds xl, xl, r3, lsr #31
+ adc xh, xh, #0
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ biceq xl, xl, r3, lsr #31
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ @ Shift value right by 32 to 64 bits, or by 0 to 32 bits after a switch
+ @ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
+2: rsb r5, r4, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shiftop orr, lr, lr, xl, lsl, r5, r2
+ shift1 lsr, r3, xl, r4
+ shiftop orr, r3, r3, xh, lsl, r5, r2
+ shift1 lsr, xl, xh, r4
+ bic xh, xh, #0x7fffffff
+ shiftop bic, xl, xl, xh, lsr, r4, r2
+ add xl, xl, r3, lsr #31
+ orrs lr, lr, r3, lsl #1
+ do_it eq
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ biceq xl, xl, r3, lsr #31
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Lml_d):
+ teq r4, #0
+ bne 2f
+ and r6, xh, #0x80000000
+1: movs xl, xl, lsl #1
+ adc xh, xh, xh
+ tst xh, #0x00100000
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ subeq r4, r4, #1
+ beq 1b
+ orr xh, xh, r6
+ teq r5, #0
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ movne pc, lr
+2: and r6, yh, #0x80000000
+3: movs yl, yl, lsl #1
+ adc yh, yh, yh
+ tst yh, #0x00100000
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ subeq r5, r5, #1
+ beq 3b
+ orr yh, yh, r6
+ mov pc, lr
+
+LSYM(Lml_s):
+ @ Isolate the INF and NAN cases away
+ teq r4, ip
+ and r5, ip, yh, lsr #20
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ teqne r5, ip
+ beq 1f
+
+ @ Here, one or more arguments are either denormalized or zero.
+ orrs r6, xl, xh, lsl #1
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ bne LSYM(Lml_d)
+
+ @ Result is 0, but determine sign anyway.
+LSYM(Lml_z):
+ eor xh, xh, yh
+ bic xh, xh, #0x7fffffff
+ mov xl, #0
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+1: @ One or both args are INF or NAN.
+ orrs r6, xl, xh, lsl #1
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, te
+ moveq xl, yl
+ moveq xh, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
+ teq r4, ip
+ bne 1f
+ orrs r6, xl, xh, lsl #12
+ bne LSYM(Lml_n) @ NAN * <anything> -> NAN
+1: teq r5, ip
+ bne LSYM(Lml_i)
+ orrs r6, yl, yh, lsl #12
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne, t
+ movne xl, yl
+ movne xh, yh
+ bne LSYM(Lml_n) @ <anything> * NAN -> NAN
+
+ @ Result is INF, but we need to determine its sign.
+LSYM(Lml_i):
+ eor xh, xh, yh
+
+ @ Overflow: return INF (sign already in xh).
+LSYM(Lml_o):
+ and xh, xh, #0x80000000
+ orr xh, xh, #0x7f000000
+ orr xh, xh, #0x00f00000
+ mov xl, #0
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ @ Return a quiet NAN.
+LSYM(Lml_n):
+ orr xh, xh, #0x7f000000
+ orr xh, xh, #0x00f80000
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ FUNC_END aeabi_dmul
+ FUNC_END muldf3
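+
+ @ The heart of the multiply above, as a hedged C model (unsigned
+ @ __int128 stands in for the umull/umlal pairs; mul_mantissas is an
+ @ illustrative name and the sticky rounding bits are omitted):
+ @
+ @   #include <stdint.h>
+ @
+ @   static uint64_t mul_mantissas (uint64_t a53, uint64_t b53, int *inc)
+ @   {
+ @     /* a53 and b53 lie in [2^52, 2^53): product is 105 or 106 bits. */
+ @     unsigned __int128 p = (unsigned __int128) a53 * b53;
+ @     *inc = (int) (p >> 105);              /* 1 if MSB landed one higher */
+ @     return (uint64_t) (p >> (52 + *inc)); /* keep the top 53 bits */
+ @   }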
+
+ARM_FUNC_START divdf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_ddiv,divdf3)
+
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r4, r5, r6, lr}
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ orr ip, ip, #0x700
+ ands r4, ip, xh, lsr #20
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, tte
+ COND(and,s,ne) r5, ip, yh, lsr #20
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ teqne r4, ip
+ teqne r5, ip
+ bleq LSYM(Ldv_s)
+
+ @ Subtract the divisor exponent from the dividend's.
+ sub r4, r4, r5
+
+ @ Preserve final sign into lr.
+ eor lr, xh, yh
+
+ @ Convert mantissa to unsigned integer.
+ @ Dividend -> r5-r6, divisor -> yh-yl.
+ orrs r5, yl, yh, lsl #12
+ mov xh, xh, lsl #12
+ beq LSYM(Ldv_1)
+ mov yh, yh, lsl #12
+ mov r5, #0x10000000
+ orr yh, r5, yh, lsr #4
+ orr yh, yh, yl, lsr #24
+ mov yl, yl, lsl #8
+ orr r5, r5, xh, lsr #4
+ orr r5, r5, xl, lsr #24
+ mov r6, xl, lsl #8
+
+ @ Initialize xh with final sign bit.
+ and xh, lr, #0x80000000
+
+ @ Ensure the result will land at a known bit position.
+ @ Apply exponent bias accordingly.
+ cmp r5, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ cmpeq r6, yl
+ adc r4, r4, #(255 - 2)
+ add r4, r4, #0x300
+ bcs 1f
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+1:
+ @ Perform the first subtraction to align the result to a nibble.
+ subs r6, r6, yl
+ sbc r5, r5, yh
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ mov xl, #0x00100000
+ mov ip, #0x00080000
+
+ @ The actual division loop.
+1: subs lr, r6, yl
+ sbcs lr, r5, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #1
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #2
+ movs yh, yh, lsr #1
+ mov yl, yl, rrx
+ subs lr, r6, yl
+ sbcs lr, r5, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs, tt
+ subcs r6, r6, yl
+ movcs r5, lr
+ orrcs xl, xl, ip, lsr #3
+
+ orrs lr, r5, r6
+ beq 2f
+ mov r5, r5, lsl #4
+ orr r5, r5, r6, lsr #28
+ mov r6, r6, lsl #4
+ mov yh, yh, lsl #3
+ orr yh, yh, yl, lsr #29
+ mov yl, yl, lsl #3
+ movs ip, ip, lsr #4
+ bne 1b
+
+ @ We are done with a word of the result.
+ @ Loop again for the low word if this pass was for the high word.
+ tst xh, #0x00100000
+ bne 3f
+ orr xh, xh, xl
+ mov xl, #0
+ mov ip, #0x80000000
+ b 1b
+2:
+ @ Be sure result starts in the high word.
+ tst xh, #0x00100000
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, t
+ orreq xh, xh, xl
+ moveq xl, #0
+3:
+ @ Check exponent range for under/overflow.
+ subs ip, r4, #(254 - 1)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hi
+ cmphi ip, #0x700
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ subs ip, r5, yh
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, t
+ COND(sub,s,eq) ip, r6, yl
+ COND(mov,s,eq) ip, xl, lsr #1
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ adcs xl, xl, #0
+ adc xh, xh, r4, lsl #20
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r4, r5, r6)
+
+ @ Division by 0x1p*: shortcut a lot of code.
+LSYM(Ldv_1):
+ and lr, lr, #0x80000000
+ orr xh, lr, xh, lsr #12
+ adds r4, r4, ip, lsr #1
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it gt, tt
+ COND(rsb,s,gt) r5, r4, ip
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ orrgt xh, xh, r4, lsl #20
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM2(gt, r4, r5, r6)
+
+ orr xh, xh, #0x00100000
+ mov lr, #0
+ subs r4, r4, #1
+ b LSYM(Lml_u)
+
+ @ Result might need to be denormalized: put remainder bits
+ @ in lr for rounding considerations.
+LSYM(Ldv_u):
+ orr lr, r5, r6
+ b LSYM(Lml_u)
+
+ @ One or both arguments are either INF, NAN or zero.
+LSYM(Ldv_s):
+ and r5, ip, yh, lsr #20
+ teq r4, ip
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ teqeq r5, ip
+ beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
+ teq r4, ip
+ bne 1f
+ orrs r4, xl, xh, lsl #12
+ bne LSYM(Lml_n) @ NAN / <anything> -> NAN
+ teq r5, ip
+ bne LSYM(Lml_i) @ INF / <anything> -> INF
+ mov xl, yl
+ mov xh, yh
+ b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
+1: teq r5, ip
+ bne 2f
+ orrs r5, yl, yh, lsl #12
+ beq LSYM(Lml_z) @ <anything> / INF -> 0
+ mov xl, yl
+ mov xh, yh
+ b LSYM(Lml_n) @ <anything> / NAN -> NAN
+2: @ If both are nonzero, we need to normalize and resume above.
+ orrs r6, xl, xh, lsl #1
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(orr,s,ne) r6, yl, yh, lsl #1
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ bne LSYM(Lml_d)
+ @ One or both arguments are 0.
+ orrs r4, xl, xh, lsl #1
+ bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
+ orrs r5, yl, yh, lsl #1
+ bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
+ b LSYM(Lml_n) @ 0 / 0 -> NAN
+
+ FUNC_END aeabi_ddiv
+ FUNC_END divdf3
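+
+ @ A simplified C model of the division loop (one quotient bit per
+ @ step, where the code above retires four bits per unrolled pass;
+ @ div_mantissas is an illustrative name, rounding is left to the
+ @ caller):
+ @
+ @   static uint64_t div_mantissas (uint64_t num, uint64_t den)
+ @   {
+ @     /* num, den in [2^52, 2^53), num >= den (caller biases exponent) */
+ @     uint64_t q = 0, bit = 1ULL << 52;
+ @     while (bit)
+ @       {
+ @         if (num >= den) { num -= den; q |= bit; }
+ @         num <<= 1;
+ @         bit >>= 1;
+ @       }
+ @     return q;  /* 53-bit quotient; leftover in num drives rounding */
+ @   }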
+
+#endif /* L_muldivdf3 */
+
+#ifdef L_cmpdf2
+
+@ Note: only r0 (return value) and ip are clobbered here.
+
+ARM_FUNC_START gtdf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(gedf2,gtdf2)
+ mov ip, #-1
+ b 1f
+
+ARM_FUNC_START ltdf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(ledf2,ltdf2)
+ mov ip, #1
+ b 1f
+
+ARM_FUNC_START cmpdf2
+/* APPLE LOCAL begin ARM MACH assembler */
+ARM_FUNC_ALIAS(nedf2,cmpdf2)
+ARM_FUNC_ALIAS(eqdf2,cmpdf2)
+/* APPLE LOCAL end ARM MACH assembler */
+ mov ip, #1 @ how should we specify unordered here?
+
+1: str ip, [sp, #-4]
+
+ @ Trap any INF/NAN first.
+ mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ mov ip, yh, lsl #1
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(mvn,s,ne) ip, ip, asr #21
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ beq 3f
+
+ @ Test for equality.
+ @ Note that 0.0 is equal to -0.0.
+2: orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, e
+ COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
+ teqne xh, yh @ or xh == yh
+ do_it eq, tt
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ teqeq xl, yl @ and xl == yl
+ moveq r0, #0 @ then equal.
+ RETc(eq)
+
+ @ Clear C flag
+ cmn r0, #0
+
+ @ Compare sign,
+ teq xh, yh
+
+ @ Compare values if same sign
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it pl
+ cmppl xh, yh
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ cmpeq xl, yl
+
+ @ Result:
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs, e
+ movcs r0, yh, asr #31
+ mvncc r0, yh, asr #31
+ orr r0, r0, #1
+ RET
+
+ @ Look for a NAN.
+3: mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ bne 4f
+ orrs ip, xl, xh, lsl #12
+ bne 5f @ x is NAN
+4: mov ip, yh, lsl #1
+ mvns ip, ip, asr #21
+ bne 2b
+ orrs ip, yl, yh, lsl #12
+ beq 2b @ y is not NAN
+5: ldr r0, [sp, #-4] @ unordered return code
+ RET
+
+ FUNC_END gedf2
+ FUNC_END gtdf2
+ FUNC_END ledf2
+ FUNC_END ltdf2
+ FUNC_END nedf2
+ FUNC_END eqdf2
+ FUNC_END cmpdf2
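+
+ @ The three entry points above differ only in the value parked on the
+ @ stack for the unordered case. A hedged C model (cmpdf2_model is an
+ @ illustrative name):
+ @
+ @   static int cmpdf2_model (double a, double b, int unordered)
+ @   {
+ @     if (a != a || b != b)     /* at least one NaN */
+ @       return unordered;       /* -1 for gt/ge, +1 for lt/le/cmp/ne/eq */
+ @     if (a == b)
+ @       return 0;               /* note: 0.0 compares equal to -0.0 */
+ @     return a < b ? -1 : 1;
+ @   }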
+
+ARM_FUNC_START aeabi_cdrcmple
+
+ mov ip, r0
+ mov r0, r2
+ mov r2, ip
+ mov ip, r1
+ mov r1, r3
+ mov r3, ip
+ b 6f
+
+ARM_FUNC_START aeabi_cdcmpeq
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_cdcmple,aeabi_cdcmpeq)
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+ /* APPLE LOCAL v7 support. Merge from mainline */
+6: do_push {r0, lr}
+ ARM_CALL cmpdf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it mi
+ cmnmi r0, #0
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r0)
+
+ FUNC_END aeabi_cdcmple
+ FUNC_END aeabi_cdcmpeq
+ FUNC_END aeabi_cdrcmple
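+
+ @ Flag protocol, as best read from the code above: on return from
+ @ aeabi_cdcmple, Z is set iff the operands compare equal and C is
+ @ clear iff the first operand is less than the second, so callers
+ @ decode eq -> equal, cc -> less-than, ls -> less-or-equal, and
+ @ hi -> greater-than or unordered.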
+
+ARM_FUNC_START aeabi_dcmpeq
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, e
+ moveq r0, #1 @ Equal to.
+ movne r0, #0 @ Less than, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpeq
+
+ARM_FUNC_START aeabi_dcmplt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, e
+ movcc r0, #1 @ Less than.
+ movcs r0, #0 @ Equal to, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmplt
+
+ARM_FUNC_START aeabi_dcmple
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdcmple
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ls, e
+ movls r0, #1 @ Less than or equal to.
+ movhi r0, #0 @ Greater than or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmple
+
+ARM_FUNC_START aeabi_dcmpge
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdrcmple
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ls, e
+ movls r0, #1 @ Operand 2 is less than or equal to operand 1.
+ movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpge
+
+ARM_FUNC_START aeabi_dcmpgt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cdrcmple
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, e
+ movcc r0, #1 @ Operand 2 is less than operand 1.
+ movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
+ @ or they are unordered.
+ RETLDM
+
+ FUNC_END aeabi_dcmpgt
+
+#endif /* L_cmpdf2 */
+
+#ifdef L_unorddf2
+
+ARM_FUNC_START unorddf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_dcmpun,unorddf2)
+
+ mov ip, xh, lsl #1
+ mvns ip, ip, asr #21
+ bne 1f
+ orrs ip, xl, xh, lsl #12
+ bne 3f @ x is NAN
+1: mov ip, yh, lsl #1
+ mvns ip, ip, asr #21
+ bne 2f
+ orrs ip, yl, yh, lsl #12
+ bne 3f @ y is NAN
+2: mov r0, #0 @ arguments are ordered.
+ RET
+
+3: mov r0, #1 @ arguments are unordered.
+ RET
+
+ FUNC_END aeabi_dcmpun
+ FUNC_END unorddf2
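+
+ @ The NaN test above, restated as a hedged C sketch (is_nan_bits is an
+ @ illustrative name; hi:lo are the raw high/low words of the double):
+ @
+ @   static int is_nan_bits (uint32_t hi, uint32_t lo)
+ @   {
+ @     return ((hi << 1) >> 21) == 0x7ff  /* exponent field all ones */
+ @            && (lo | (hi << 12)) != 0;  /* nonzero fraction */
+ @   }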
+
+#endif /* L_unorddf2 */
+
+#ifdef L_fixdfsi
+
+ARM_FUNC_START fixdfsi
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_d2iz,fixdfsi)
+
+ @ check exponent range.
+ mov r2, xh, lsl #1
+ adds r2, r2, #(1 << 21)
+ bcs 2f @ value is INF or NAN
+ bpl 1f @ value is too small
+ mov r3, #(0xfffffc00 + 31)
+ subs r2, r3, r2, asr #21
+ bls 3f @ value is too large
+
+ @ scale value
+ mov r3, xh, lsl #11
+ orr r3, r3, #0x80000000
+ orr r3, r3, xl, lsr #21
+ tst xh, #0x80000000 @ the sign bit
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsr, r0, r3, r2
+ do_it ne
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ rsbne r0, r0, #0
+ RET
+
+1: mov r0, #0
+ RET
+
+2: orrs xl, xl, xh, lsl #12
+ bne 4f @ x is NAN.
+3: ands r0, xh, #0x80000000 @ the sign bit
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ moveq r0, #0x7fffffff @ maximum signed positive si
+ RET
+
+4: mov r0, #0 @ How should we convert NAN?
+ RET
+
+ FUNC_END aeabi_d2iz
+ FUNC_END fixdfsi
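+
+ @ A hedged C rendering of the conversion above (fixdfsi_model is an
+ @ illustrative name; note the code above converts NAN to 0, which this
+ @ model does not reproduce):
+ @
+ @   #include <stdint.h>
+ @   #include <string.h>
+ @
+ @   static int32_t fixdfsi_model (double d)
+ @   {
+ @     uint64_t bits;
+ @     memcpy (&bits, &d, sizeof bits);
+ @     int exp = (int) ((bits >> 52) & 0x7ff) - 1023;
+ @     if (exp < 0)
+ @       return 0;                                  /* |d| < 1 */
+ @     if (exp >= 31)                               /* too large or INF */
+ @       return (bits >> 63) ? INT32_MIN : INT32_MAX;
+ @     uint64_t mant = (bits & 0x000fffffffffffffULL) | (1ULL << 52);
+ @     uint32_t v = (uint32_t) (mant >> (52 - exp));
+ @     return (bits >> 63) ? -(int32_t) v : (int32_t) v;
+ @   }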
+
+#endif /* L_fixdfsi */
+
+#ifdef L_fixunsdfsi
+
+ARM_FUNC_START fixunsdfsi
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_d2uiz,fixunsdfsi)
+
+ @ check exponent range.
+ movs r2, xh, lsl #1
+ bcs 1f @ value is negative
+ adds r2, r2, #(1 << 21)
+ bcs 2f @ value is INF or NAN
+ bpl 1f @ value is too small
+ mov r3, #(0xfffffc00 + 31)
+ subs r2, r3, r2, asr #21
+ bmi 3f @ value is too large
+
+ @ scale value
+ mov r3, xh, lsl #11
+ orr r3, r3, #0x80000000
+ orr r3, r3, xl, lsr #21
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ shift1 lsr, r0, r3, r2
+ RET
+
+1: mov r0, #0
+ RET
+
+2: orrs xl, xl, xh, lsl #12
+ bne 4f @ value is NAN.
+3: mov r0, #0xffffffff @ maximum unsigned si
+ RET
+
+4: mov r0, #0 @ How should we convert NAN?
+ RET
+
+ FUNC_END aeabi_d2uiz
+ FUNC_END fixunsdfsi
+
+#endif /* L_fixunsdfsi */
+
+#ifdef L_truncdfsf2
+
+ARM_FUNC_START truncdfsf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_d2f,truncdfsf2)
+
+ @ check exponent range.
+ mov r2, xh, lsl #1
+ subs r3, r2, #((1023 - 127) << 21)
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it cs, t
+ COND(sub,s,cs) ip, r3, #(1 << 21)
+ COND(rsb,s,cs) ip, ip, #(254 << 21)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ bls 2f @ value is out of range
+
+1: @ shift and round mantissa
+ and ip, xh, #0x80000000
+ mov r2, xl, lsl #3
+ orr xl, ip, xl, lsr #29
+ cmp r2, #0x80000000
+ adc r0, xl, r3, lsl #2
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+2: @ either overflow or underflow
+ tst xh, #0x40000000
+ bne 3f @ overflow
+
+ @ check if denormalized value is possible
+ adds r2, r3, #(23 << 21)
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it lt, t
+ andlt r0, xh, #0x80000000 @ too small, return signed 0.
+ RETc(lt)
+
+ @ denormalize value so we can resume with the code above afterwards.
+ orr xh, xh, #0x00100000
+ mov r2, r2, lsr #21
+ rsb r2, r2, #24
+ rsb ip, r2, #32
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ lsls r3, xl, ip
+#else
+ movs r3, xl, lsl ip
+#endif
+ shift1 lsr, xl, xl, r2
+ do_it ne
+ orrne xl, xl, #1 @ fold r3 for rounding considerations.
+ mov r3, xh, lsl #11
+ mov r3, r3, lsr #11
+ shiftop orr, xl, xl, r3, lsl, ip, ip
+ shift1 lsr, r3, r3, r2
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+ mov r3, r3, lsl #1
+ b 1b
+
+3: @ check for NAN
+ mvns r3, r2, asr #21
+ bne 5f @ simple overflow
+ orrs r3, xl, xh, lsl #12
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne, tt
+ movne r0, #0x7f000000
+ orrne r0, r0, #0x00c00000
+ RETc(ne) @ return NAN
+
+5: @ return INF with sign
+ and r0, xh, #0x80000000
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ FUNC_END aeabi_d2f
+ FUNC_END truncdfsf2
+
+#endif /* L_truncdfsf2 */
+
+/* APPLE LOCAL begin ARM 4702983 Thumb VFP math */
+#ifndef NOT_DARWIN
+#if __ARM_ARCH__ > 5
+#ifdef L_muldf3vfp
+
+ARM_FUNC_START muldf3vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fmuld d5, d6, d7
+ fmrrd r0, r1, d5
+ RET
+
+ FUNC_END muldf3vfp
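+
+ @ Each routine in this VFP block is just the corresponding C operation
+ @ under the softfp calling convention; e.g. the function above behaves
+ @ like
+ @   double muldf3vfp_model (double a, double b) { return a * b; }
+ @ with arguments and result carried in core registers (r0-r3 in,
+ @ r0-r1 out).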
+
+#endif
+
+#ifdef L_adddf3vfp
+
+ARM_FUNC_START adddf3vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ faddd d5, d6, d7
+ fmrrd r0, r1, d5
+ RET
+
+ FUNC_END adddf3vfp
+
+#endif
+
+#ifdef L_subdf3vfp
+
+ARM_FUNC_START subdf3vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fsubd d5, d6, d7
+ fmrrd r0, r1, d5
+ RET
+
+ FUNC_END subdf3vfp
+
+#endif
+
+#ifdef L_divdf3vfp
+
+ARM_FUNC_START divdf3vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fdivd d5, d6, d7
+ fmrrd r0, r1, d5
+ RET
+
+ FUNC_END divdf3vfp
+
+#endif
+
+#ifdef L_eqdf2vfp
+
+ARM_FUNC_START eqdf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it ne, e
+ movne r0, #0
+ moveq r0, #1
+ RET
+
+ FUNC_END eqdf2vfp
+
+#endif
+
+#ifdef L_nedf2vfp
+
+ARM_FUNC_START nedf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it eq, e
+ moveq r0, #0
+ movne r0, #1
+ RET
+
+ FUNC_END nedf2vfp
+
+#endif
+
+#ifdef L_ltdf2vfp
+
+ARM_FUNC_START ltdf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it pl, e
+ movpl r0, #0
+ movmi r0, #1
+ RET
+
+ FUNC_END ltdf2vfp
+
+#endif
+
+#ifdef L_gtdf2vfp
+
+ARM_FUNC_START gtdf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it le, e
+ movle r0, #0
+ movgt r0, #1
+ RET
+
+ FUNC_END gtdf2vfp
+
+#endif
+
+#ifdef L_ledf2vfp
+
+ARM_FUNC_START ledf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it hi, e
+ movhi r0, #0
+ movls r0, #1
+ RET
+
+ FUNC_END ledf2vfp
+
+#endif
+
+#ifdef L_gedf2vfp
+
+ARM_FUNC_START gedf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it lt, e
+ movlt r0, #0
+ movge r0, #1
+ RET
+
+ FUNC_END gedf2vfp
+
+#endif
+
+#ifdef L_unorddf2vfp
+
+ARM_FUNC_START unorddf2vfp
+
+ fmdrr d6, r0, r1
+ fmdrr d7, r2, r3
+ fcmpd d6, d7
+ fmstat
+ do_it vc, e
+ movvc r0, #0
+ movvs r0, #1
+ RET
+
+ FUNC_END unorddf2vfp
+
+#endif
+
+#ifdef L_fixdfsivfp
+
+ARM_FUNC_START fixdfsivfp
+
+ fmdrr d7, r0, r1
+ ftosizd s15, d7
+ fmrs r0, s15
+ RET
+
+ FUNC_END fixdfsivfp
+
+#endif
+
+#ifdef L_fixunsdfsivfp
+
+ARM_FUNC_START fixunsdfsivfp
+
+ fmdrr d7, r0, r1
+ ftouizd s15, d7
+ fmrs r0, s15
+ RET
+
+ FUNC_END fixunsdfsivfp
+
+#endif
+
+#ifdef L_extendsfdf2vfp
+
+ARM_FUNC_START extendsfdf2vfp
+
+ fmsr s15, r0
+ fcvtds d7, s15
+ fmrrd r0, r1, d7
+ RET
+
+ FUNC_END extendsfdf2vfp
+
+#endif
+
+#ifdef L_truncdfsf2vfp
+
+ARM_FUNC_START truncdfsf2vfp
+
+ fmdrr d7, r0, r1
+ fcvtsd s15, d7
+ fmrs r0, s15
+ RET
+
+ FUNC_END truncdfsf2vfp
+
+#endif
+
+#ifdef L_floatsidfvfp
+
+ARM_FUNC_START floatsidfvfp
+
+ fmsr s15, r0
+ fsitod d7, s15
+ fmrrd r0, r1, d7
+ RET
+
+ FUNC_END floatsidfvfp
+
+#endif
+
+#ifdef L_floatunssidfvfp
+
+ARM_FUNC_START floatunssidfvfp
+
+ fmsr s15, r0
+ fuitod d7, s15
+ fmrrd r0, r1, d7
+ RET
+
+ FUNC_END floatunssidfvfp
+
+#endif
+
+#endif /* __ARM_ARCH__ > 5 */
+#endif /* NOT_DARWIN */
+/* APPLE LOCAL end ARM 4702983 Thumb VFP math */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-sf.S b/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-sf.S
new file mode 100644
index 000000000..fdea4cb65
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/ieee754-sf.S
@@ -0,0 +1,1398 @@
+/* ieee754-sf.S single-precision floating point support for ARM
+
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Nicolas Pitre (nico@cam.org)
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/*
+ * Notes:
+ *
+ * The goal of this code is to be as fast as possible. This is
+ * not meant to be easy to understand for the casual reader.
+ *
+ * Only the default rounding mode is supported, for best performance.
+ * Exceptions aren't supported yet, but they could be added quite
+ * easily if necessary without impacting performance.
+ */
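+
+/*
+ * For reference, a hedged C model of the binary32 layout the routines
+ * below manipulate directly in r0 (pack_float is an illustrative name,
+ * not part of this library):
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static float pack_float (uint32_t sign, uint32_t exp8, uint32_t frac23)
+ *   {
+ *     uint32_t bits = (sign << 31) | ((exp8 & 0xff) << 23)
+ *                     | (frac23 & 0x7fffff);
+ *     float f;
+ *     memcpy (&f, &bits, sizeof f);
+ *     return f;
+ *   }
+ */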
+
+#ifdef L_negsf2
+
+ARM_FUNC_START negsf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fneg,negsf2)
+
+ eor r0, r0, #0x80000000 @ flip sign bit
+ RET
+
+ FUNC_END aeabi_fneg
+ FUNC_END negsf2
+
+#endif
+
+#ifdef L_addsubsf3
+
+ARM_FUNC_START aeabi_frsub
+
+ eor r0, r0, #0x80000000 @ flip sign bit of first arg
+ b 1f
+
+ARM_FUNC_START subsf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fsub,subsf3)
+
+ eor r1, r1, #0x80000000 @ flip sign bit of second arg
+#if defined(__INTERWORKING_STUBS__)
+ b 1f @ Skip Thumb-code prologue
+#endif
+
+ARM_FUNC_START addsf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fadd,addsf3)
+
+1: @ Look for zeroes, equal values, INF, or NAN.
+ movs r2, r0, lsl #1
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, ttt
+ COND(mov,s,ne) r3, r1, lsl #1
+ teqne r2, r3
+ COND(mvn,s,ne) ip, r2, asr #24
+ COND(mvn,s,ne) ip, r3, asr #24
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ beq LSYM(Lad_s)
+
+ @ Compute exponent difference. Make largest exponent in r2,
+ @ corresponding arg in r0, and positive exponent difference in r3.
+ mov r2, r2, lsr #24
+ rsbs r3, r2, r3, lsr #24
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it gt, ttt
+ addgt r2, r2, r3
+ eorgt r1, r0, r1
+ eorgt r0, r1, r0
+ eorgt r1, r0, r1
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it lt
+ rsblt r3, r3, #0
+
+ @ If exponent difference is too large, return largest argument
+ @ already in r0. We need up to 25 bits to handle proper rounding
+ @ of 0x1p25 - 1.1.
+ cmp r3, #25
+ /* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it hi
+ RETc(hi)
+ /* APPLE LOCAL end v7 support. Merge from mainline */
+
+ @ Convert mantissa to signed integer.
+ tst r0, #0x80000000
+ orr r0, r0, #0x00800000
+ bic r0, r0, #0xff000000
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ rsbne r0, r0, #0
+ tst r1, #0x80000000
+ orr r1, r1, #0x00800000
+ bic r1, r1, #0xff000000
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ rsbne r1, r1, #0
+
+ @ If exponent == difference, one or both args were denormalized.
+ @ Since this is not the common case, rescale them off line.
+ teq r2, r3
+ beq LSYM(Lad_d)
+LSYM(Lad_x):
+
+ @ Compensate for the exponent overlapping the mantissa MSB added later
+ sub r2, r2, #1
+
+ @ Shift and add second arg to first arg in r0.
+ @ Keep leftover bits into r1.
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ shiftop adds, r0, r0, r1, asr, r3, ip
+ rsb r3, r3, #32
+ shift1 lsl, r1, r1, r3
+
+ @ Keep absolute value in r0-r1, sign in r3 (the n bit was set above)
+ and r3, r0, #0x80000000
+ bpl LSYM(Lad_p)
+#if defined(__thumb2__)
+ negs r1, r1
+ sbc r0, r0, r0, lsl #1
+#else
+ rsbs r1, r1, #0
+ rsc r0, r0, #0
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+
+ @ Determine how to normalize the result.
+LSYM(Lad_p):
+ cmp r0, #0x00800000
+ bcc LSYM(Lad_a)
+ cmp r0, #0x01000000
+ bcc LSYM(Lad_e)
+
+ @ Result needs to be shifted right.
+ movs r0, r0, lsr #1
+ mov r1, r1, rrx
+ add r2, r2, #1
+
+ @ Make sure we did not bust our exponent.
+ cmp r2, #254
+ bhs LSYM(Lad_o)
+
+ @ Our result is now properly aligned into r0, remaining bits in r1.
+ @ Pack final result together.
+ @ Round with MSB of r1. If halfway between two numbers, round towards
+ @ LSB of r0 = 0.
+LSYM(Lad_e):
+ cmp r1, #0x80000000
+ adc r0, r0, r2, lsl #23
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ biceq r0, r0, #1
+ orr r0, r0, r3
+ RET
+
+ @ Result must be shifted left and exponent adjusted.
+LSYM(Lad_a):
+ movs r1, r1, lsl #1
+ adc r0, r0, r0
+ tst r0, #0x00800000
+ sub r2, r2, #1
+ bne LSYM(Lad_e)
+
+ @ No rounding necessary since r1 will always be 0 at this point.
+LSYM(Lad_l):
+
+#if __ARM_ARCH__ < 5
+
+ movs ip, r0, lsr #12
+ moveq r0, r0, lsl #12
+ subeq r2, r2, #12
+ tst r0, #0x00ff0000
+ moveq r0, r0, lsl #8
+ subeq r2, r2, #8
+ tst r0, #0x00f00000
+ moveq r0, r0, lsl #4
+ subeq r2, r2, #4
+ tst r0, #0x00c00000
+ moveq r0, r0, lsl #2
+ subeq r2, r2, #2
+ cmp r0, #0x00800000
+ movcc r0, r0, lsl #1
+ sbcs r2, r2, #0
+
+#else
+
+ clz ip, r0
+ sub ip, ip, #8
+ subs r2, r2, ip
+/* APPLE LOCAL v7 support. Merge from mainline */
+ shift1 lsl, r0, r0, ip
+
+#endif
+
+ @ Final result with sign
+ @ If exponent negative, denormalize result.
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ge, et
+ addge r0, r0, r2, lsl #23
+ rsblt r2, r2, #0
+ orrge r0, r0, r3
+#if defined(__thumb2__)
+ do_it lt, t
+ lsrlt r0, r0, r2
+ orrlt r0, r3, r0
+#else
+ orrlt r0, r3, r0, lsr r2
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ RET
+
+ @ Fixup and adjust bit position for denormalized arguments.
+ @ Note that r2 must not remain equal to 0.
+LSYM(Lad_d):
+ teq r2, #0
+ eor r1, r1, #0x00800000
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, te
+ eoreq r0, r0, #0x00800000
+ addeq r2, r2, #1
+ subne r3, r3, #1
+ b LSYM(Lad_x)
+
+LSYM(Lad_s):
+ mov r3, r1, lsl #1
+
+ mvns ip, r2, asr #24
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(mvn,s,ne) ip, r3, asr #24
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ beq LSYM(Lad_i)
+
+ teq r2, r3
+ beq 1f
+
+ @ Result is x + 0.0 = x or 0.0 + y = y.
+ teq r2, #0
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ moveq r0, r1
+ RET
+
+1: teq r0, r1
+
+ @ Result is x - x = 0.
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne, t
+ movne r0, #0
+ RETc(ne)
+
+ @ Result is x + x = 2x.
+ tst r2, #0xff000000
+ bne 2f
+ movs r0, r0, lsl #1
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cs
+ orrcs r0, r0, #0x80000000
+ RET
+2: adds r2, r2, #(2 << 24)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, t
+ addcc r0, r0, #(1 << 23)
+ RETc(cc)
+ and r3, r0, #0x80000000
+
+ @ Overflow: return INF.
+LSYM(Lad_o):
+ orr r0, r3, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ @ At least one of r0/r1 is INF/NAN.
+ @ if r0 != INF/NAN: return r1 (which is INF/NAN)
+ @ if r1 != INF/NAN: return r0 (which is INF/NAN)
+ @ if r0 or r1 is NAN: return NAN
+ @ if opposite sign: return NAN
+ @ otherwise return r0 (which is INF or -INF)
+LSYM(Lad_i):
+ mvns r2, r2, asr #24
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, et
+ movne r0, r1
+ COND(mvn,s,eq) r3, r3, asr #24
+ movne r1, r0
+ movs r2, r0, lsl #9
+ do_it eq, te
+ COND(mov,s,eq) r3, r1, lsl #9
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ teqeq r0, r1
+ orrne r0, r0, #0x00400000 @ quiet NAN
+ RET
+
+ FUNC_END aeabi_frsub
+ FUNC_END aeabi_fadd
+ FUNC_END addsf3
+ FUNC_END aeabi_fsub
+ FUNC_END subsf3
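+
+ @ The alignment step above, as a rough C model (align_add is an
+ @ illustrative name; sign handling and renormalization are omitted):
+ @
+ @   static uint32_t align_add (uint32_t m_big, uint32_t m_small,
+ @                              unsigned d, uint32_t *leftover)
+ @   {
+ @     /* d = exponent difference, already known to be at most 25 */
+ @     *leftover = d ? m_small << (32 - d) : 0;  /* bits shifted out */
+ @     return m_big + (m_small >> d);
+ @   }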
+
+ARM_FUNC_START floatunsisf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_ui2f,floatunsisf)
+
+ mov r3, #0
+ b 1f
+
+ARM_FUNC_START floatsisf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_i2f,floatsisf)
+
+ ands r3, r0, #0x80000000
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it mi
+ rsbmi r0, r0, #0
+
+1: movs ip, r0
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ RETc(eq)
+
+ @ Add initial exponent to sign
+ orr r3, r3, #((127 + 23) << 23)
+
+ /* APPLE LOCAL begin ARM MACH assembler */
+#ifndef __ARMEB__
+ mov ah, r0
+#endif
+ /* APPLE LOCAL end ARM MACH assembler */
+ mov al, #0
+ b 2f
+
+ FUNC_END aeabi_i2f
+ FUNC_END floatsisf
+ FUNC_END aeabi_ui2f
+ FUNC_END floatunsisf
+
+ARM_FUNC_START floatundisf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_ul2f,floatundisf)
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, t
+ mvfeqs f0, #0.0
+#else
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#endif
+ RETc(eq)
+
+ mov r3, #0
+ b 1f
+
+ARM_FUNC_START floatdisf
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_l2f,floatdisf)
+
+ orrs r2, r0, r1
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq, t
+ mvfeqs f0, #0.0
+#else
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#endif
+ RETc(eq)
+
+ ands r3, ah, #0x80000000 @ sign bit in r3
+ bpl 1f
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ negs al, al
+ sbc ah, ah, ah, lsl #1
+#else
+ rsbs al, al, #0
+ rsc ah, ah, #0
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+1:
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+ @ For hard FPA code we want to return via the tail below so that
+ @ we can return the result in f0 as well as in r0 for backwards
+ @ compatibility.
+ str lr, [sp, #-8]!
+ adr lr, LSYM(f0_ret)
+#endif
+
+ movs ip, ah
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, tt
+ moveq ip, al
+ moveq ah, al
+ moveq al, #0
+
+ @ Add initial exponent to sign
+ orr r3, r3, #((127 + 23 + 32) << 23)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ subeq r3, r3, #(32 << 23)
+2: sub r3, r3, #(1 << 23)
+
+#if __ARM_ARCH__ < 5
+
+ mov r2, #23
+ cmp ip, #(1 << 16)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hs, t
+ movhs ip, ip, lsr #16
+ subhs r2, r2, #16
+ cmp ip, #(1 << 8)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hs, t
+ movhs ip, ip, lsr #8
+ subhs r2, r2, #8
+ cmp ip, #(1 << 4)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hs, t
+ movhs ip, ip, lsr #4
+ subhs r2, r2, #4
+ cmp ip, #(1 << 2)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it hs, e
+ subhs r2, r2, #2
+ sublo r2, r2, ip, lsr #1
+ subs r2, r2, ip, lsr #3
+
+#else
+
+ clz r2, ip
+ subs r2, r2, #8
+
+#endif
+
+ sub r3, r3, r2, lsl #23
+ blt 3f
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ shiftop add, r3, r3, ah, lsl, r2, ip
+ shift1 lsl, ip, al, r2
+ rsb r2, r2, #32
+ cmp ip, #0x80000000
+ shiftop adc, r0, r3, al, lsr, r2, r2
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+3: add r2, r2, #32
+ shift1 lsl, ip, ah, r2
+ rsb r2, r2, #32
+ orrs al, al, ip, lsl #1
+ shiftop adc, r0, r3, ah, lsr, r2, r2
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ biceq r0, r0, ip, lsr #31
+ RET
+
+#if !defined (__VFP_FP__) && !defined(__SOFTFP__)
+
+LSYM(f0_ret):
+ str r0, [sp, #-4]!
+ ldfs f0, [sp], #4
+ RETLDM
+
+#endif
+
+ FUNC_END floatdisf
+ FUNC_END aeabi_l2f
+ FUNC_END floatundisf
+ FUNC_END aeabi_ul2f
+
+#endif /* L_addsubsf3 */
+
+#ifdef L_muldivsf3
+
+ARM_FUNC_START mulsf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fmul,mulsf3)
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ ands r2, ip, r0, lsr #23
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, tt
+ COND(and,s,ne) r3, ip, r1, lsr #23
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ teqne r2, ip
+ teqne r3, ip
+ beq LSYM(Lml_s)
+LSYM(Lml_x):
+
+ @ Add exponents together
+ add r2, r2, r3
+
+ @ Determine final sign.
+ eor ip, r0, r1
+
+ @ Convert mantissa to unsigned integer.
+ @ If power of two, branch to a separate path.
+ @ Make up for final alignment.
+ movs r0, r0, lsl #9
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(mov,s,ne) r1, r1, lsl #9
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ beq LSYM(Lml_1)
+ mov r3, #0x08000000
+ orr r0, r3, r0, lsr #5
+ orr r1, r3, r1, lsr #5
+
+#if __ARM_ARCH__ < 4
+
+ @ Put sign bit in r3, which will be restored into r0 later.
+ and r3, ip, #0x80000000
+
+ @ Well, no way to make it shorter without the umull instruction.
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_push {r3, r4, r5}
+ mov r4, r0, lsr #16
+ mov r5, r1, lsr #16
+ bic r0, r0, r4, lsl #16
+ bic r1, r1, r5, lsl #16
+ mul ip, r4, r5
+ mul r3, r0, r1
+ mul r0, r5, r0
+ mla r0, r4, r1, r0
+ adds r3, r3, r0, lsl #16
+ adc r1, ip, r0, lsr #16
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_pop {r0, r4, r5}
+
+#else
+
+ @ The actual multiplication.
+ umull r3, r1, r0, r1
+
+ @ Put final sign in r0.
+ and r0, ip, #0x80000000
+
+#endif
+
+ @ Adjust result upon the MSB position.
+ cmp r1, #(1 << 23)
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, tt
+ movcc r1, r1, lsl #1
+ orrcc r1, r1, r3, lsr #31
+ movcc r3, r3, lsl #1
+
+ @ Add sign to result.
+ orr r0, r0, r1
+
+ @ Apply exponent bias, check for under/overflow.
+ sbc r2, r2, #127
+ cmp r2, #(254 - 1)
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ cmp r3, #0x80000000
+ adc r0, r0, r2, lsl #23
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+ @ Multiplication by 0x1p*: let's shortcut a lot of code.
+LSYM(Lml_1):
+ teq r0, #0
+ and ip, ip, #0x80000000
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it eq
+ moveq r1, r1, lsl #9
+ orr r0, ip, r0, lsr #9
+ orr r0, r0, r1, lsr #9
+ subs r2, r2, #127
+ do_it gt, tt
+ COND(rsb,s,gt) r3, r2, #255
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ orrgt r0, r0, r2, lsl #23
+ RETc(gt)
+
+ @ Under/overflow: fix things up for the code below.
+ orr r0, r0, #0x00800000
+ mov r3, #0
+ subs r2, r2, #1
+
+LSYM(Lml_u):
+ @ Overflow?
+ bgt LSYM(Lml_o)
+
+ @ Check if denormalized result is possible, otherwise return signed 0.
+ cmn r2, #(24 + 1)
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it le, t
+ bicle r0, r0, #0x7fffffff
+ RETc(le)
+
+ @ Shift value right, round, etc.
+ rsb r2, r2, #0
+ movs r1, r0, lsl #1
+ shift1 lsr, r1, r1, r2
+ rsb r2, r2, #32
+ shift1 lsl, ip, r0, r2
+ movs r0, r1, rrx
+ adc r0, r0, #0
+ orrs r3, r3, ip, lsl #1
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ biceq r0, r0, ip, lsr #31
+ RET
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Lml_d):
+ teq r2, #0
+ and ip, r0, #0x80000000
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+1: do_it eq, tt
+ moveq r0, r0, lsl #1
+ tsteq r0, #0x00800000
+ subeq r2, r2, #1
+ beq 1b
+ orr r0, r0, ip
+ teq r3, #0
+ and ip, r1, #0x80000000
+2: do_it eq, tt
+ moveq r1, r1, lsl #1
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ tsteq r1, #0x00800000
+ subeq r3, r3, #1
+ beq 2b
+ orr r1, r1, ip
+ b LSYM(Lml_x)
+
+LSYM(Lml_s):
+ @ Isolate the INF and NAN cases away
+ and r3, ip, r1, lsr #23
+ teq r2, ip
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ teqne r3, ip
+ beq 1f
+
+ @ Here, one or more arguments are either denormalized or zero.
+ bics ip, r0, #0x80000000
+ do_it ne
+ COND(bic,s,ne) ip, r1, #0x80000000
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ bne LSYM(Lml_d)
+
+ @ Result is 0, but determine sign anyway.
+LSYM(Lml_z):
+ eor r0, r0, r1
+ bic r0, r0, #0x7fffffff
+ RET
+
+1: @ One or both args are INF or NAN.
+ teq r0, #0x0
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne, ett
+ teqne r0, #0x80000000
+ moveq r0, r1
+ teqne r1, #0x0
+ teqne r1, #0x80000000
+ beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
+ teq r2, ip
+ bne 1f
+ movs r2, r0, lsl #9
+ bne LSYM(Lml_n) @ NAN * <anything> -> NAN
+1: teq r3, ip
+ bne LSYM(Lml_i)
+ movs r3, r1, lsl #9
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ne
+ movne r0, r1
+ bne LSYM(Lml_n) @ <anything> * NAN -> NAN
+
+ @ Result is INF, but we need to determine its sign.
+LSYM(Lml_i):
+ eor r0, r0, r1
+
+ @ Overflow: return INF (sign already in r0).
+LSYM(Lml_o):
+ and r0, r0, #0x80000000
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00800000
+ RET
+
+ @ Return a quiet NAN.
+LSYM(Lml_n):
+ orr r0, r0, #0x7f000000
+ orr r0, r0, #0x00c00000
+ RET
+
+ FUNC_END aeabi_fmul
+ FUNC_END mulsf3
+
+ARM_FUNC_START divsf3
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fdiv,divsf3)
+
+ @ Mask out exponents, trap any zero/denormal/INF/NAN.
+ mov ip, #0xff
+ ands r2, ip, r0, lsr #23
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne, tt
+ COND(and,s,ne) r3, ip, r1, lsr #23
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ teqne r2, ip
+ teqne r3, ip
+ beq LSYM(Ldv_s)
+LSYM(Ldv_x):
+
+ @ Subtract the divisor exponent from the dividend's
+ sub r2, r2, r3
+
+ @ Preserve final sign into ip.
+ eor ip, r0, r1
+
+ @ Convert mantissa to unsigned integer.
+ @ Dividend -> r3, divisor -> r1.
+ movs r1, r1, lsl #9
+ mov r0, r0, lsl #9
+ beq LSYM(Ldv_1)
+ mov r3, #0x10000000
+ orr r1, r3, r1, lsr #4
+ orr r3, r3, r0, lsr #4
+
+ @ Initialize r0 (result) with final sign bit.
+ and r0, ip, #0x80000000
+
+ @ Ensure the result will land at a known bit position.
+ @ Apply exponent bias accordingly.
+ cmp r3, r1
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it cc
+ movcc r3, r3, lsl #1
+ adc r2, r2, #(127 - 2)
+
+ @ The actual division loop.
+ mov ip, #0x00800000
+1: cmp r3, r1
+ do_it cs, t
+ subcs r3, r3, r1
+ orrcs r0, r0, ip
+ cmp r3, r1, lsr #1
+ do_it cs, t
+ subcs r3, r3, r1, lsr #1
+ orrcs r0, r0, ip, lsr #1
+ cmp r3, r1, lsr #2
+ do_it cs, t
+ subcs r3, r3, r1, lsr #2
+ orrcs r0, r0, ip, lsr #2
+ cmp r3, r1, lsr #3
+ do_it cs, t
+ subcs r3, r3, r1, lsr #3
+ orrcs r0, r0, ip, lsr #3
+ movs r3, r3, lsl #4
+ do_it ne
+ COND(mov,s,ne) ip, ip, lsr #4
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ bne 1b
+
+ @ Check exponent for under/overflow.
+ cmp r2, #(254 - 1)
+ bhi LSYM(Lml_u)
+
+ @ Round the result, merge final exponent.
+ cmp r3, r1
+ adc r0, r0, r2, lsl #23
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq
+ biceq r0, r0, #1
+ RET
+
+ @ Division by 0x1p*: let's shortcut a lot of code.
+LSYM(Ldv_1):
+ and ip, ip, #0x80000000
+ orr r0, ip, r0, lsr #9
+ adds r2, r2, #127
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it gt, tt
+ COND(rsb,s,gt) r3, r2, #255
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ orrgt r0, r0, r2, lsl #23
+ RETc(gt)
+
+ orr r0, r0, #0x00800000
+ mov r3, #0
+ subs r2, r2, #1
+ b LSYM(Lml_u)
+
+ @ One or both arguments are denormalized.
+ @ Scale them leftwards and preserve sign bit.
+LSYM(Ldv_d):
+ teq r2, #0
+ and ip, r0, #0x80000000
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+1: do_it eq, tt
+ moveq r0, r0, lsl #1
+ tsteq r0, #0x00800000
+ subeq r2, r2, #1
+ beq 1b
+ orr r0, r0, ip
+ teq r3, #0
+ and ip, r1, #0x80000000
+2: do_it eq, tt
+ moveq r1, r1, lsl #1
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ tsteq r1, #0x00800000
+ subeq r3, r3, #1
+ beq 2b
+ orr r1, r1, ip
+ b LSYM(Ldv_x)
+
+ @ One or both arguments are either INF, NAN, zero or denormalized.
+LSYM(Ldv_s):
+ and r3, ip, r1, lsr #23
+ teq r2, ip
+ bne 1f
+ movs r2, r0, lsl #9
+ bne LSYM(Lml_n) @ NAN / <anything> -> NAN
+ teq r3, ip
+ bne LSYM(Lml_i) @ INF / <anything> -> INF
+ mov r0, r1
+ b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
+1: teq r3, ip
+ bne 2f
+ movs r3, r1, lsl #9
+ beq LSYM(Lml_z) @ <anything> / INF -> 0
+ mov r0, r1
+ b LSYM(Lml_n) @ <anything> / NAN -> NAN
+2: @ If both are nonzero, we need to normalize and resume above.
+ bics ip, r0, #0x80000000
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(bic,s,ne) ip, r1, #0x80000000
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ bne LSYM(Ldv_d)
+ @ One or both arguments are zero.
+ bics r2, r0, #0x80000000
+ bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
+ bics r3, r1, #0x80000000
+ bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
+ b LSYM(Lml_n) @ 0 / 0 -> NAN
+
+ FUNC_END aeabi_fdiv
+ FUNC_END divsf3
+
+#endif /* L_muldivsf3 */
+
+#ifdef L_cmpsf2
+
+ @ The return value in r0 is
+ @
+ @ 0 if the operands are equal
+ @ 1 if the first operand is greater than the second, or
+ @ the operands are unordered and the operation is
+ @ CMP, LT, LE, NE, or EQ.
+ @ -1 if the first operand is less than the second, or
+ @ the operands are unordered and the operation is GT
+ @ or GE.
+ @
+ @ The Z flag will be set iff the operands are equal.
+ @
+ @ The following registers are clobbered by this function:
+ @ ip, r0, r1, r2, r3
+
+ARM_FUNC_START gtsf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(gesf2,gtsf2)
+ mov ip, #-1
+ b 1f
+
+ARM_FUNC_START ltsf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(lesf2,ltsf2)
+ mov ip, #1
+ b 1f
+
+ARM_FUNC_START cmpsf2
+/* APPLE LOCAL begin ARM MACH assembler */
+ARM_FUNC_ALIAS(nesf2,cmpsf2)
+ARM_FUNC_ALIAS(eqsf2,cmpsf2)
+/* APPLE LOCAL end ARM MACH assembler */
+ mov ip, #1 @ how should we specify unordered here?
+
+1: str ip, [sp, #-4]
+
+ @ Trap any INF/NAN first.
+ mov r2, r0, lsl #1
+ mov r3, r1, lsl #1
+ mvns ip, r2, asr #24
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ do_it ne
+ COND(mvn,s,ne) ip, r3, asr #24
+ beq 3f
+
+ @ Compare values.
+ @ Note that 0.0 is equal to -0.0.
+2: orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
+ do_it ne
+ teqne r0, r1 @ if not 0 compare sign
+ do_it pl
+ COND(sub,s,pl) r0, r2, r3 @ if same sign compare values, set r0
+
+ @ Result:
+ do_it hi
+ movhi r0, r1, asr #31
+ do_it lo
+ mvnlo r0, r1, asr #31
+ do_it ne
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ orrne r0, r0, #1
+ RET
+
+ @ Look for a NAN.
+3: mvns ip, r2, asr #24
+ bne 4f
+ movs ip, r0, lsl #9
+ bne 5f @ r0 is NAN
+4: mvns ip, r3, asr #24
+ bne 2b
+ movs ip, r1, lsl #9
+ beq 2b @ r1 is not NAN
+5: ldr r0, [sp, #-4] @ return unordered code.
+ RET
+
+ FUNC_END gesf2
+ FUNC_END gtsf2
+ FUNC_END lesf2
+ FUNC_END ltsf2
+ FUNC_END nesf2
+ FUNC_END eqsf2
+ FUNC_END cmpsf2
+
+ARM_FUNC_START aeabi_cfrcmple
+
+ mov ip, r0
+ mov r0, r1
+ mov r1, ip
+ b 6f
+
+ARM_FUNC_START aeabi_cfcmpeq
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_cfcmple,aeabi_cfcmpeq)
+
+ @ The status-returning routines are required to preserve all
+ @ registers except ip, lr, and cpsr.
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+6: do_push {r0, r1, r2, r3, lr}
+ ARM_CALL cmpsf2
+ @ Set the Z flag correctly, and the C flag unconditionally.
+ cmp r0, #0
+ @ Clear the C flag if the return value was -1, indicating
+ @ that the first operand was smaller than the second.
+ do_it mi
+ cmnmi r0, #0
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1(r0, r1, r2, r3)
+
+ FUNC_END aeabi_cfcmple
+ FUNC_END aeabi_cfcmpeq
+ FUNC_END aeabi_cfrcmple
+
+ARM_FUNC_START aeabi_fcmpeq
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it eq, e
+ moveq r0, #1 @ Equal to.
+ movne r0, #0 @ Less than, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpeq
+
+ARM_FUNC_START aeabi_fcmplt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, e
+ movcc r0, #1 @ Less than.
+ movcs r0, #0 @ Equal to, greater than, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmplt
+
+ARM_FUNC_START aeabi_fcmple
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfcmple
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ls, e
+ movls r0, #1 @ Less than or equal to.
+ movhi r0, #0 @ Greater than or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmple
+
+ARM_FUNC_START aeabi_fcmpge
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfrcmple
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it ls, e
+ movls r0, #1 @ Operand 2 is less than or equal to operand 1.
+ movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpge
+
+ARM_FUNC_START aeabi_fcmpgt
+
+ str lr, [sp, #-8]!
+ ARM_CALL aeabi_cfrcmple
+/* APPLE LOCAL v7 support. Merge from mainline */
+ do_it cc, e
+ movcc r0, #1 @ Operand 2 is less than operand 1.
+ movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
+ @ or they are unordered.
+ RETLDM
+
+ FUNC_END aeabi_fcmpgt
+
+#endif /* L_cmpsf2 */
+
+#ifdef L_unordsf2
+
+ARM_FUNC_START unordsf2
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_fcmpun,unordsf2)
+
+ mov r2, r0, lsl #1
+ mov r3, r1, lsl #1
+ mvns ip, r2, asr #24
+ bne 1f
+ movs ip, r0, lsl #9
+ bne 3f @ r0 is NAN
+1: mvns ip, r3, asr #24
+ bne 2f
+ movs ip, r1, lsl #9
+ bne 3f @ r1 is NAN
+2: mov r0, #0 @ arguments are ordered.
+ RET
+3: mov r0, #1 @ arguments are unordered.
+ RET
+
+ FUNC_END aeabi_fcmpun
+ FUNC_END unordsf2
+
+#endif /* L_unordsf2 */
+
+#ifdef L_fixsfsi
+
+ARM_FUNC_START fixsfsi
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_f2iz,fixsfsi)
+
+ @ check exponent range.
+ mov r2, r0, lsl #1
+ cmp r2, #(127 << 24)
+ bcc 1f @ value is too small
+ mov r3, #(127 + 31)
+ subs r2, r3, r2, lsr #24
+ bls 2f @ value is too large
+
+ @ scale value
+ mov r3, r0, lsl #8
+ orr r3, r3, #0x80000000
+ tst r0, #0x80000000 @ the sign bit
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ shift1 lsr, r0, r3, r2
+ do_it ne
+ rsbne r0, r0, #0
+ RET
+
+1: mov r0, #0
+ RET
+
+2: cmp r2, #(127 + 31 - 0xff)
+ bne 3f
+ movs r2, r0, lsl #9
+ bne 4f @ r0 is NAN.
+3: ands r0, r0, #0x80000000 @ the sign bit
+ do_it eq
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ moveq r0, #0x7fffffff @ the maximum signed positive si
+ RET
+
+4: mov r0, #0 @ What should we convert NAN to?
+ RET
+
+ FUNC_END aeabi_f2iz
+ FUNC_END fixsfsi
+
+#endif /* L_fixsfsi */
+
+#ifdef L_fixunssfsi
+
+ARM_FUNC_START fixunssfsi
+/* APPLE LOCAL ARM MACH assembler */
+ARM_FUNC_ALIAS(aeabi_f2uiz,fixunssfsi)
+
+ @ check exponent range.
+ movs r2, r0, lsl #1
+ bcs 1f @ value is negative
+ cmp r2, #(127 << 24)
+ bcc 1f @ value is too small
+ mov r3, #(127 + 31)
+ subs r2, r3, r2, lsr #24
+ bmi 2f @ value is too large
+
+ @ scale the value
+ mov r3, r0, lsl #8
+ orr r3, r3, #0x80000000
+/* APPLE LOCAL v7 support. Merge from mainline */
+ shift1 lsr, r0, r3, r2
+ RET
+
+1: mov r0, #0
+ RET
+
+2: cmp r2, #(127 + 31 - 0xff)
+ bne 3f
+ movs r2, r0, lsl #9
+ bne 4f @ r0 is NAN.
+3: mov r0, #0xffffffff @ maximum unsigned si
+ RET
+
+4: mov r0, #0 @ What should we convert NAN to?
+ RET
+
+ FUNC_END aeabi_f2uiz
+ FUNC_END fixunssfsi
+
+#endif /* L_fixunssfsi */
+
+/* APPLE LOCAL begin ARM 4702983 Thumb VFP math */
+#ifndef NOT_DARWIN
+#if __ARM_ARCH__ > 5
+#ifdef L_mulsf3vfp
+
+ARM_FUNC_START mulsf3vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fmuls s13, s14, s15
+ fmrs r0, s13
+ RET
+
+ FUNC_END mulsf3vfp
+
+#endif
+
+#ifdef L_addsf3vfp
+
+ARM_FUNC_START addsf3vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fadds s13, s14, s15
+ fmrs r0, s13
+ RET
+
+ FUNC_END addsf3vfp
+
+#endif
+
+#ifdef L_subsf3vfp
+
+ARM_FUNC_START subsf3vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fsubs s13, s14, s15
+ fmrs r0, s13
+ RET
+
+ FUNC_END subsf3vfp
+
+#endif
+
+#ifdef L_divsf3vfp
+
+ARM_FUNC_START divsf3vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fdivs s13, s14, s15
+ fmrs r0, s13
+ RET
+
+ FUNC_END divsf3vfp
+
+#endif
+
+#ifdef L_eqsf2vfp
+
+ARM_FUNC_START eqsf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it ne, e
+ movne r0, #0
+ moveq r0, #1
+ RET
+
+ FUNC_END eqsf2vfp
+
+#endif
+
+#ifdef L_nesf2vfp
+
+ARM_FUNC_START nesf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it eq, e
+ moveq r0, #0
+ movne r0, #1
+ RET
+
+ FUNC_END nesf2vfp
+
+#endif
+
+#ifdef L_ltsf2vfp
+
+ARM_FUNC_START ltsf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it pl, e
+ movpl r0, #0
+ movmi r0, #1
+ RET
+
+ FUNC_END ltsf2vfp
+
+#endif
+
+#ifdef L_gtsf2vfp
+
+ARM_FUNC_START gtsf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it le, e
+ movle r0, #0
+ movgt r0, #1
+ RET
+
+ FUNC_END gtsf2vfp
+
+#endif
+
+#ifdef L_lesf2vfp
+
+ARM_FUNC_START lesf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it hi, e
+ movhi r0, #0
+ movls r0, #1
+ RET
+
+ FUNC_END lesf2vfp
+
+#endif
+
+#ifdef L_gesf2vfp
+
+ARM_FUNC_START gesf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it lt, e
+ movlt r0, #0
+ movge r0, #1
+ RET
+
+ FUNC_END gesf2vfp
+
+#endif
+
+#ifdef L_unordsf2vfp
+
+ARM_FUNC_START unordsf2vfp
+
+ fmsr s14, r0
+ fmsr s15, r1
+ fcmps s14, s15
+ fmstat
+ do_it vc, e
+ movvc r0, #0
+ movvs r0, #1
+ RET
+
+ FUNC_END unordsf2vfp
+
+#endif
+
+#ifdef L_fixsfsivfp
+
+ARM_FUNC_START fixsfsivfp
+
+ fmsr s15, r0
+ ftosizs s15, s15
+ fmrs r0, s15
+ RET
+
+ FUNC_END fixsfsivfp
+
+#endif
+
+#ifdef L_fixunssfsivfp
+
+ARM_FUNC_START fixunssfsivfp
+
+ fmsr s15, r0
+ ftouizs s15, s15
+ fmrs r0, s15
+ RET
+
+ FUNC_END fixunssfsivfp
+
+#endif
+
+#ifdef L_floatsisfvfp
+
+ARM_FUNC_START floatsisfvfp
+
+ fmsr s15, r0
+ fsitos s15, s15
+ fmrs r0, s15
+ RET
+
+ FUNC_END floatsisfvfp
+
+#endif
+
+#ifdef L_floatunssisfvfp
+
+ARM_FUNC_START floatunssisfvfp
+
+ fmsr s15, r0
+ fuitos s15, s15
+ fmrs r0, s15
+ RET
+
+ FUNC_END floatunssisfvfp
+
+#endif
+
+#endif /* __ARM_ARCH__ > 5 */
+#endif /* NOT_DARWIN */
+/* APPLE LOCAL end ARM 4702983 Thumb VFP math */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/iwmmxt.md b/gcc-4.2.1-5666.3/gcc/config/arm/iwmmxt.md
new file mode 100644
index 000000000..bc4fda33b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/iwmmxt.md
@@ -0,0 +1,1411 @@
+;; APPLE LOCAL v7 support. Merge from mainline
+;; ??? This file needs auditing for thumb2
+;; Patterns for the Intel Wireless MMX technology architecture.
+;; Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 2, or (at your option) any later
+;; version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+;; Integer element sizes implemented by IWMMXT.
+(define_mode_macro VMMX [V2SI V4HI V8QI])
+
+;; Integer element sizes for shifts.
+(define_mode_macro VSHFT [V4HI V2SI DI])
+
+;; Determine element size suffix from vector mode.
+(define_mode_attr MMX_char [(V8QI "b") (V4HI "h") (V2SI "w") (DI "d")])
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
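+
+;; Illustrative note (hedged): a define_insn written against VMMX, for
+;; example one whose output template uses "wadd<MMX_char>", is expanded
+;; by the mode macro into one pattern per element mode, with <MMX_char>
+;; substituting "b", "h" or "w" for V8QI, V4HI and V2SI respectively.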
+(define_insn "iwmmxt_iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=y,?&r,?&r")
+ (ior:DI (match_operand:DI 1 "register_operand" "%y,0,r")
+ (match_operand:DI 2 "register_operand" "y,r,r")))]
+ "TARGET_REALLY_IWMMXT"
+ "@
+ wor%?\\t%0, %1, %2
+ #
+ #"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "iwmmxt_xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=y,?&r,?&r")
+ (xor:DI (match_operand:DI 1 "register_operand" "%y,0,r")
+ (match_operand:DI 2 "register_operand" "y,r,r")))]
+ "TARGET_REALLY_IWMMXT"
+ "@
+ wxor%?\\t%0, %1, %2
+ #
+ #"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "iwmmxt_anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=y,?&r,?&r")
+ (and:DI (match_operand:DI 1 "register_operand" "%y,0,r")
+ (match_operand:DI 2 "register_operand" "y,r,r")))]
+ "TARGET_REALLY_IWMMXT"
+ "@
+ wand%?\\t%0, %1, %2
+ #
+ #"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "iwmmxt_nanddi3"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (and:DI (match_operand:DI 1 "register_operand" "y")
+ (not:DI (match_operand:DI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wandn%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*iwmmxt_arm_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, m,y,y,yr,y,yrUy")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,y,yr,y,yrUy,y"))]
+ "TARGET_REALLY_IWMMXT
+ && ( register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ return output_move_double (operands);
+ case 0:
+ return \"#\";
+ case 3:
+ return \"wmov%?\\t%0,%1\";
+ case 4:
+ return \"tmcrr%?\\t%0,%Q1,%R1\";
+ case 5:
+ return \"tmrrc%?\\t%Q0,%R0,%1\";
+ case 6:
+ return \"wldrd%?\\t%0,%1\";
+ case 7:
+ return \"wstrd%?\\t%1,%0\";
+ }
+}"
+ [(set_attr "length" "8,8,8,4,4,4,4,4")
+ (set_attr "type" "*,load1,store2,*,*,*,*,*")
+ (set_attr "pool_range" "*,1020,*,*,*,*,*,*")
+ (set_attr "neg_pool_range" "*,1012,*,*,*,*,*,*")]
+)
+
+(define_insn "*iwmmxt_movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m,z,r,?z,Uy,z")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r,r,z,Uy,z,z"))]
+ "TARGET_REALLY_IWMMXT
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mov\\t%0, %1\";
+ case 1: return \"mvn\\t%0, #%B1\";
+ case 2: return \"ldr\\t%0, %1\";
+ case 3: return \"str\\t%1, %0\";
+ case 4: return \"tmcr\\t%0, %1\";
+ case 5: return \"tmrc\\t%0, %1\";
+ case 6: return arm_output_load_gr (operands);
+ case 7: return \"wstrw\\t%1, %0\";
+   default: return \"wstrw\\t%1, [sp, #-4]!\;wldrw\\t%0, [sp], #4\\t@ move CG reg\";
+ }"
+ [(set_attr "type" "*,*,load1,store1,*,*,load1,store1,*")
+ (set_attr "length" "*,*,*, *,*,*, 16, *,8")
+ (set_attr "pool_range" "*,*,4096, *,*,*,1024, *,*")
+ (set_attr "neg_pool_range" "*,*,4084, *,*,*, *, 1012,*")
+ ;; Note - the "predicable" attribute is not allowed to have alternatives.
+ ;; Since the wSTRw wCx instruction is not predicable, we cannot support
+ ;; predicating any of the alternatives in this template. Instead,
+ ;; we do the predication ourselves, in cond_iwmmxt_movsi_insn.
+ (set_attr "predicable" "no")
+ ;; Also - we have to pretend that these insns clobber the condition code
+ ;; bits as otherwise arm_final_prescan_insn() will try to conditionalize
+ ;; them.
+ (set_attr "conds" "clob")]
+)
+
+;; Because iwmmxt_movsi_insn is not predicable, we provide the
+;; cond_exec version explicitly, with appropriate constraints.
+
+(define_insn "*cond_iwmmxt_movsi_insn"
+ [(cond_exec
+ (match_operator 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "")
+ (const_int 0)])
+ (set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m,z,r")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r,r,z")))]
+ "TARGET_REALLY_IWMMXT
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mov%?\\t%0, %1\";
+ case 1: return \"mvn%?\\t%0, #%B1\";
+ case 2: return \"ldr%?\\t%0, %1\";
+ case 3: return \"str%?\\t%1, %0\";
+ case 4: return \"tmcr%?\\t%0, %1\";
+ default: return \"tmrc%?\\t%0, %1\";
+ }"
+ [(set_attr "type" "*,*,load1,store1,*,*")
+ (set_attr "pool_range" "*,*,4096, *,*,*")
+ (set_attr "neg_pool_range" "*,*,4084, *,*,*")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "movv8qi_internal"
+ [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
+ (match_operand:V8QI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ "TARGET_REALLY_IWMMXT"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"wmov%?\\t%0, %1\";
+ case 1: return \"wstrd%?\\t%1, %0\";
+ case 2: return \"wldrd%?\\t%0, %1\";
+ case 3: return \"tmrrc%?\\t%Q0, %R0, %1\";
+ case 4: return \"tmcrr%?\\t%0, %Q1, %R1\";
+ case 5: return \"#\";
+ default: return output_move_double (operands);
+ }"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4, 4, 4,4,4,8, 8")
+ (set_attr "type" "*,store1,load1,*,*,*,load1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+
+(define_insn "movv4hi_internal"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
+ (match_operand:V4HI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ "TARGET_REALLY_IWMMXT"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"wmov%?\\t%0, %1\";
+ case 1: return \"wstrd%?\\t%1, %0\";
+ case 2: return \"wldrd%?\\t%0, %1\";
+ case 3: return \"tmrrc%?\\t%Q0, %R0, %1\";
+ case 4: return \"tmcrr%?\\t%0, %Q1, %R1\";
+ case 5: return \"#\";
+ default: return output_move_double (operands);
+ }"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4, 4, 4,4,4,8, 8")
+ (set_attr "type" "*,store1,load1,*,*,*,load1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+
+(define_insn "movv2si_internal"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
+ (match_operand:V2SI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ "TARGET_REALLY_IWMMXT"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"wmov%?\\t%0, %1\";
+ case 1: return \"wstrd%?\\t%1, %0\";
+ case 2: return \"wldrd%?\\t%0, %1\";
+ case 3: return \"tmrrc%?\\t%Q0, %R0, %1\";
+ case 4: return \"tmcrr%?\\t%0, %Q1, %R1\";
+ case 5: return \"#\";
+ default: return output_move_double (operands);
+ }"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4, 4, 4,4,4,8, 24")
+ (set_attr "type" "*,store1,load1,*,*,*,load1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+;; This pattern should not be needed.  It is to match a
+;; weird case generated by GCC when no optimizations are
+;; enabled.  (Try compiling gcc/testsuite/gcc.c-torture/
+;; compile/simd-5.c at -O0).  The mode for operands[1] is
+;; deliberately omitted.
+(define_insn "movv2si_internal_2"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "=?r")
+ (match_operand 1 "immediate_operand" "mi"))]
+ "TARGET_REALLY_IWMMXT"
+ "* return output_move_double (operands);"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "8")
+ (set_attr "type" "load1")
+ (set_attr "pool_range" "256")
+ (set_attr "neg_pool_range" "244")])
+
+;; Vector add/subtract
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "*add<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (plus:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wadd<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
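+
+;; For illustration only: each mode macro pattern expands into one insn
+;; per entry in VMMX, with <MMX_char> resolved per mode.  The V4HI
+;; instance of *add<mode>3_iwmmxt above is equivalent to this sketch:
+;;
+;; (define_insn "*addv4hi3_iwmmxt"
+;;   [(set (match_operand:V4HI 0 "register_operand" "=y")
+;;         (plus:V4HI (match_operand:V4HI 1 "register_operand" "y")
+;;                    (match_operand:V4HI 2 "register_operand" "y")))]
+;;   "TARGET_REALLY_IWMMXT"
+;;   "waddh%?\\t%0, %1, %2"
+;;   [(set_attr "predicable" "yes")])
+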
+(define_insn "ssaddv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (ss_plus:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddbss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ssaddv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ss_plus:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddhss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ssaddv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (ss_plus:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddwss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "usaddv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (us_plus:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddbus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "usaddv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (us_plus:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddhus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "usaddv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (us_plus:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "waddwus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "*sub<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (minus:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsub<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+(define_insn "sssubv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (ss_minus:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubbss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "sssubv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ss_minus:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubhss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "sssubv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (ss_minus:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubwss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ussubv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (us_minus:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubbus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ussubv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (us_minus:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubhus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ussubv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (us_minus:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsubwus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+(define_insn "*mulv4hi3_iwmmxt"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (mult:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wmulul%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "smulv4hi3_highpart"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (mult:V4SI (sign_extend:V4SI (match_operand:V4HI 1 "register_operand" "y"))
+ (sign_extend:V4SI (match_operand:V4HI 2 "register_operand" "y")))
+ (const_int 16))))]
+ "TARGET_REALLY_IWMMXT"
+ "wmulsm%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "umulv4hi3_highpart"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (mult:V4SI (zero_extend:V4SI (match_operand:V4HI 1 "register_operand" "y"))
+ (zero_extend:V4SI (match_operand:V4HI 2 "register_operand" "y")))
+ (const_int 16))))]
+ "TARGET_REALLY_IWMMXT"
+ "wmulum%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmacs"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4HI 2 "register_operand" "y")
+ (match_operand:V4HI 3 "register_operand" "y")] UNSPEC_WMACS))]
+ "TARGET_REALLY_IWMMXT"
+ "wmacs%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmacsz"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WMACSZ))]
+ "TARGET_REALLY_IWMMXT"
+ "wmacsz%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmacu"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4HI 2 "register_operand" "y")
+ (match_operand:V4HI 3 "register_operand" "y")] UNSPEC_WMACU))]
+ "TARGET_REALLY_IWMMXT"
+ "wmacu%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmacuz"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WMACUZ))]
+ "TARGET_REALLY_IWMMXT"
+ "wmacuz%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; Same as xordi3, but don't show input operands so that we don't think
+;; they are live.
+(define_insn "iwmmxt_clrdi"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(const_int 0)] UNSPEC_CLRDI))]
+ "TARGET_REALLY_IWMMXT"
+ "wxor%?\\t%0, %0, %0"
+ [(set_attr "predicable" "yes")])
+
+;; Seems like cse likes to generate these, so we have to support them.
+
+(define_insn "*iwmmxt_clrv8qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (const_vector:V8QI [(const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)]))]
+ "TARGET_REALLY_IWMMXT"
+ "wxor%?\\t%0, %0, %0"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*iwmmxt_clrv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (const_vector:V4HI [(const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)]))]
+ "TARGET_REALLY_IWMMXT"
+ "wxor%?\\t%0, %0, %0"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*iwmmxt_clrv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (const_vector:V2SI [(const_int 0) (const_int 0)]))]
+ "TARGET_REALLY_IWMMXT"
+ "wxor%?\\t%0, %0, %0"
+ [(set_attr "predicable" "yes")])
+
+;; Unsigned averages/sum of absolute differences
+
+(define_insn "iwmmxt_uavgrndv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (ashiftrt:V8QI
+ (plus:V8QI (plus:V8QI
+ (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y"))
+ (const_vector:V8QI [(const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)]))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wavg2br%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_uavgrndv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ashiftrt:V4HI
+ (plus:V4HI (plus:V4HI
+ (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y"))
+ (const_vector:V4HI [(const_int 1)
+ (const_int 1)
+ (const_int 1)
+ (const_int 1)]))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wavg2hr%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+
+(define_insn "iwmmxt_uavgv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (ashiftrt:V8QI (plus:V8QI
+ (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y"))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wavg2b%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_uavgv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ashiftrt:V4HI (plus:V4HI
+ (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y"))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wavg2h%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_psadbw"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (abs:V8QI (minus:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "psadbw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+
+;; Insert/extract/shuffle
+
+(define_insn "iwmmxt_tinsrb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_merge:V8QI (match_operand:V8QI 1 "register_operand" "0")
+ (vec_duplicate:V8QI
+ (truncate:QI (match_operand:SI 2 "nonimmediate_operand" "r")))
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ "TARGET_REALLY_IWMMXT"
+ "tinsrb%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tinsrh"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_merge:V4HI (match_operand:V4HI 1 "register_operand" "0")
+ (vec_duplicate:V4HI
+ (truncate:HI (match_operand:SI 2 "nonimmediate_operand" "r")))
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ "TARGET_REALLY_IWMMXT"
+ "tinsrh%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tinsrw"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_merge:V2SI (match_operand:V2SI 1 "register_operand" "0")
+ (vec_duplicate:V2SI
+ (match_operand:SI 2 "nonimmediate_operand" "r"))
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ "TARGET_REALLY_IWMMXT"
+ "tinsrw%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_textrmub"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (vec_select:QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel
+ [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_REALLY_IWMMXT"
+ "textrmub%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_textrmsb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (vec_select:QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel
+ [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_REALLY_IWMMXT"
+ "textrmsb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_textrmuh"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (vec_select:HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel
+ [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_REALLY_IWMMXT"
+ "textrmuh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_textrmsh"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (vec_select:HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel
+ [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_REALLY_IWMMXT"
+ "textrmsh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; There are signed/unsigned variants of this instruction, but they are
+;; pointless.
+(define_insn "iwmmxt_textrmw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (vec_select:SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+ "TARGET_REALLY_IWMMXT"
+ "textrmsw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wshufh"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:SI 2 "immediate_operand" "i")] UNSPEC_WSHUFH))]
+ "TARGET_REALLY_IWMMXT"
+ "wshufh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; Mask-generating comparisons
+;;
+;; Note - you cannot use patterns like these here:
+;;
+;; (set:<vector> (match:<vector>) (<comparator>:<vector> (match:<vector>) (match:<vector>)))
+;;
+;; Because GCC will assume that the truth value (1 or 0) is installed
+;; into the entire destination vector (with the '1' going into the least
+;; significant element of the vector).  This is not how these instructions
+;; behave.
+;;
+;; Unfortunately the current patterns are illegal.  They are insns that
+;; write their destination register without containing a SET, which is
+;; not valid RTL.  They work in most cases for ordinary code generation,
+;; but there are circumstances where they can cause gcc to fail.
+;; XXX - FIXME.
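+;;
+;; As an illustrative sketch of the behaviour (hardware semantics, not
+;; taken from this file): wcmpeqb compares element-wise and writes an
+;; all-ones or all-zeros mask into each byte of the destination, e.g.
+;;
+;;   wR0 = {1, 2, 3, 4, 5, 6, 7, 8}
+;;   wR1 = {1, 9, 3, 9, 5, 9, 7, 9}
+;;   wcmpeqb wR2, wR0, wR1   => wR2 = {ff, 00, ff, 00, ff, 00, ff, 00}
+;;
+;; so no single 1-or-0 truth value ever lands in the least significant
+;; element.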
+
+(define_insn "eqv8qi3"
+ [(unspec_volatile [(match_operand:V8QI 0 "register_operand" "=y")
+ (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_EQ)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpeqb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "eqv4hi3"
+ [(unspec_volatile [(match_operand:V4HI 0 "register_operand" "=y")
+ (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_EQ)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpeqh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "eqv2si3"
+ [(unspec_volatile:V2SI [(match_operand:V2SI 0 "register_operand" "=y")
+ (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_EQ)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpeqw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtuv8qi3"
+ [(unspec_volatile [(match_operand:V8QI 0 "register_operand" "=y")
+ (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GTU)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtub%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtuv4hi3"
+ [(unspec_volatile [(match_operand:V4HI 0 "register_operand" "=y")
+ (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GTU)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtuh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtuv2si3"
+ [(unspec_volatile [(match_operand:V2SI 0 "register_operand" "=y")
+ (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GTU)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtuw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtv8qi3"
+ [(unspec_volatile [(match_operand:V8QI 0 "register_operand" "=y")
+ (match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GT)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtsb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtv4hi3"
+ [(unspec_volatile [(match_operand:V4HI 0 "register_operand" "=y")
+ (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GT)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtsh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "gtv2si3"
+ [(unspec_volatile [(match_operand:V2SI 0 "register_operand" "=y")
+ (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:V2SI 2 "register_operand" "y")]
+ VUNSPEC_WCMP_GT)]
+ "TARGET_REALLY_IWMMXT"
+ "wcmpgtsw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; Max/min insns
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "*smax<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (smax:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wmaxs<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*umax<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (umax:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wmaxu<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*smin<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (smin:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wmins<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "*umin<mode>3_iwmmxt"
+ [(set (match_operand:VMMX 0 "register_operand" "=y")
+ (umin:VMMX (match_operand:VMMX 1 "register_operand" "y")
+ (match_operand:VMMX 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wminu<MMX_char>%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; Pack/unpack insns.
+
+(define_insn "iwmmxt_wpackhss"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_concat:V8QI
+ (ss_truncate:V4QI (match_operand:V4HI 1 "register_operand" "y"))
+ (ss_truncate:V4QI (match_operand:V4HI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackhss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wpackwss"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (ss_truncate:V2HI (match_operand:V2SI 1 "register_operand" "y"))
+ (ss_truncate:V2HI (match_operand:V2SI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackwss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wpackdss"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_concat:V2SI
+ (ss_truncate:SI (match_operand:DI 1 "register_operand" "y"))
+ (ss_truncate:SI (match_operand:DI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackdss%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wpackhus"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_concat:V8QI
+ (us_truncate:V4QI (match_operand:V4HI 1 "register_operand" "y"))
+ (us_truncate:V4QI (match_operand:V4HI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackhus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wpackwus"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (us_truncate:V2HI (match_operand:V2SI 1 "register_operand" "y"))
+ (us_truncate:V2HI (match_operand:V2SI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackwus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wpackdus"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_concat:V2SI
+ (us_truncate:SI (match_operand:DI 1 "register_operand" "y"))
+ (us_truncate:SI (match_operand:DI 2 "register_operand" "y"))))]
+ "TARGET_REALLY_IWMMXT"
+ "wpackdus%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+
+(define_insn "iwmmxt_wunpckihb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_merge:V8QI
+ (vec_select:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (vec_select:V8QI (match_operand:V8QI 2 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (const_int 85)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckihb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckihh"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_merge:V4HI
+ (vec_select:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (vec_select:V4HI (match_operand:V4HI 2 "register_operand" "y")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (const_int 5)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckihh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckihw"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_merge:V2SI
+ (vec_select:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 1)]))
+ (vec_select:V2SI (match_operand:V2SI 2 "register_operand" "y")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckihw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckilb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_merge:V8QI
+ (vec_select:V8QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (vec_select:V8QI (match_operand:V8QI 2 "register_operand" "y")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (const_int 85)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckilb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckilh"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_merge:V4HI
+ (vec_select:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (vec_select:V4HI (match_operand:V4HI 2 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (const_int 5)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckilh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckilw"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_merge:V2SI
+ (vec_select:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (vec_select:V2SI (match_operand:V2SI 2 "register_operand" "y")
+ (parallel [(const_int 0)
+ (const_int 1)]))
+ (const_int 1)))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckilw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehub"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (zero_extend:V4HI
+ (vec_select:V4QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehub%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehuh"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (zero_extend:V2SI
+ (vec_select:V2HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehuh%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehuw"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (zero_extend:DI
+ (vec_select:SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 1)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehuw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehsb"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (sign_extend:V4HI
+ (vec_select:V4QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehsb%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehsh"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (sign_extend:V2SI
+ (vec_select:V2HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehsh%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckehsw"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (sign_extend:DI
+ (vec_select:SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 1)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckehsw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckelub"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (zero_extend:V4HI
+ (vec_select:V4QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckelub%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckeluh"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (zero_extend:V2SI
+ (vec_select:V2HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckeluh%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckeluw"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (zero_extend:DI
+ (vec_select:SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 0)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckeluw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckelsb"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (sign_extend:V4HI
+ (vec_select:V4QI (match_operand:V8QI 1 "register_operand" "y")
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckelsb%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckelsh"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (sign_extend:V2SI
+ (vec_select:V2HI (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckelsh%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wunpckelsw"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (sign_extend:DI
+ (vec_select:SI (match_operand:V2SI 1 "register_operand" "y")
+ (parallel [(const_int 0)]))))]
+ "TARGET_REALLY_IWMMXT"
+ "wunpckelsw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+;; Shifts
+
+(define_insn "rorv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (rotatert:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrorhg%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "rorv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (rotatert:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrorwg%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "rordi3"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (rotatert:DI (match_operand:DI 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrordg%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+(define_insn "ashr<mode>3_iwmmxt"
+ [(set (match_operand:VSHFT 0 "register_operand" "=y")
+ (ashiftrt:VSHFT (match_operand:VSHFT 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsra<MMX_char>g%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "lshr<mode>3_iwmmxt"
+ [(set (match_operand:VSHFT 0 "register_operand" "=y")
+ (lshiftrt:VSHFT (match_operand:VSHFT 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrl<MMX_char>g%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashl<mode>3_iwmmxt"
+ [(set (match_operand:VSHFT 0 "register_operand" "=y")
+ (ashift:VSHFT (match_operand:VSHFT 1 "register_operand" "y")
+ (match_operand:SI 2 "register_operand" "z")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsll<MMX_char>g%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+
+(define_insn "rorv4hi3_di"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (rotatert:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrorh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "rorv2si3_di"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (rotatert:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrorw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "rordi3_di"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (rotatert:DI (match_operand:DI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wrord%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashrv4hi3_di"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ashiftrt:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrah%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashrv2si3_di"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (ashiftrt:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsraw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashrdi3_di"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrad%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "lshrv4hi3_di"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (lshiftrt:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrlh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "lshrv2si3_di"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (lshiftrt:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrlw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "lshrdi3_di"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsrld%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashlv4hi3_di"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ashift:V4HI (match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsllh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashlv2si3_di"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (ashift:V2SI (match_operand:V2SI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wsllw%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "ashldi3_di"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (ashift:DI (match_operand:DI 1 "register_operand" "y")
+ (match_operand:DI 2 "register_operand" "y")))]
+ "TARGET_REALLY_IWMMXT"
+ "wslld%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmadds"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WMADDS))]
+ "TARGET_REALLY_IWMMXT"
+ "wmadds%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wmaddu"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WMADDU))]
+ "TARGET_REALLY_IWMMXT"
+ "wmaddu%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmia"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))
+ (sign_extend:DI
+ (match_operand:SI 3 "register_operand" "r")))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmia%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmiaph"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (plus:DI
+ (mult:DI (sign_extend:DI
+ (truncate:HI (match_operand:SI 2 "register_operand" "r")))
+ (sign_extend:DI
+ (truncate:HI (match_operand:SI 3 "register_operand" "r"))))
+ (mult:DI (sign_extend:DI
+ (truncate:HI (ashiftrt:SI (match_dup 2) (const_int 16))))
+ (sign_extend:DI
+ (truncate:HI (ashiftrt:SI (match_dup 3) (const_int 16))))))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmiaph%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmiabb"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (truncate:HI (match_operand:SI 2 "register_operand" "r")))
+ (sign_extend:DI
+ (truncate:HI (match_operand:SI 3 "register_operand" "r"))))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmiabb%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmiatb"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (truncate:HI (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:DI
+ (truncate:HI (match_operand:SI 3 "register_operand" "r"))))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmiatb%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmiabt"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (truncate:HI (match_operand:SI 2 "register_operand" "r")))
+ (sign_extend:DI
+ (truncate:HI (ashiftrt:SI
+ (match_operand:SI 3 "register_operand" "r")
+ (const_int 16)))))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmiabt%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmiatt"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (truncate:HI (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "r")
+ (const_int 16))))
+ (sign_extend:DI
+ (truncate:HI (ashiftrt:SI
+ (match_operand:SI 3 "register_operand" "r")
+ (const_int 16)))))))]
+ "TARGET_REALLY_IWMMXT"
+ "tmiatt%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tbcstqi"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_duplicate:V8QI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_REALLY_IWMMXT"
+ "tbcstb%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tbcsthi"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_duplicate:V4HI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_REALLY_IWMMXT"
+ "tbcsth%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tbcstsi"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_duplicate:V2SI (match_operand:SI 1 "register_operand" "r")))]
+ "TARGET_REALLY_IWMMXT"
+ "tbcstw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmovmskb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V8QI 1 "register_operand" "y")] UNSPEC_TMOVMSK))]
+ "TARGET_REALLY_IWMMXT"
+ "tmovmskb%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmovmskh"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V4HI 1 "register_operand" "y")] UNSPEC_TMOVMSK))]
+ "TARGET_REALLY_IWMMXT"
+ "tmovmskh%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmovmskw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V2SI 1 "register_operand" "y")] UNSPEC_TMOVMSK))]
+ "TARGET_REALLY_IWMMXT"
+ "tmovmskw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_waccb"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:V8QI 1 "register_operand" "y")] UNSPEC_WACC))]
+ "TARGET_REALLY_IWMMXT"
+ "waccb%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wacch"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:V4HI 1 "register_operand" "y")] UNSPEC_WACC))]
+ "TARGET_REALLY_IWMMXT"
+ "wacch%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_waccw"
+ [(set (match_operand:DI 0 "register_operand" "=y")
+ (unspec:DI [(match_operand:V2SI 1 "register_operand" "y")] UNSPEC_WACC))]
+ "TARGET_REALLY_IWMMXT"
+ "waccw%?\\t%0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_walign"
+ [(set (match_operand:V8QI 0 "register_operand" "=y,y")
+ (subreg:V8QI (ashiftrt:TI
+ (subreg:TI (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "y,y")
+ (match_operand:V8QI 2 "register_operand" "y,y")) 0)
+ (mult:SI
+ (match_operand:SI 3 "nonmemory_operand" "i,z")
+ (const_int 8))) 0))]
+ "TARGET_REALLY_IWMMXT"
+ "@
+ waligni%?\\t%0, %1, %2, %3
+ walignr%U3%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmrc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(match_operand:SI 1 "immediate_operand" "i")]
+ VUNSPEC_TMRC))]
+ "TARGET_REALLY_IWMMXT"
+ "tmrc%?\\t%0, %w1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_tmcr"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")
+ (match_operand:SI 1 "register_operand" "r")]
+ VUNSPEC_TMCR)]
+ "TARGET_REALLY_IWMMXT"
+ "tmcr%?\\t%w0, %1"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wsadb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")] UNSPEC_WSAD))]
+ "TARGET_REALLY_IWMMXT"
+ "wsadb%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wsadh"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WSAD))]
+ "TARGET_REALLY_IWMMXT"
+ "wsadh%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wsadbz"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")] UNSPEC_WSADZ))]
+ "TARGET_REALLY_IWMMXT"
+ "wsadbz%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
+(define_insn "iwmmxt_wsadhz"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "y")
+ (match_operand:V4HI 2 "register_operand" "y")] UNSPEC_WSADZ))]
+ "TARGET_REALLY_IWMMXT"
+ "wsadhz%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")])
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/kaos-arm.h b/gcc-4.2.1-5666.3/gcc/config/arm/kaos-arm.h
new file mode 100644
index 000000000..b575489b1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/kaos-arm.h
@@ -0,0 +1,24 @@
+/* Definitions of target machine for GNU compiler.
+ kaOS on arm architecture version.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/kaOS[ELF])", stderr);
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/kaos-strongarm.h b/gcc-4.2.1-5666.3/gcc/config/arm/kaos-strongarm.h
new file mode 100644
index 000000000..7be215199
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/kaos-strongarm.h
@@ -0,0 +1,24 @@
+/* Definitions of target machine for GNU compiler.
+ kaOS on strongarm architecture version.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (StrongARM/kaOS[ELF])", stderr);
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/lib1funcs.asm b/gcc-4.2.1-5666.3/gcc/config/arm/lib1funcs.asm
new file mode 100644
index 000000000..bb438b9ec
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/lib1funcs.asm
@@ -0,0 +1,1695 @@
+@ libgcc routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007
+ Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+/* ------------------------------------------------------------------------ */
+
+/* We need to know what prefix to add to function names. */
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#ifdef __ELF__
+#ifdef __thumb__
+#define __PLT__ /* Not supported in Thumb assembler (for now). */
+#else
+#define __PLT__ (PLT)
+#endif
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#define LSYM(x) .x
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#define LSYM(x) x
+#endif
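+
+/* For illustration (assuming Darwin's "_" user-label prefix):
+
+     SYM (__udivsi3)  ==>  ___udivsi3
+     LSYM (Ldiv0)     ==>  .Ldiv0  on ELF (assembler-local),
+                           Ldiv0   elsewhere.  */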
+
+/* Function end macros. Variants for interworking. */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
+ || defined(__ARM_ARCH_4T__)
+/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
+ long multiply instructions. That includes v3M. */
+# define __ARM_ARCH__ 4
+#endif
+
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+# define __ARM_ARCH__ 5
+#endif
+
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
+# define __ARM_ARCH__ 6
+#endif
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
+# define __ARM_ARCH__ 7
+#endif
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#ifndef __ARM_ARCH__
+#error Unable to determine architecture.
+#endif
+
+/* How to return from a function call depends on the architecture variant. */
+
+#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)
+
+# define RET bx lr
+# define RETc(x) bx##x lr
+
+/* Special precautions for interworking on armv4t. */
+# if (__ARM_ARCH__ == 4)
+
+/* Always use bx, not ldr pc. */
+# if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
+# define __INTERWORKING__
+# endif /* __THUMB__ || __THUMB_INTERWORK__ */
+
+/* Include thumb stub before arm mode code. */
+# if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+# define __INTERWORKING_STUBS__
+# endif /* __thumb__ && !__THUMB_INTERWORK__ */
+
+#endif /* __ARM_ARCH__ == 4 */
+
+#else
+
+# define RET mov pc, lr
+# define RETc(x) mov##x pc, lr
+
+#endif
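+
+/* For illustration: on armv5 and later (and on armv4t) the definitions
+   above give
+
+     RET       ==>  bx lr
+     RETc(eq)  ==>  bxeq lr
+
+   while earlier variants fall back to
+
+     RET       ==>  mov pc, lr
+     RETc(eq)  ==>  moveq pc, lr  */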
+
+.macro cfi_pop advance, reg, cfa_offset
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .4byte \advance
+ .byte (0xc0 | \reg) /* DW_CFA_restore */
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .uleb128 \cfa_offset
+ .popsection
+#endif
+.endm
+.macro cfi_push advance, reg, offset, cfa_offset
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .byte 0x4 /* DW_CFA_advance_loc4 */
+ .4byte \advance
+ .byte (0x80 | \reg) /* DW_CFA_offset */
+ .uleb128 (\offset / -4)
+ .byte 0xe /* DW_CFA_def_cfa_offset */
+ .uleb128 \cfa_offset
+ .popsection
+#endif
+.endm
+.macro cfi_start start_label, end_label
+#ifdef __ELF__
+ .pushsection .debug_frame
+LSYM(Lstart_frame):
+ .4byte LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
+LSYM(Lstart_cie):
+ .4byte 0xffffffff @ CIE Identifier Tag
+ .byte 0x1 @ CIE Version
+ .ascii "\0" @ CIE Augmentation
+ .uleb128 0x1 @ CIE Code Alignment Factor
+ .sleb128 -4 @ CIE Data Alignment Factor
+ .byte 0xe @ CIE RA Column
+ .byte 0xc @ DW_CFA_def_cfa
+ .uleb128 0xd
+ .uleb128 0x0
+
+ .align 2
+LSYM(Lend_cie):
+ .4byte LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length
+LSYM(Lstart_fde):
+ .4byte LSYM(Lstart_frame) @ FDE CIE offset
+ .4byte \start_label @ FDE initial location
+ .4byte \end_label-\start_label @ FDE address range
+ .popsection
+#endif
+.endm
+.macro cfi_end end_label
+#ifdef __ELF__
+ .pushsection .debug_frame
+ .align 2
+LSYM(Lend_fde):
+ .popsection
+\end_label:
+#endif
+.endm
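+
+/* Illustration of the CFI macros above (a sketch, ELF only): with the
+   CIE's data alignment factor of -4, an invocation such as
+
+     98:	cfi_push 98b - __udivsi3, 0xe, -0x8, 0x8
+
+   records that lr (DWARF register 0xe) was saved at CFA-8 and that the
+   CFA is now sp + 8; the matching cfi_pop restores the register rule
+   and resets the CFA offset.  */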
+
+/* APPLE LOCAL begin ARM MACH assembler macros */
+#if defined (__INTERWORKING__)
+#define RETLDM \
+ ldr lr, [sp], #8 ; \
+ bx lr
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined (__thumb2__)
+#define RETLDM1(...) \
+ pop {__VA_ARGS__, lr} ; \
+ bx lr
+#define RETLDM2(cond,...) \
+ pop##cond {__VA_ARGS__, lr} ; \
+ bx##cond lr
+#else
+#define RETLDM1(...) \
+ ldmia sp!, {__VA_ARGS__, lr} ; \
+ bx lr
+#define RETLDM2(cond,...) \
+ ldm##cond##ia sp!, {__VA_ARGS__, lr} ; \
+ bx##cond lr
+#endif
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#define RETLDM_unwind(addr) \
+ ldr lr, [sp], #8 ; \
+9: cfi_pop 9b - addr, 0xe, 0x0 ; \
+ bx lr
+#else
+#define RETLDM \
+ ldr pc, [sp], #8
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined (__thumb2__)
+#define RETLDM1(...) \
+ pop {__VA_ARGS__, pc}
+#define RETLDM2(cond,...) \
+ pop##cond {__VA_ARGS__, pc}
+#else
+#define RETLDM1(...) \
+ ldmia sp!, {__VA_ARGS__, pc}
+#define RETLDM2(cond,...) \
+ ldm##cond##ia sp!, {__VA_ARGS__, pc}
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#define RETLDM_unwind(addr) \
+ ldr pc, [sp], #8
+#endif
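+
+/* For illustration, RETLDM1 (r4, r5) expands, depending on the
+   configuration above, to one of
+
+     pop {r4, r5, lr} ; bx lr          (Thumb-2, interworking)
+     ldmia sp!, {r4, r5, lr} ; bx lr   (ARM, interworking)
+     pop {r4, r5, pc}                  (Thumb-2, no interworking)
+     ldmia sp!, {r4, r5, pc}           (ARM, no interworking)
+
+   and RETLDM2 (eq, r4) gives the corresponding conditional forms
+   (popeq, ldmeqia).  */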
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+
+/* The Unified assembly syntax allows the same code to be assembled for both
+ ARM and Thumb-2. However this is only supported by recent gas, so define
+ a set of macros to allow ARM code on older assemblers. */
+#if defined(__thumb2__)
+.macro do_it cond, suffix=""
+#if defined (__MACH__)
+ it$1 $0
+#else
+ it\suffix \cond
+#endif
+.endm
+.macro shift1 op, arg0, arg1, arg2
+#if defined (__MACH__)
+ $0 $1, $2, $3
+#else
+ \op \arg0, \arg1, \arg2
+#endif
+.endm
+#define do_push push
+#define do_pop pop
+#define COND(op1, op2, cond) op1 ## op2 ## cond
+/* Perform an arithmetic operation with a variable shift operand. This
+ requires two instructions and a scratch register on Thumb-2. */
+.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
+#if defined (__MACH__)
+ $4 $6, $3, $5
+ $0 $1, $2, $6
+#else
+ \shiftop \tmp, \src2, \shiftreg
+ \name \dest, \src1, \tmp
+#endif
+.endm
+#else
+.macro do_it cond, suffix=""
+.endm
+.macro shift1 op, arg0, arg1, arg2
+#if defined (__MACH__)
+ mov $1, $2, $0 $3
+#else
+ mov \arg0, \arg1, \op \arg2
+#endif
+.endm
+#define do_push stmfd sp!,
+#define do_pop ldmfd sp!,
+#define COND(op1, op2, cond) op1 ## cond ## op2
+.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
+#if defined (__MACH__)
+ $0 $1, $2, $3, $4 $5
+#else
+ \name \dest, \src1, \src2, \shiftop \shiftreg
+#endif
+.endm
+#endif
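+
+/* For illustration: on Thumb-2 the macros above expand as
+
+     do_it eq                              ==>  it eq
+     shiftop orr, r0, r0, r1, lsl, r3, ip  ==>  lsl ip, r1, r3
+                                                orr r0, r0, ip
+
+   whereas ARM mode emits no IT instruction and folds the shift into
+   the ALU operation:
+
+     shiftop orr, r0, r0, r1, lsl, r3, ip  ==>  orr r0, r0, r1, lsl r3  */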
+
+
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+.macro ARM_LDIV0 name
+ str lr, [sp, #-8]!
+#if !defined(__MACH__)
+98: cfi_push 98b - __\name, 0xe, -0x8, 0x8
+#endif
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ About as wrong as it could be.
+	RETLDM_unwind (98b)
+.endm
+
+
+.macro THUMB_LDIV0 name
+ push { r1, lr }
+#if !defined(__MACH__)
+7: cfi_push 7b - __\name, 0xe, -0x4, 0x8
+#endif
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+#if defined (__INTERWORKING__)
+ pop { r1, r2 }
+ bx r2
+#else
+ pop { r1, pc }
+#endif
+.endm
+
+.macro FUNC_END name
+#if defined(__MACH__)
+ SIZE (__$0)
+#else
+ SIZE (__\name)
+#endif
+.endm
+
+.macro DIV_FUNC_END name
+#if !defined(__MACH__)
+ cfi_start __\name, LSYM(Lend_div0)
+#endif
+LSYM(Ldiv0):
+#ifdef __thumb__
+ THUMB_LDIV0 \name
+#else
+ ARM_LDIV0 \name
+#endif
+#if defined(__MACH__)
+ FUNC_END $0
+#else
+ cfi_end LSYM(Lend_div0)
+ FUNC_END \name
+#endif
+.endm
+
+.macro THUMB_FUNC_START name
+#if defined(__MACH__)
+ .globl SYM ($0)
+ TYPE ($0)
+ .thumb_func
+SYM ($0):
+#else
+ .globl SYM (\name)
+ TYPE (\name)
+ .thumb_func
+SYM (\name):
+#endif
+.endm
+/* APPLE LOCAL end ARM MACH assembler macros */
+
+/* Function start macros. Variants for ARM and Thumb. */
+
+#ifdef __thumb__
+#define THUMB_FUNC .thumb_func
+#define THUMB_CODE .force_thumb
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+# if defined(__thumb2__)
+#define THUMB_SYNTAX .syntax divided
+# else
+#define THUMB_SYNTAX
+# endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL ARM function alignment */
+#define FUNC_ALIGN .align 1
+#else
+#define THUMB_FUNC
+#define THUMB_CODE
+/* APPLE LOCAL ARM function alignment */
+#define FUNC_ALIGN .align 2
+/* APPLE LOCAL v7 support. Merge from mainline */
+#define THUMB_SYNTAX
+#endif
+
+/* APPLE LOCAL begin ARM MACH assembler */
+.macro FUNC_START name
+#if defined(__MACH__)
+ .text
+ .globl SYM (__$0)
+ TYPE (__$0)
+ FUNC_ALIGN
+ THUMB_CODE
+ THUMB_FUNC
+SYM (__$0):
+#else
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ THUMB_CODE
+ THUMB_FUNC
+SYM (__\name):
+#endif
+.endm
+
+/* Special functions that will always be coded in ARM assembly, even in
+ a Thumb-only compilation. */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+
+/* For Thumb-2 we build everything in thumb mode. */
+.macro ARM_FUNC_START name
+#if defined(__MACH__)
+ FUNC_START $0
+#else
+ FUNC_START \name
+#endif
+ .syntax unified
+.endm
+#define EQUIV .thumb_set
+.macro ARM_CALL name
+#if defined(__MACH__)
+ bl ___$0
+#else
+ bl ___\name
+#endif
+.endm
+
+#elif defined(__INTERWORKING_STUBS__)
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+.macro ARM_FUNC_START name
+
+#if defined(__MACH__)
+ FUNC_START $0
+#else
+ FUNC_START \name
+#endif
+ bx pc
+ nop
+ .arm
+/* A hook to tell gdb that we've switched to ARM mode. Also used to call
+ directly from other local arm routines. */
+#if defined(__MACH__)
+_L__$0:
+#else
+_L__\name:
+#endif
+.endm
+#define EQUIV .thumb_set
+/* Branch directly to a function declared with ARM_FUNC_START.
+ Must be called in arm mode. */
+.macro ARM_CALL name
+#if defined(__MACH__)
+ bl _L__$0
+#else
+ bl _L__\name
+#endif
+.endm
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+
+#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+.macro ARM_FUNC_START name
+#if defined(__MACH__)
+ .text
+ .globl SYM (__$0)
+ TYPE (__$0)
+ /* APPLE LOCAL ARM function alignment */
+ .align 2
+ .arm
+SYM (__$0):
+#else
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ .arm
+SYM (__\name):
+#endif
+.endm
+#define EQUIV .set
+.macro ARM_CALL name
+#if defined(__MACH__)
+ bl SYM (__$0)
+#else
+ bl __\name
+#endif
+.endm
+#endif
+
+#if defined (__thumb__)
+#define FUNC_ALIAS(new,old) \
+ .globl SYM (__##new) ; \
+ .thumb_set SYM (__##new), SYM (__##old)
+#else
+#define FUNC_ALIAS(new,old) \
+ .globl SYM (__##new) ; \
+ .set SYM (__##new), SYM (__##old)
+#endif
+
+#if defined(__INTERWORKING_STUBS__)
+#define ARM_FUNC_ALIAS(new,old) \
+ .globl SYM (__##new) ; \
+ EQUIV SYM (_##new), SYM (__##old) ; \
+ .set SYM (_L__##new), SYM (_L__##old)
+#else
+#define ARM_FUNC_ALIAS(new,old) \
+ .globl SYM (__##new) ; \
+ EQUIV SYM (__##new), SYM (__##old)
+#endif
+/* APPLE LOCAL end ARM MACH assembler */
+
+#ifdef __thumb__
+/* Register aliases. */
+
+work .req r4 @ XXXX is this safe ?
+dividend .req r0
+divisor .req r1
+overdone .req r2
+result .req r2
+curbit .req r3
+#endif
+#if 0
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+#endif
+
+/* ------------------------------------------------------------------------ */
+/* Bodies of the division and modulo routines. */
+/* ------------------------------------------------------------------------ */
+/* APPLE LOCAL begin ARM MACH assembler */
+#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
+#define ARMV5_DIV_LOOP(dividend, divisor, result) \
+ .set shift, shift - 1 ; \
+ cmp dividend, divisor, lsl #shift ; \
+ adc result, result, result ; \
+ subcs dividend, dividend, divisor, lsl #shift
+#define ARM_DIV_BODY(dividend, divisor, result, curbit) \
+ clz curbit, dividend ; \
+ clz result, divisor ; \
+ sub curbit, result, curbit ; \
+ rsbs curbit, curbit, #31 ; \
+ addne curbit, curbit, curbit, lsl #1 ; \
+ mov result, #0 ; \
+ addne pc, pc, curbit, lsl #2 ; \
+ nop ; \
+ .set shift, 32 ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result) ; \
+ ARMV5_DIV_LOOP (dividend, divisor, result)
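+/* Note: each ARMV5_DIV_LOOP above assembles to 3 instructions (12 bytes),
+   so once curbit has been tripled by "addne curbit, curbit, curbit,
+   lsl #1", the computed branch "addne pc, pc, curbit, lsl #2" skips
+   exactly the loop copies whose shift exceeds the operands' difference
+   in bit length. */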
+#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+#if __ARM_ARCH__ >= 5
+#define ARM_DIV_BODY_P1(dividend, divisor, result, curbit) \
+ clz curbit, divisor ; \
+ clz result, dividend ; \
+ sub result, curbit, result ; \
+ mov curbit, #1 ; \
+ mov divisor, divisor, lsl result ; \
+ mov curbit, curbit, lsl result ; \
+ mov result, #0
+#else /* __ARM_ARCH__ < 5 */
+#define ARM_DIV_BODY_P1(dividend, divisor, result, curbit) \
+ /* Initially shift the divisor left 3 bits if possible, */; \
+ /* set curbit accordingly. This allows curbit to be located */; \
+ /* at the left end of each 4-bit nibble in the division loop, */; \
+ /* saving one loop iteration in most cases. */; \
+ tst divisor, #0xe0000000 ; \
+ moveq divisor, divisor, lsl #3 ; \
+ moveq curbit, #8 ; \
+ movne curbit, #1 ; \
+ ; \
+ /* Unless the divisor is very big, shift it up in multiples of */; \
+ /* four bits, since this is the amount of unwinding in the main */; \
+ /* division loop. Continue shifting until the divisor is*/; \
+ /* larger than the dividend. */; \
+1: cmp divisor, #0x10000000 ; \
+ cmplo divisor, dividend ; \
+ movlo divisor, divisor, lsl #4 ; \
+ movlo curbit, curbit, lsl #4 ; \
+ blo 1b ; \
+ ; \
+ /* For very big divisors, we must shift it a bit at a time, or */; \
+ /* we will be in danger of overflowing. */; \
+1: cmp divisor, #0x80000000 ; \
+ cmplo divisor, dividend ; \
+ movlo divisor, divisor, lsl #1 ; \
+ movlo curbit, curbit, lsl #1 ; \
+ blo 1b ; \
+ ; \
+ mov result, #0
+#endif /* __ARM_ARCH__ < 5 */
+
+#define ARM_DIV_BODY(dividend, divisor, result, curbit) \
+ ARM_DIV_BODY_P1(dividend, divisor, result, curbit) ; \
+ ; \
+ /* Division loop */; \
+1: cmp dividend, divisor ; \
+ subhs dividend, dividend, divisor ; \
+ orrhs result, result, curbit ; \
+ cmp dividend, divisor, lsr #1 ; \
+ subhs dividend, dividend, divisor, lsr #1 ; \
+ orrhs result, result, curbit, lsr #1 ; \
+ cmp dividend, divisor, lsr #2 ; \
+ subhs dividend, dividend, divisor, lsr #2 ; \
+ orrhs result, result, curbit, lsr #2 ; \
+ cmp dividend, divisor, lsr #3 ; \
+ subhs dividend, dividend, divisor, lsr #3 ; \
+ orrhs result, result, curbit, lsr #3 ; \
+ cmp dividend, #0 /* Early termination? */; \
+ movnes curbit, curbit, lsr #4 /* No, any more bits to do?*/; \
+ movne divisor, divisor, lsr #4 ; \
+ bne 1b
+#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+
+/* ------------------------------------------------------------------------ */
+#if __ARM_ARCH__ >= 5
+#define ARM_DIV2_ORDER(divisor, order) \
+ clz order, divisor ; \
+ rsb order, order, #31
+#else
+#define ARM_DIV2_ORDER(divisor, order) \
+ cmp divisor, #(1 << 16) ; \
+ movhs divisor, divisor, lsr #16 ; \
+ movhs order, #16 ; \
+ movlo order, #0 ; \
+ ; \
+ cmp divisor, #(1 << 8) ; \
+ movhs divisor, divisor, lsr #8 ; \
+ addhs order, order, #8 ; \
+ ; \
+ cmp divisor, #(1 << 4) ; \
+ movhs divisor, divisor, lsr #4 ; \
+ addhs order, order, #4 ; \
+ ; \
+ cmp divisor, #(1 << 2) ; \
+ addhi order, order, #3 ; \
+ addls order, order, divisor, lsr #1
+#endif
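+/* ARM_DIV2_ORDER computes the log2 of a power-of-two divisor; for
+   example, a divisor of 0x1000 leaves order = 12, and the callers below
+   then shift the dividend right by that amount instead of dividing. */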
+/* ------------------------------------------------------------------------ */
+#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
+
+#define ARMV5_MOD_LOOP(dividend, divisor) \
+ .set shift, shift - 1 ; \
+ cmp dividend, divisor, lsl #shift ; \
+ subcs dividend, dividend, divisor, lsl #shift
+#define ARM_MOD_BODY(dividend, divisor, order, spare) \
+ clz order, divisor ; \
+ clz spare, dividend ; \
+ sub order, order, spare ; \
+ rsbs order, order, #31 ; \
+ addne pc, pc, order, lsl #3 ; \
+ nop ; \
+ .set shift, 32 ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor) ; \
+ ARMV5_MOD_LOOP (dividend, divisor)
+
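+/* As with ARMV5_DIV_LOOP, each ARMV5_MOD_LOOP assembles to 2 instructions
+   (8 bytes), hence the computed branch "addne pc, pc, order, lsl #3"
+   above. */
+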
+#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+#if __ARM_ARCH__ >= 5
+
+#define ARM_MOD_BODY_P1(dividend, divisor, order, spare) \
+ clz order, divisor ; \
+ clz spare, dividend ; \
+ sub order, order, spare ; \
+ mov divisor, divisor, lsl order
+
+#else /* __ARM_ARCH__ < 5 */
+
+#define ARM_MOD_BODY_P1(dividend, divisor, order, spare) \
+ mov order, #0 ; \
+ ; \
+ /* Unless the divisor is very big, shift it up in multiples of */; \
+ /* four bits, since this is the amount of unwinding in the main */; \
+ /* division loop. Continue shifting until the divisor is */; \
+ /* larger than the dividend. */; \
+1: cmp divisor, #0x10000000 ; \
+ cmplo divisor, dividend ; \
+ movlo divisor, divisor, lsl #4 ; \
+ addlo order, order, #4 ; \
+ blo 1b ; \
+ ; \
+ /* For very big divisors, we must shift it a bit at a time, or */; \
+ /* we will be in danger of overflowing. */; \
+1: cmp divisor, #0x80000000 ; \
+ cmplo divisor, dividend ; \
+ movlo divisor, divisor, lsl #1 ; \
+ addlo order, order, #1 ; \
+ blo 1b
+
+#endif /* __ARM_ARCH__ < 5 */
+#define ARM_MOD_BODY(dividend, divisor, order, spare) \
+ARM_MOD_BODY_P1(dividend, divisor, order, spare) ; \
+ ; \
+ /* Perform all needed subtractions to keep only the remainder. */; \
+ /* Do comparisons in batches of 4 first. */; \
+ subs order, order, #3 /* yes, 3 is intended here */; \
+ blt 2f ; \
+ ; \
+1: cmp dividend, divisor ; \
+ subhs dividend, dividend, divisor ; \
+ cmp dividend, divisor, lsr #1 ; \
+ subhs dividend, dividend, divisor, lsr #1 ; \
+ cmp dividend, divisor, lsr #2 ; \
+ subhs dividend, dividend, divisor, lsr #2 ; \
+ cmp dividend, divisor, lsr #3 ; \
+ subhs dividend, dividend, divisor, lsr #3 ; \
+ cmp dividend, #1 ; \
+ mov divisor, divisor, lsr #4 ; \
+ subges order, order, #4 ; \
+ bge 1b ; \
+ ; \
+ tst order, #3 ; \
+ teqne dividend, #0 ; \
+ beq 5f ; \
+ ; \
+ /* Either 1, 2 or 3 comparisons/subtractions are left. */; \
+2: cmn order, #2 ; \
+ blt 4f ; \
+ beq 3f ; \
+ cmp dividend, divisor ; \
+ subhs dividend, dividend, divisor ; \
+ mov divisor, divisor, lsr #1 ; \
+3: cmp dividend, divisor ; \
+ subhs dividend, dividend, divisor ; \
+ mov divisor, divisor, lsr #1 ; \
+4: cmp dividend, divisor ; \
+ subhs dividend, dividend, divisor ; \
+5: ; \
+
+#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
+/* ------------------------------------------------------------------------ */
+#define THUMB_DIV_MOD_BODY(modulo) \
+ /* Load the constant 0x10000000 into our work register. */; \
+ mov work, #1 ; \
+ lsl work, #28 ; \
+LSYM(Loop1): ; \
+ /* Unless the divisor is very big, shift it up in multiples of */; \
+ /* four bits, since this is the amount of unwinding in the main */; \
+ /* division loop. Continue shifting until the divisor is */; \
+ /* larger than the dividend. */; \
+ cmp divisor, work ; \
+ bhs LSYM(Lbignum) ; \
+ cmp divisor, dividend ; \
+ bhs LSYM(Lbignum) ; \
+ lsl divisor, #4 ; \
+ lsl curbit, #4 ; \
+ b LSYM(Loop1) ; \
+LSYM(Lbignum): ; \
+ /* Set work to 0x80000000 */; \
+ lsl work, #3 ; \
+LSYM(Loop2): ; \
+ /* For very big divisors, we must shift it a bit at a time, or */; \
+ /* we will be in danger of overflowing. */; \
+ cmp divisor, work ; \
+ bhs LSYM(Loop3) ; \
+ cmp divisor, dividend ; \
+ bhs LSYM(Loop3) ; \
+ lsl divisor, #1 ; \
+ lsl curbit, #1 ; \
+ b LSYM(Loop2) ; \
+LSYM(Loop3): ; \
+ /* Test for possible subtractions ... */; \
+ .if modulo ; \
+ /* ... On the final pass, this may subtract too much from the dividend, */; \
+ /* so keep track of which subtractions are done, we can fix them up */; \
+ /* afterwards. */; \
+ mov overdone, #0 ; \
+ cmp dividend, divisor ; \
+ blo LSYM(Lover1) ; \
+ sub dividend, dividend, divisor ; \
+LSYM(Lover1): ; \
+ lsr work, divisor, #1 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover2) ; \
+ sub dividend, dividend, work ; \
+ mov ip, curbit ; \
+ mov work, #1 ; \
+ ror curbit, work ; \
+ orr overdone, curbit ; \
+ mov curbit, ip ; \
+LSYM(Lover2): ; \
+ lsr work, divisor, #2 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover3) ; \
+ sub dividend, dividend, work ; \
+ mov ip, curbit ; \
+ mov work, #2 ; \
+ ror curbit, work ; \
+ orr overdone, curbit ; \
+ mov curbit, ip ; \
+LSYM(Lover3): ; \
+ lsr work, divisor, #3 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover4) ; \
+ sub dividend, dividend, work ; \
+ mov ip, curbit ; \
+ mov work, #3 ; \
+ ror curbit, work ; \
+ orr overdone, curbit ; \
+ mov curbit, ip ; \
+LSYM(Lover4): ; \
+ mov ip, curbit ; \
+ .else ; \
+ /* ... and note which bits are done in the result. On the final pass, */; \
+ /* this may subtract too much from the dividend, but the result will be ok, */; \
+ /* since the "bit" will have been shifted out at the bottom. */; \
+ cmp dividend, divisor ; \
+ blo LSYM(Lover1) ; \
+ sub dividend, dividend, divisor ; \
+ orr result, result, curbit ; \
+LSYM(Lover1): ; \
+ lsr work, divisor, #1 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover2) ; \
+ sub dividend, dividend, work ; \
+ lsr work, curbit, #1 ; \
+ orr result, work ; \
+LSYM(Lover2): ; \
+ lsr work, divisor, #2 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover3) ; \
+ sub dividend, dividend, work ; \
+ lsr work, curbit, #2 ; \
+ orr result, work ; \
+LSYM(Lover3): ; \
+ lsr work, divisor, #3 ; \
+ cmp dividend, work ; \
+ blo LSYM(Lover4) ; \
+ sub dividend, dividend, work ; \
+ lsr work, curbit, #3 ; \
+ orr result, work ; \
+LSYM(Lover4): ; \
+ .endif ; \
+ ; \
+ cmp dividend, #0 /* Early termination? */; \
+ beq LSYM(Lover5) ; \
+ lsr curbit, #4 /* No, any more bits to do?*/; \
+ beq LSYM(Lover5) ; \
+ lsr divisor, #4 ; \
+ b LSYM(Loop3) ; \
+LSYM(Lover5): ; \
+ .if modulo ; \
+ /* Any subtractions that we should not have done will be recorded in */; \
+ /* the top three bits of "overdone". Exactly which were not needed */; \
+ /* are governed by the position of the bit, stored in ip. */; \
+ mov work, #0xe ; \
+ lsl work, #28 ; \
+ and overdone, work ; \
+ beq LSYM(Lgot_result) ; \
+ ; \
+ /* If we terminated early, because dividend became zero, then the*/; \
+ /* bit in ip will not be in the bottom nibble, and we should not */; \
+ /* perform the additions below. We must test for this though */; \
+ /* (rather relying upon the TSTs to prevent the additions) since */; \
+ /* the bit in ip could be in the top two bits which might then match*/; \
+ /* with one of the smaller RORs. */; \
+ mov curbit, ip ; \
+ mov work, #0x7 ; \
+ tst curbit, work ; \
+ beq LSYM(Lgot_result) ; \
+ ; \
+ mov curbit, ip ; \
+ mov work, #3 ; \
+ ror curbit, work ; \
+ tst overdone, curbit ; \
+ beq LSYM(Lover6) ; \
+ lsr work, divisor, #3 ; \
+ add dividend, work ; \
+LSYM(Lover6): ; \
+ mov curbit, ip ; \
+ mov work, #2 ; \
+ ror curbit, work ; \
+ tst overdone, curbit ; \
+ beq LSYM(Lover7) ; \
+ lsr work, divisor, #2 ; \
+ add dividend, work ; \
+LSYM(Lover7): ; \
+ mov curbit, ip ; \
+ mov work, #1 ; \
+ ror curbit, work ; \
+ tst overdone, curbit ; \
+ beq LSYM(Lgot_result) ; \
+ lsr work, divisor, #1 ; \
+ add dividend, work ; \
+ .endif ; \
+LSYM(Lgot_result):
+/* APPLE LOCAL end ARM MACH assembler */
+/* ------------------------------------------------------------------------ */
+/* Start of the Real Functions */
+/* ------------------------------------------------------------------------ */
+#ifdef L_udivsi3
+
+ FUNC_START udivsi3
+ /* APPLE LOCAL ARM MACH assembler */
+ FUNC_ALIAS (aeabi_uidiv, udivsi3)
+
+#ifdef __thumb__
+
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ /* APPLE LOCAL v7 support */
+ THUMB_DIV_MOD_BODY(0)
+
+ mov r0, result
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ subs r2, r1, #1
+ RETc(eq)
+ bcc LSYM(Ldiv0)
+ cmp r0, r1
+ /* APPLE LOCAL ARM MACH assembler */
+ bls L11
+ tst r1, r2
+ /* APPLE LOCAL ARM MACH assembler */
+ beq L12
+
+ /* APPLE LOCAL ARM MACH assembler */
+ ARM_DIV_BODY(r0, r1, r2, r3)
+
+ mov r0, r2
+ RET
+
+/* APPLE LOCAL ARM MACH assembler */
+L11: moveq r0, #1
+ movne r0, #0
+ RET
+
+/* APPLE LOCAL ARM MACH assembler */
+L12: ARM_DIV2_ORDER(r1, r2)
+
+ mov r0, r0, lsr r2
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END udivsi3
+
+FUNC_START aeabi_uidivmod
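+/* Returns the quotient in r0 and the remainder in r1, per the AEABI
+   run-time contract; the remainder is computed below as
+   numerator - quotient * denominator. */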
+#ifdef __thumb__
+ push {r0, r1, lr}
+ bl SYM(__udivsi3)
+ /* APPLE LOCAL v7 support */
+ pop {r1, r2, r3}
+ mul r2, r0
+ sub r1, r1, r2
+ bx r3
+#else
+ stmfd sp!, { r0, r1, lr }
+ bl SYM(__udivsi3)
+ ldmfd sp!, { r1, r2, lr }
+ mul r3, r2, r0
+ sub r1, r1, r3
+ RET
+#endif
+ FUNC_END aeabi_uidivmod
+
+#endif /* L_udivsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_umodsi3
+
+ FUNC_START umodsi3
+
+#ifdef __thumb__
+
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+ mov curbit, #1
+ cmp dividend, divisor
+ bhs LSYM(Lover10)
+ RET
+
+LSYM(Lover10):
+ push { work }
+
+ /* APPLE LOCAL v7 support */
+ THUMB_DIV_MOD_BODY(1)
+
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ subs r2, r1, #1 @ compare divisor with 1
+ bcc LSYM(Ldiv0)
+ cmpne r0, r1 @ compare dividend with divisor
+ moveq r0, #0
+ tsthi r1, r2 @ see if divisor is power of 2
+ andeq r0, r0, r2
+ RETc(ls)
+
+ /* APPLE LOCAL ARM MACH assembler */
+ ARM_MOD_BODY(r0, r1, r2, r3)
+
+ RET
+
+#endif /* ARM version. */
+
+ DIV_FUNC_END umodsi3
+
+#endif /* L_umodsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_divsi3
+
+ FUNC_START divsi3
+ /* APPLE LOCAL ARM MACH assembler */
+ FUNC_ALIAS (aeabi_idiv, divsi3)
+
+#ifdef __thumb__
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl LSYM(Lover10)
+ neg divisor, divisor @ Loops below use unsigned.
+LSYM(Lover10):
+ cmp dividend, #0
+ bpl LSYM(Lover11)
+ neg dividend, dividend
+LSYM(Lover11):
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ /* APPLE LOCAL v7 support */
+ THUMB_DIV_MOD_BODY(0)
+
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ bpl LSYM(Lover12)
+ neg r0, r0
+LSYM(Lover12):
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ cmp r1, #0
+ eor ip, r0, r1 @ save the sign of the result.
+ beq LSYM(Ldiv0)
+ rsbmi r1, r1, #0 @ loops below use unsigned.
+ subs r2, r1, #1 @ division by 1 or -1 ?
+ /* APPLE LOCAL ARM MACH assembler */
+ beq L10
+ movs r3, r0
+ rsbmi r3, r0, #0 @ positive dividend value
+ cmp r3, r1
+ /* APPLE LOCAL ARM MACH assembler */
+ bls L11
+ tst r1, r2 @ divisor is power of 2 ?
+ /* APPLE LOCAL ARM MACH assembler */
+ beq L12
+
+ /* APPLE LOCAL ARM MACH assembler */
+ ARM_DIV_BODY(r3, r1, r0, r2)
+
+ cmp ip, #0
+ rsbmi r0, r0, #0
+ RET
+
+/* APPLE LOCAL ARM MACH assembler */
+L10: teq ip, r0 @ same sign ?
+ rsbmi r0, r0, #0
+ RET
+
+/* APPLE LOCAL ARM MACH assembler */
+L11: movlo r0, #0
+ moveq r0, ip, asr #31
+ orreq r0, r0, #1
+ RET
+
+/* APPLE LOCAL ARM MACH assembler */
+L12: ARM_DIV2_ORDER(r1, r2)
+
+ cmp ip, #0
+ mov r0, r3, lsr r2
+ rsbmi r0, r0, #0
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END divsi3
+
+FUNC_START aeabi_idivmod
+#ifdef __thumb__
+ push {r0, r1, lr}
+ bl SYM(__divsi3)
+ /* APPLE LOCAL v7 support */
+ pop {r1, r2, r3}
+ mul r2, r0
+ sub r1, r1, r2
+ bx r3
+#else
+ stmfd sp!, { r0, r1, lr }
+ bl SYM(__divsi3)
+ ldmfd sp!, { r1, r2, lr }
+ mul r3, r2, r0
+ sub r1, r1, r3
+ RET
+#endif
+ FUNC_END aeabi_idivmod
+
+#endif /* L_divsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_modsi3
+
+ FUNC_START modsi3
+
+#ifdef __thumb__
+
+ mov curbit, #1
+ cmp divisor, #0
+ beq LSYM(Ldiv0)
+ bpl LSYM(Lover10)
+ neg divisor, divisor @ Loops below use unsigned.
+LSYM(Lover10):
+ push { work }
+ @ Need to save the sign of the dividend; unfortunately, we need the
+ @ work register later on. Must do this after saving the original value
+ @ of the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ bpl LSYM(Lover11)
+ neg dividend, dividend
+LSYM(Lover11):
+ cmp dividend, divisor
+ blo LSYM(Lgot_result)
+
+ /* APPLE LOCAL v7 support */
+ THUMB_DIV_MOD_BODY(1)
+
+ pop { work }
+ cmp work, #0
+ bpl LSYM(Lover12)
+ neg dividend, dividend
+LSYM(Lover12):
+ pop { work }
+ RET
+
+#else /* ARM version. */
+
+ cmp r1, #0
+ beq LSYM(Ldiv0)
+ rsbmi r1, r1, #0 @ loops below use unsigned.
+ movs ip, r0 @ preserve sign of dividend
+ rsbmi r0, r0, #0 @ if negative make positive
+ subs r2, r1, #1 @ compare divisor with 1
+ cmpne r0, r1 @ compare dividend with divisor
+ moveq r0, #0
+ tsthi r1, r2 @ see if divisor is power of 2
+ andeq r0, r0, r2
+ /* APPLE LOCAL ARM MACH assembler */
+ bls L10
+
+ /* APPLE LOCAL ARM MACH assembler */
+ ARM_MOD_BODY(r0, r1, r2, r3)
+
+/* APPLE LOCAL ARM MACH assembler */
+L10: cmp ip, #0
+ rsbmi r0, r0, #0
+ RET
+
+#endif /* ARM version */
+
+ DIV_FUNC_END modsi3
+
+#endif /* L_modsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_tls
+
+ FUNC_START div0
+ /* APPLE LOCAL begin ARM MACH assembler */
+ FUNC_ALIAS(aeabi_idiv0,div0)
+ FUNC_ALIAS(aeabi_ldiv0,div0)
+ /* APPLE LOCAL end ARM MACH assembler */
+
+ RET
+
+ FUNC_END aeabi_ldiv0
+ FUNC_END aeabi_idiv0
+ FUNC_END div0
+
+#endif /* L_dvmd_tls */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls.
+
+/* Constant taken from <asm/signal.h>. */
+#define SIGFPE 8
+
+ .code 32
+ FUNC_START div0
+
+ stmfd sp!, {r1, lr}
+ mov r0, #SIGFPE
+ bl SYM(raise) __PLT__
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM1 (r1)
+
+ FUNC_END div0
+
+#endif /* L_dvmd_lnx */
+/* ------------------------------------------------------------------------ */
+/* Dword shift operations. */
+/* All the following Dword shift variants rely on the fact that
+ shft xxx, Reg
+ is in fact done as
+ shft xxx, (Reg & 255)
+ so for Reg values in (32...63) and (-1...-31) we will get zero (in the
+ case of logical shifts) or the sign (for asr). */
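+
+/* For example, in the ARM __lshrdi3 below with a shift count of 40 in
+   r2: "mov ah, ah, lsr r2" yields 0 (40 lies in 32...63), while the
+   movpl path supplies al = ah >> (40 - 32). */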
+
+#ifdef __ARMEB__
+#define al r1
+#define ah r0
+#else
+#define al r0
+#define ah r1
+#endif
+
+/* Prevent __aeabi double-word shifts from being produced on SymbianOS. */
+#ifndef __symbian__
+
+#ifdef L_lshrdi3
+
+ FUNC_START lshrdi3
+ /* APPLE LOCAL ARM MACH assembler */
+ FUNC_ALIAS (aeabi_llsr, lshrdi3)
+
+#ifdef __thumb__
+ lsr al, r2
+ mov r3, ah
+ lsr ah, r2
+ mov ip, r3
+ sub r2, #32
+ lsr r3, r2
+ orr al, r3
+ neg r2, r2
+ mov r3, ip
+ lsl r3, r2
+ orr al, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi al, al, lsr r2
+ movpl al, ah, lsr r3
+ orrmi al, al, ah, lsl ip
+ mov ah, ah, lsr r2
+ RET
+#endif
+ FUNC_END aeabi_llsr
+ FUNC_END lshrdi3
+
+#endif
+
+#ifdef L_ashrdi3
+
+ FUNC_START ashrdi3
+ /* APPLE LOCAL ARM MACH assembler */
+ FUNC_ALIAS (aeabi_lasr, ashrdi3)
+
+#ifdef __thumb__
+ lsr al, r2
+ mov r3, ah
+ asr ah, r2
+ sub r2, #32
+ @ If r2 is negative at this point the following step would OR
+ @ the sign bit into all of AL. That's not what we want...
+ bmi 1f
+ mov ip, r3
+ asr r3, r2
+ orr al, r3
+ mov r3, ip
+1:
+ neg r2, r2
+ lsl r3, r2
+ orr al, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi al, al, lsr r2
+ movpl al, ah, asr r3
+ orrmi al, al, ah, lsl ip
+ mov ah, ah, asr r2
+ RET
+#endif
+
+ FUNC_END aeabi_lasr
+ FUNC_END ashrdi3
+
+#endif
+
+#ifdef L_ashldi3
+
+ FUNC_START ashldi3
+ /* APPLE LOCAL ARM MACH assembler */
+ FUNC_ALIAS (aeabi_llsl, ashldi3)
+
+#ifdef __thumb__
+ lsl ah, r2
+ mov r3, al
+ lsl al, r2
+ mov ip, r3
+ sub r2, #32
+ lsl r3, r2
+ orr ah, r3
+ neg r2, r2
+ mov r3, ip
+ lsr r3, r2
+ orr ah, r3
+ RET
+#else
+ subs r3, r2, #32
+ rsb ip, r2, #32
+ movmi ah, ah, lsl r2
+ movpl ah, al, lsl r3
+ orrmi ah, ah, al, lsr ip
+ mov al, al, lsl r2
+ RET
+#endif
+ FUNC_END aeabi_llsl
+ FUNC_END ashldi3
+
+#endif
+
+/* APPLE LOCAL begin ARM 4790140 compact switch tables */
+/* ----------------------------------------------------------------------- */
+/* These aren't needed for Thumb2 since then we have actual instructions
+ to do what these functions do. */
+#ifndef __thumb2__
+
+/* Thumb switch table implementation. ARM code, although it must be called
+ from Thumb (the low bit of LR is expected to be 1).
+ Expects the call site to be followed by 1-byte count, then <count>
+ 1-byte unsigned half-offsets (low bit of real offset is always 0, so
+ not stored), then the half-offset for the default case (not included
+ in the count). */
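+
+/* An illustrative call-site layout (a sketch, not verbatim compiler
+   output; the labels and counts are hypothetical):
+
+       bl   ___switchu8          @ case index in r0
+       .byte 2                   @ count of explicit cases
+       .byte <half-offset 0>     @ target for case 0
+       .byte <half-offset 1>     @ target for case 1
+       .byte <half-offset dflt>  @ default case, not in the count
+
+   Doubling a half-offset and adding it to LR (the Thumb return address,
+   low bit set) produces the branch target, as done below with
+   "add ip, lr, r0, lsl #1" followed by "bx ip". */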
+
+#ifdef L_switchu8
+
+ FUNC_START switchu8
+
+ ldrb ip, [lr, #-1]
+ cmp r0, ip
+ ldrccb r0, [lr, r0]
+ ldrcsb r0, [lr, ip]
+ add ip, lr, r0, lsl #1
+ bx ip
+
+ FUNC_END switchu8
+#endif
+
+/* Same with signed half-offsets. */
+
+#ifdef L_switch8
+
+ FUNC_START switch8
+
+ ldrb ip, [lr, #-1]
+ cmp r0, ip
+ ldrccsb r0, [lr, r0]
+ ldrcssb r0, [lr, ip]
+ add ip, lr, r0, lsl #1
+ bx ip
+
+ FUNC_END switch8
+#endif
+
+/* Same with 16-bit signed half-offsets. (This one is not
+ all that efficient; there is no reg+reg<<const addressing mode for
+ halfwords.) */
+
+#ifdef L_switch16
+
+ FUNC_START switch16
+
+ ldrh ip, [lr, #-1]
+ cmp r0, ip
+ add r0, lr, r0, lsl #1
+ ldrccsh r0, [r0, #1]
+ add ip, lr, ip, lsl #1
+ ldrcssh r0, [ip, #1]
+ add ip, lr, r0, lsl #1
+ bx ip
+
+ FUNC_END switch16
+#endif
+
+/* Same with 32-bit signed offset (shifting off the low
+ bit would not gain anything here). */
+
+#ifdef L_switch32
+
+ FUNC_START switch32
+
+ ldr ip, [lr, #-1]
+ cmp r0, ip
+ add r0, lr, r0, lsl #2
+ ldrcc r0, [r0, #3]
+ add ip, lr, ip, lsl #2
+ ldrcs r0, [ip, #3]
+ add ip, lr, r0
+ bx ip
+
+ FUNC_END switch32
+#endif
+
+/* APPLE LOCAL begin 6465387 exception handling interworking VFP save */
+#if (__ARM_ARCH__ == 6)
+#ifdef L_save_vfp_d8_d15_regs
+ ARM_FUNC_START save_vfp_d8_d15_regs
+ vpush {d8-d15}
+ RET
+ FUNC_END save_vfp_d8_d15_regs
+#endif
+
+#ifdef L_restore_vfp_d8_d15_regs
+ ARM_FUNC_START restore_vfp_d8_d15_regs
+ vpop {d8-d15}
+ RET
+ FUNC_END restore_vfp_d8_d15_regs
+#endif
+#endif
+/* APPLE LOCAL end 6465387 exception handling interworking VFP save */
+
+#endif /* !defined (__thumb2__) */
+/* APPLE LOCAL end ARM 4790140 compact switch tables */
+
+#endif /* __symbian__ */
+
+/* ------------------------------------------------------------------------ */
+/* These next two sections are here despite the fact that they contain Thumb
+ assembler because their presence allows interworked code to be linked even
+ when the GCC library is this one. */
+
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ \
+ || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
+ || __ARM_ARCH__ >= 6
+
+#if defined L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+ .force_thumb
+
+/* APPLE LOCAL begin ARM MACH assembler */
+#define call_via(register) \
+ THUMB_FUNC_START _call_via_##register ; \
+ ; \
+ bx register ; \
+ nop ; \
+ ; \
+ SIZE (_call_via_##register)
+
+ call_via(r0)
+ call_via(r1)
+ call_via(r2)
+ call_via(r3)
+ call_via(r4)
+ call_via(r5)
+ call_via(r6)
+ call_via(r7)
+ call_via(r8)
+ call_via(r9)
+ call_via(sl)
+ call_via(fp)
+ call_via(ip)
+ call_via(sp)
+ call_via(lr)
+/* APPLE LOCAL end ARM MACH assembler */
+
+#endif /* L_call_via_rX */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Don't bother with the old interworking routines for Thumb-2. */
+/* ??? Maybe only omit these on v7m. */
+#ifndef __thumb2__
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+#if defined L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+ instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode.
+
+ There are three variations of this code. The first,
+ _interwork_call_via_rN(), will push the return address onto the
+ stack and pop it in _arm_return(). It should only be used if all
+ arguments are passed in registers.
+
+ The second, _interwork_r7_call_via_rN(), instead stores the return
+ address at [r7, #-4]. It is the caller's responsibility to ensure
+ that this address is valid and contains no useful data.
+
+ The third, _interwork_r11_call_via_rN(), works in the same way but
+ uses r11 instead of r7. It is useful if the caller does not really
+ need a frame pointer. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+LSYM(Lstart_arm_return):
+ cfi_start LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
+ cfi_push 0, 0xe, -0x8, 0x8
+ nop @ This nop is for the benefit of debuggers, so that
+ @ backtraces will use the correct unwind information.
+_arm_return:
+ /* APPLE LOCAL ARM MACH assembler */
+ RETLDM_unwind (LSYM(Lstart_arm_return))
+ cfi_end LSYM(Lend_arm_return)
+
+ .globl _arm_return_r7
+_arm_return_r7:
+ ldr lr, [r7, #-4]
+ bx lr
+
+ .globl _arm_return_r11
+_arm_return_r11:
+ ldr lr, [r11, #-4]
+ bx lr
+
+.macro interwork_with_frame frame, register, name, return
+ .code 16
+
+ THUMB_FUNC_START \name
+
+ bx pc
+ nop
+
+ .code 32
+ tst \register, #1
+ streq lr, [\frame, #-4]
+ adreq lr, _arm_return_\frame
+ bx \register
+
+ SIZE (\name)
+.endm
+
+.macro interwork register
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_\register
+
+ bx pc
+ nop
+
+ .code 32
+ .globl LSYM(Lchange_\register)
+LSYM(Lchange_\register):
+ tst \register, #1
+ streq lr, [sp, #-8]!
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+
+ interwork_with_frame r7,\register,_interwork_r7_call_via_\register
+ interwork_with_frame r11,\register,_interwork_r11_call_via_\register
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The LR case has to be handled a little differently... */
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_lr
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr, pc}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
+/* APPLE LOCAL v7 support. Merge from mainline */
+#endif /* !__thumb2__ */
+#endif /* Arch supports thumb. */
+
+#ifndef __symbian__
+#include "ieee754-df.S"
+#include "ieee754-sf.S"
+#include "bpabi.S"
+#endif /* __symbian__ */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-bpabi.ver b/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-bpabi.ver
new file mode 100644
index 000000000..2f259eb64
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-bpabi.ver
@@ -0,0 +1,83 @@
+GCC_3.5 {
+ # BPABI symbols
+ __aeabi_cdcmpeq
+ __aeabi_cdcmple
+ __aeabi_cdrcmple
+ __aeabi_cfcmpeq
+ __aeabi_cfcmple
+ __aeabi_cfrcmple
+ __aeabi_d2f
+ __aeabi_d2iz
+ __aeabi_d2lz
+ __aeabi_d2uiz
+ __aeabi_d2ulz
+ __aeabi_dadd
+ __aeabi_dcmpeq
+ __aeabi_dcmpge
+ __aeabi_dcmpgt
+ __aeabi_dcmple
+ __aeabi_dcmplt
+ __aeabi_dcmpun
+ __aeabi_ddiv
+ __aeabi_dmul
+ __aeabi_dneg
+ __aeabi_drsub
+ __aeabi_dsub
+ __aeabi_f2d
+ __aeabi_f2iz
+ __aeabi_f2lz
+ __aeabi_f2uiz
+ __aeabi_f2ulz
+ __aeabi_fadd
+ __aeabi_fcmpeq
+ __aeabi_fcmpge
+ __aeabi_fcmpgt
+ __aeabi_fcmple
+ __aeabi_fcmplt
+ __aeabi_fcmpun
+ __aeabi_fdiv
+ __aeabi_fmul
+ __aeabi_fneg
+ __aeabi_frsub
+ __aeabi_fsub
+ __aeabi_i2d
+ __aeabi_i2f
+ __aeabi_idiv
+ __aeabi_idiv0
+ __aeabi_idivmod
+ __aeabi_l2d
+ __aeabi_l2f
+ __aeabi_lasr
+ __aeabi_lcmp
+ __aeabi_ldiv0
+ __aeabi_ldivmod
+ __aeabi_llsl
+ __aeabi_llsr
+ __aeabi_lmul
+ __aeabi_ui2d
+ __aeabi_ui2f
+ __aeabi_uidiv
+ __aeabi_uidivmod
+ __aeabi_uldivmod
+ __aeabi_ulcmp
+ __aeabi_ul2d
+ __aeabi_ul2f
+ __aeabi_uread4
+ __aeabi_uread8
+ __aeabi_uwrite4
+ __aeabi_uwrite8
+
+ # Exception-Handling
+ # \S 7.5
+ _Unwind_Complete
+ _Unwind_VRS_Get
+ _Unwind_VRS_Set
+ _Unwind_VRS_Pop
+ # \S 9.2
+ __aeabi_unwind_cpp_pr0
+ __aeabi_unwind_cpp_pr1
+ __aeabi_unwind_cpp_pr2
+ # The libstdc++ exception-handling personality routine uses this
+ # GNU-specific entry point.
+ __gnu_unwind_frame
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-iphoneos.ver b/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-iphoneos.ver
new file mode 100644
index 000000000..f9064fa05
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/libgcc-iphoneos.ver
@@ -0,0 +1,38 @@
+# APPLE LOCAL file ARM 5681645
+GCC_4.0.1 {
+ # Thumb VFP support
+ __muldf3vfp
+ __adddf3vfp
+ __subdf3vfp
+ __divdf3vfp
+ __eqdf2vfp
+ __nedf2vfp
+ __ltdf2vfp
+ __gtdf2vfp
+ __ledf2vfp
+ __gedf2vfp
+ __unorddf2vfp
+ __fixdfsivfp
+ __fixunsdfsivfp
+ __extendsfdf2vfp
+ __truncdfsf2vfp
+ __floatsidfvfp
+ __floatunssidfvfp
+ __mulsf3vfp
+ __addsf3vfp
+ __subsf3vfp
+ __divsf3vfp
+ __eqsf2vfp
+ __nesf2vfp
+ __ltsf2vfp
+ __gtsf2vfp
+ __lesf2vfp
+ __gesf2vfp
+ __unordsf2vfp
+ __fixsfsivfp
+ __fixunssfsivfp
+ __floatsisfvfp
+ __floatunssisfvfp
+ # Miscellaneous
+ __flt_rounds
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/libunwind.S b/gcc-4.2.1-5666.3/gcc/config/arm/libunwind.S
new file mode 100644
index 000000000..fe09281e6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/libunwind.S
@@ -0,0 +1,238 @@
+/* Support functions for the unwinder.
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#ifndef __symbian__
+
+#include "lib1funcs.asm"
+
+.macro UNPREFIX name
+ .global SYM (\name)
+ EQUIV SYM (\name), SYM (__\name)
+.endm
+
+/* r0 points to a 16-word block. Upload these values to the actual core
+ state. */
+ARM_FUNC_START restore_core_regs
+ /* We must use sp as the base register when restoring sp. Push the
+ last 3 registers onto the top of the current stack to achieve
+ this. */
+ add r1, r0, #52
+ ldmia r1, {r3, r4, r5} /* {sp, lr, pc}. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ /* Thumb-2 doesn't allow sp in a load-multiple instruction, so push
+ the target address onto the target stack. This is safe as
+ we're always returning to somewhere further up the call stack. */
+ mov ip, r3
+ mov lr, r4
+ str r5, [ip, #-4]!
+#elif defined(__INTERWORKING__)
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ /* Restore pc into ip. */
+ mov r2, r5
+ stmfd sp!, {r2, r3, r4}
+#else
+ stmfd sp!, {r3, r4, r5}
+#endif
+ /* Don't bother restoring ip. */
+ ldmia r0, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp}
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ /* Pop the return address off the target stack. */
+ mov sp, ip
+ pop {pc}
+#elif defined(__INTERWORKING__)
+ /* Pop the three registers we pushed earlier. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ ldmfd sp, {ip, sp, lr}
+ bx ip
+#else
+ ldmfd sp, {sp, lr, pc}
+#endif
+ FUNC_END restore_core_regs
+ UNPREFIX restore_core_regs
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Load VFP registers d0-d15 from the address in r0.
+ Use this to load from FSTMX format. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ARM_FUNC_START gnu_Unwind_Restore_VFP
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on soft-float targets. */
+ ldc p11,cr0,[r0],{0x21} /* fldmiax r0, {d0-d15} */
+ RET
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Store VFP registers d0-d15 to the address in r0.
+ Use this to store in FSTMX format. */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ARM_FUNC_START gnu_Unwind_Save_VFP
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on soft-float targets. */
+ stc p11,cr0,[r0],{0x21} /* fstmiax r0, {d0-d15} */
+ RET
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* Load VFP registers d0-d15 from the address in r0.
+ Use this to load from FSTMD format. */
+ARM_FUNC_START gnu_Unwind_Restore_VFP_D
+ ldc p11,cr0,[r0],{0x20} /* fldmiad r0, {d0-d15} */
+ RET
+
+/* Store VFP registers d0-d15 to the address in r0.
+ Use this to store in FLDMD format. */
+ARM_FUNC_START gnu_Unwind_Save_VFP_D
+ stc p11,cr0,[r0],{0x20} /* fstmiad r0, {d0-d15} */
+ RET
+
+/* Load VFP registers d16-d31 from the address in r0.
+ Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */
+ARM_FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
+ ldcl p11,cr0,[r0],{0x20} /* vldm r0, {d16-d31} */
+ RET
+
+/* Store VFP registers d16-d31 to the address in r0.
+ Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */
+ARM_FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
+ stcl p11,cr0,[r0],{0x20} /* vstm r0, {d16-d31} */
+ RET
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ARM_FUNC_START gnu_Unwind_Restore_WMMXD
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on non-iWMMXt targets. */
+ ldcl p1, cr0, [r0], #8 /* wldrd wr0, [r0], #8 */
+ ldcl p1, cr1, [r0], #8 /* wldrd wr1, [r0], #8 */
+ ldcl p1, cr2, [r0], #8 /* wldrd wr2, [r0], #8 */
+ ldcl p1, cr3, [r0], #8 /* wldrd wr3, [r0], #8 */
+ ldcl p1, cr4, [r0], #8 /* wldrd wr4, [r0], #8 */
+ ldcl p1, cr5, [r0], #8 /* wldrd wr5, [r0], #8 */
+ ldcl p1, cr6, [r0], #8 /* wldrd wr6, [r0], #8 */
+ ldcl p1, cr7, [r0], #8 /* wldrd wr7, [r0], #8 */
+ ldcl p1, cr8, [r0], #8 /* wldrd wr8, [r0], #8 */
+ ldcl p1, cr9, [r0], #8 /* wldrd wr9, [r0], #8 */
+ ldcl p1, cr10, [r0], #8 /* wldrd wr10, [r0], #8 */
+ ldcl p1, cr11, [r0], #8 /* wldrd wr11, [r0], #8 */
+ ldcl p1, cr12, [r0], #8 /* wldrd wr12, [r0], #8 */
+ ldcl p1, cr13, [r0], #8 /* wldrd wr13, [r0], #8 */
+ ldcl p1, cr14, [r0], #8 /* wldrd wr14, [r0], #8 */
+ ldcl p1, cr15, [r0], #8 /* wldrd wr15, [r0], #8 */
+ RET
+
+ARM_FUNC_START gnu_Unwind_Save_WMMXD
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on non-iWMMXt targets. */
+ stcl p1, cr0, [r0], #8 /* wstrd wr0, [r0], #8 */
+ stcl p1, cr1, [r0], #8 /* wstrd wr1, [r0], #8 */
+ stcl p1, cr2, [r0], #8 /* wstrd wr2, [r0], #8 */
+ stcl p1, cr3, [r0], #8 /* wstrd wr3, [r0], #8 */
+ stcl p1, cr4, [r0], #8 /* wstrd wr4, [r0], #8 */
+ stcl p1, cr5, [r0], #8 /* wstrd wr5, [r0], #8 */
+ stcl p1, cr6, [r0], #8 /* wstrd wr6, [r0], #8 */
+ stcl p1, cr7, [r0], #8 /* wstrd wr7, [r0], #8 */
+ stcl p1, cr8, [r0], #8 /* wstrd wr8, [r0], #8 */
+ stcl p1, cr9, [r0], #8 /* wstrd wr9, [r0], #8 */
+ stcl p1, cr10, [r0], #8 /* wstrd wr10, [r0], #8 */
+ stcl p1, cr11, [r0], #8 /* wstrd wr11, [r0], #8 */
+ stcl p1, cr12, [r0], #8 /* wstrd wr12, [r0], #8 */
+ stcl p1, cr13, [r0], #8 /* wstrd wr13, [r0], #8 */
+ stcl p1, cr14, [r0], #8 /* wstrd wr14, [r0], #8 */
+ stcl p1, cr15, [r0], #8 /* wstrd wr15, [r0], #8 */
+ RET
+
+ARM_FUNC_START gnu_Unwind_Restore_WMMXC
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on non-iWMMXt targets. */
+ ldc2 p1, cr8, [r0], #4 /* wldrw wcgr0, [r0], #4 */
+ ldc2 p1, cr9, [r0], #4 /* wldrw wcgr1, [r0], #4 */
+ ldc2 p1, cr10, [r0], #4 /* wldrw wcgr2, [r0], #4 */
+ ldc2 p1, cr11, [r0], #4 /* wldrw wcgr3, [r0], #4 */
+ RET
+
+ARM_FUNC_START gnu_Unwind_Save_WMMXC
+ /* Use the generic coprocessor form so that gas doesn't complain
+ on non-iWMMXt targets. */
+ stc2 p1, cr8, [r0], #4 /* wstrw wcgr0, [r0], #4 */
+ stc2 p1, cr9, [r0], #4 /* wstrw wcgr1, [r0], #4 */
+ stc2 p1, cr10, [r0], #4 /* wstrw wcgr2, [r0], #4 */
+ stc2 p1, cr11, [r0], #4 /* wstrw wcgr3, [r0], #4 */
+ RET
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* Wrappers to save core registers, then call the real routine. */
+
+.macro UNWIND_WRAPPER name nargs
+ ARM_FUNC_START \name
+ /* Create a phase2_vrs structure. */
+ /* Split reg push in two to ensure the correct value for sp. */
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#if defined(__thumb2__)
+ mov ip, sp
+ push {lr} /* PC is ignored. */
+ push {ip, lr} /* Push original SP and LR. */
+#else
+ stmfd sp!, {sp, lr, pc}
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ stmfd sp!, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp, ip}
+
+ /* Demand-save flags, plus an extra word for alignment. */
+ mov r3, #0
+ stmfd sp!, {r2, r3}
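+ /* The frame is now 18 words (72 bytes): two words of flags and
+    padding, r0-r12, then the original sp, lr and pc; the saved lr
+    therefore sits at [sp, #64], matching the literals used below. */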
+
+ /* Point r1 at the block. Pass r[0..nargs) unchanged. */
+ add r\nargs, sp, #4
+/* APPLE LOCAL v7 support. Merge from mainline */
+#if defined(__thumb__) && !defined(__thumb2__)
+ /* Switch back to thumb mode to avoid interworking hassle. */
+ adr ip, .L1_\name
+ orr ip, ip, #1
+ bx ip
+ .thumb
+.L1_\name:
+ bl SYM (__gnu\name) __PLT__
+ ldr r3, [sp, #64]
+ add sp, #72
+ bx r3
+#else
+ bl SYM (__gnu\name) __PLT__
+ ldr lr, [sp, #64]
+ add sp, sp, #72
+ RET
+#endif
+ FUNC_END \name
+ UNPREFIX \name
+.endm
+
+UNWIND_WRAPPER _Unwind_RaiseException 1
+UNWIND_WRAPPER _Unwind_Resume 1
+UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1
+UNWIND_WRAPPER _Unwind_ForcedUnwind 3
+
+#endif /* __symbian__ */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/linux-eabi.h b/gcc-4.2.1-5666.3/gcc/config/arm/linux-eabi.h
new file mode 100644
index 000000000..6612f742b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/linux-eabi.h
@@ -0,0 +1,85 @@
+/* Configuration file for ARM GNU/Linux EABI targets.
+ Copyright (C) 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* On EABI GNU/Linux, we want both the BPABI builtins and the
+ GNU/Linux builtins. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ TARGET_BPABI_CPP_BUILTINS(); \
+ LINUX_TARGET_OS_CPP_BUILTINS(); \
+ } \
+ while (false)
+
+/* We default to a soft-float ABI so that binaries can run on all
+ target hardware. */
+#undef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
+
+/* We default to the "aapcs-linux" ABI so that enums are int-sized by
+ default. */
+#undef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS_LINUX
+
+/* Default to armv5t so that thumb shared libraries work.
+ The ARM10TDMI core is the default for armv5t, so set
+ SUBTARGET_CPU_DEFAULT to achieve this. */
+#undef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm10tdmi
+
+#undef SUBTARGET_EXTRA_LINK_SPEC
+#define SUBTARGET_EXTRA_LINK_SPEC " -m armelf_linux_eabi"
+
+/* Use ld-linux.so.3 so that it will be possible to run "classic"
+ GNU/Linux binaries on an EABI system. */
+#undef GLIBC_DYNAMIC_LINKER
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.3"
+
+/* At this point, bpabi.h will have clobbered LINK_SPEC. We want to
+ use the GNU/Linux version, not the generic BPABI version. */
+#undef LINK_SPEC
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+
+/* Use the default LIBGCC_SPEC, not the version in linux-elf.h, as we
+ do not use -lfloat. */
+#undef LIBGCC_SPEC
+
+/* Use the AAPCS type for wchar_t, or the previous Linux default for
+ non-AAPCS. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "long int")
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. It is modified to work with
+ both the original and EABI-only syscall interfaces. */
+#undef CLEAR_INSN_CACHE
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("a2") = (unsigned long) (END); \
+ register unsigned long _flg __asm ("a3") = 0; \
+ register unsigned long _scno __asm ("r7") = 0xf0002; \
+ __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
+ : "=r" (_beg) \
+ : "0" (_beg), "r" (_end), "r" (_flg), "r" (_scno)); \
+}
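+
+/* Illustrative use (the buffer names are hypothetical): after writing
+   generated code into `buf', a runtime would flush it with
+   CLEAR_INSN_CACHE (buf, buf + len) before branching to it. */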
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/linux-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/linux-elf.h
new file mode 100644
index 000000000..acb13cd01
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/linux-elf.h
@@ -0,0 +1,106 @@
+/* Definitions for ARM running Linux-based GNU systems using ELF
+ Copyright (C) 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+ 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* elfos.h should have already been included. Now just override
+ any conflicting definitions and add any extras. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with ELF)", stderr);
+
+#undef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_HARD
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (0)
+
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
+
+#define SUBTARGET_EXTRA_LINK_SPEC " -m armelf_linux -p"
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "marm", "mlittle-endian", "mhard-float", "mno-thumb-interwork" }
+
+/* Now we define the strings used to build the spec file. */
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{pthread:-lpthread} \
+ %{shared:-lc} \
+ %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
+
+#define LIBGCC_SPEC "%{msoft-float:-lfloat} %{mfloat-abi=soft*:-lfloat} -lgcc"
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"
+
+#define LINUX_TARGET_LINK_SPEC "%{h*} %{version:-v} \
+ %{b} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "} \
+ -X \
+ %{mbig-endian:-EB}" \
+ SUBTARGET_EXTRA_LINK_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ LINUX_TARGET_OS_CPP_BUILTINS(); \
+ } \
+ while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
+ do \
+ { \
+ assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
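+
+/* For example, ASM_OUTPUT_DEF (file, "__bar", "__foo") emits the
+   assembler line "__bar = __foo" (the names are illustrative). */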
+
+/* NWFPE always understands FPA instructions. */
+#undef FPUTYPE_DEFAULT
+#define FPUTYPE_DEFAULT FPUTYPE_FPA_EMU3
+
+/* Call the function profiler with a given profile label. */
+#undef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \
+{ \
+ fprintf (STREAM, "\tbl\tmcount%s\n", \
+ (TARGET_ARM && NEED_PLT_RELOC) ? "(PLT)" : ""); \
+}
+
+/* The GNU/Linux profiler clobbers the link register. Make sure the
+ prologue knows to save it. */
+#define PROFILE_HOOK(X) \
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)))
+
+/* The GNU/Linux profiler needs a frame pointer. */
+#define SUBTARGET_FRAME_POINTER_REQUIRED current_function_profile
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/linux-gas.h b/gcc-4.2.1-5666.3/gcc/config/arm/linux-gas.h
new file mode 100644
index 000000000..a04e05066
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/linux-gas.h
@@ -0,0 +1,55 @@
+/* Definitions of target machine for GNU compiler.
+ ARM Linux-based GNU systems version.
+ Copyright (C) 1997, 1998, 1999, 2000, 2001, 2004
+ Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* This is how we tell the assembler that a symbol is weak.
+ GAS always supports weak symbols. */
+
+/* Unsigned chars produce much better code than signed ones. */
+#define DEFAULT_SIGNED_CHAR 0
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. */
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("a2") = (unsigned long) (END); \
+ register unsigned long _flg __asm ("a3") = 0; \
+ __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
+ : "=r" (_beg) \
+ : "0" (_beg), "r" (_end), "r" (_flg)); \
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/mmintrin.h b/gcc-4.2.1-5666.3/gcc/config/arm/mmintrin.h
new file mode 100644
index 000000000..bed6204c2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/mmintrin.h
@@ -0,0 +1,1257 @@
+/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+/* The data type intended for user use. */
+typedef unsigned long long __m64, __int64;
+
+/* Internal data types for implementing the intrinsics. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+typedef short __v4hi __attribute__ ((vector_size (8)));
+typedef char __v8qi __attribute__ ((vector_size (8)));
+
+/* "Convert" __m64 and __int64 into each other. */
+static __inline __m64
+_mm_cvtsi64_m64 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtm64_si64 (__m64 __i)
+{
+ return __i;
+}
+
+static __inline int
+_mm_cvtsi64_si32 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtsi32_si64 (int __i)
+{
+ return __i;
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
+}
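+
+/* Worked example (illustrative): packing { 300, -200, 5, 6 } with
+ { 1, 2, 3, 4 } saturates the out-of-range lanes, giving the bytes
+ { 127, -128, 5, 6, 1, 2, 3, 4 }. */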
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Pack the 64-bit value from M1 into the lower 32 bits of the result, and
+ the 64-bit value from M2 into the upper 32 bits of the result, with
+ signed saturation for values that do not fit exactly into 32 bits. */
+static __inline __m64
+_mm_packs_pi64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Pack the 64-bit value from M1 into the lower 32 bits of the result, and
+ the 64-bit value from M2 into the upper 32 bits of the result, with
+ unsigned saturation for values that do not fit exactly into 32 bits. */
+static __inline __m64
+_mm_packs_pu64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
+}
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
+}
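+
+/* Worked example (illustrative): for bytes __m1 = { a0 .. a7 } and
+ __m2 = { b0 .. b7 }, _mm_unpacklo_pi8 yields
+ { a0, b0, a1, b1, a2, b2, a3, b3 } and _mm_unpackhi_pi8 yields
+ { a4, b4, a5, b5, a6, b6, a7, b7 }. */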
+
+/* Take the four 8-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+static __inline __m64
+_mm_add_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+static __inline __m64
+_mm_add_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+static __inline __m64
+_mm_add_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
+}
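+
+/* Worked example (illustrative): on unsigned bytes, _mm_add_pi8 wraps
+ 250 + 10 around to 4, whereas _mm_adds_pu8 saturates it to 255. */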
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+static __inline __m64
+_mm_sub_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+static __inline __m64
+_mm_sub_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+static __inline __m64
+_mm_sub_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values
+ in M2, producing four 32-bit intermediate results, which are then
+ summed by pairs to produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit
+ values in M2, producing four 32-bit intermediate results, which are
+ then summed by pairs to produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit
+ values in M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+static __inline __m64
+_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
+}
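+
+/* Worked example (illustrative): 0x1234 * 0x5678 = 0x06260060, so for
+ that lane _mm_mulhi_pi16 returns 0x0626 and _mm_mullo_pi16 returns
+ 0x0060; together the two halves recover the full 32-bit product. */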
+
+/* Shift four 16-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+static __inline __m64
+_mm_sll_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wslld (__m, __count);
+}
+
+static __inline __m64
+_mm_slli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wslldi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrad (__m, __count);
+}
+
+static __inline __m64
+_mm_srai_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsradi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrld (__m, __count);
+}
+
+static __inline __m64
+_mm_srli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrldi (__m, __count);
+}
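+
+/* Worked example (illustrative): for the 16-bit lane 0xFF00 (-256),
+ shifting right by 4 gives 0xFFF0 (-16) with _mm_srai_pi16 (sign bit
+ shifted in) but 0x0FF0 (4080) with _mm_srli_pi16 (zeros shifted in). */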
+
+/* Rotate four 16-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
+}
+
+/* Rotate two 32-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
+}
+
+/* Rotate the 64-bit value in M right by COUNT. */
+static __inline __m64
+_mm_ror_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrord (__m, __count);
+}
+
+static __inline __m64
+_mm_rori_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrordi (__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wand (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+static __inline __m64
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wandn (__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wor (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wxor (__m1, __m2);
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
+}
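+
+/* Usage sketch (illustrative): because true lanes are all-ones, a
+ comparison result can serve directly as a select mask, e.g. a
+ branchless per-lane maximum:
+
+ __m64 __mask = _mm_cmpgt_pi16 (__a, __b);
+ __m64 __max = _mm_or_si64 (_mm_and_si64 (__mask, __a),
+ _mm_andnot_si64 (__mask, __b));
+
+ (_mm_max_pi16 below performs this in a single instruction.) */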
+
+/* Element-wise multiplication of the unsigned 16-bit values in __B and
+ __C; the products are summed across all elements and added to __A. */
+static __inline __m64
+_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __B and
+ __C; the products are summed across all elements and added to __A. */
+static __inline __m64
+_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the unsigned 16-bit values in __A and
+ __B, followed by accumulation across all elements. */
+static __inline __m64
+_mm_macz_pu16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __A and
+ __B, followed by accumulation across all elements. */
+static __inline __m64
+_mm_macz_pi16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Accumulate across all unsigned 8-bit values in __A. */
+static __inline __m64
+_mm_acc_pu8 (__m64 __A)
+{
+ return __builtin_arm_waccb ((__v8qi)__A);
+}
+
+/* Accumulate across all unsigned 16-bit values in __A. */
+static __inline __m64
+_mm_acc_pu16 (__m64 __A)
+{
+ return __builtin_arm_wacch ((__v4hi)__A);
+}
+
+/* Accumulate across all unsigned 32-bit values in __A. */
+static __inline __m64
+_mm_acc_pu32 (__m64 __A)
+{
+ return __builtin_arm_waccw ((__v2si)__A);
+}
+
+static __inline __m64
+_mm_mia_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmia (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miaph_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiaph (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabt (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatt (__A, __B, __C);
+}
+
+/* Extract one of the elements of A and sign extend. The selector N must
+ be immediate. */
+#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
+#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
+#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))
+
+/* Extract one of the elements of A and zero extend. The selector N must
+ be immediate. */
+#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
+#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
+#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))
+
+/* Inserts word D into one of the elements of A. The selector N must be
+ immediate. */
+#define _mm_insert_pi8(A, D, N) \
+ ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
+#define _mm_insert_pi16(A, D, N) \
+ ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
+#define _mm_insert_pi32(A, D, N) \
+ ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
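+
+/* Usage note (illustrative): the selector becomes an immediate field of
+ the instruction, so it must be a compile-time constant; for example,
+ _mm_extract_pi16 (__v, 2) extracts lane 2 sign extended, while passing
+ a variable selector is rejected at compile time. */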
+
+/* Compute the element-wise maximum of signed 8-bit values. */
+static __inline __m64
+_mm_max_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+static __inline __m64
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of signed 32-bit values. */
+static __inline __m64
+_mm_max_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+static __inline __m64
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 16-bit values. */
+static __inline __m64
+_mm_max_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 32-bit values. */
+static __inline __m64
+_mm_max_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of signed 8-bit values. */
+static __inline __m64
+_mm_min_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+static __inline __m64
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of signed 32-bit values. */
+static __inline __m64
+_mm_min_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values. */
+static __inline __m64
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 16-bit values. */
+static __inline __m64
+_mm_min_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 32-bit values. */
+static __inline __m64
+_mm_min_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+static __inline int
+_mm_movemask_pi8 (__m64 __A)
+{
+ return __builtin_arm_tmovmskb ((__v8qi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 16-bit values. */
+static __inline int
+_mm_movemask_pi16 (__m64 __A)
+{
+ return __builtin_arm_tmovmskh ((__v4hi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 32-bit values. */
+static __inline int
+_mm_movemask_pi32 (__m64 __A)
+{
+ return __builtin_arm_tmovmskw ((__v2si)__A);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+#define _mm_shuffle_pi16(A, N) \
+ ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
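+
+/* Usage sketch (assumption: the selector is encoded as for the x86
+ pshufw instruction, two bits per destination lane, lane 0 in the low
+ bits): _mm_shuffle_pi16 (__a, 0x1B) would then reverse the four lanes,
+ since 0x1B == (0 << 6) | (1 << 4) | (2 << 2) | 3. */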
+
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+static __inline __m64
+_mm_align_si64 (__m64 __A, __m64 __B, int __C)
+{
+ return (__m64) __builtin_arm_walign ((__v8qi)__A, (__v8qi)__B, __C);
+}
+
+/* Creates a 64-bit zero. */
+static __inline __m64
+_mm_setzero_si64 (void)
+{
+ return __builtin_arm_wzero ();
+}
+
+/* Set and get arbitrary iWMMXt control registers.
+ Note that only registers 0-3 and 8-11 are currently defined;
+ the rest are reserved. */
+
+static __inline void
+_mm_setwcx (const int __value, const int __regno)
+{
+ switch (__regno)
+ {
+ case 0: __builtin_arm_setwcx (__value, 0); break;
+ case 1: __builtin_arm_setwcx (__value, 1); break;
+ case 2: __builtin_arm_setwcx (__value, 2); break;
+ case 3: __builtin_arm_setwcx (__value, 3); break;
+ case 8: __builtin_arm_setwcx (__value, 8); break;
+ case 9: __builtin_arm_setwcx (__value, 9); break;
+ case 10: __builtin_arm_setwcx (__value, 10); break;
+ case 11: __builtin_arm_setwcx (__value, 11); break;
+ default: break;
+ }
+}
+
+static __inline int
+_mm_getwcx (const int __regno)
+{
+ switch (__regno)
+ {
+ case 0: return __builtin_arm_getwcx (0);
+ case 1: return __builtin_arm_getwcx (1);
+ case 2: return __builtin_arm_getwcx (2);
+ case 3: return __builtin_arm_getwcx (3);
+ case 8: return __builtin_arm_getwcx (8);
+ case 9: return __builtin_arm_getwcx (9);
+ case 10: return __builtin_arm_getwcx (10);
+ case 11: return __builtin_arm_getwcx (11);
+ default: return 0;
+ }
+}
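+
+/* The switch statements above look redundant, but they ensure that the
+ register number reaching the builtin is a compile-time constant; the
+ underlying TMCR/TMRC instructions presumably encode it as an immediate
+ field. */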
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+static __inline __m64
+_mm_set_pi32 (int __i1, int __i0)
+{
+ union {
+ __m64 __q;
+ struct {
+ unsigned int __i0;
+ unsigned int __i1;
+ } __s;
+ } __u;
+
+ __u.__s.__i0 = __i0;
+ __u.__s.__i1 = __i1;
+
+ return __u.__q;
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+static __inline __m64
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+ unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2;
+ unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0;
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+static __inline __m64
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+{
+ unsigned int __i1, __i0;
+
+ __i1 = (unsigned char)__b7;
+ __i1 = __i1 << 8 | (unsigned char)__b6;
+ __i1 = __i1 << 8 | (unsigned char)__b5;
+ __i1 = __i1 << 8 | (unsigned char)__b4;
+
+ __i0 = (unsigned char)__b3;
+ __i0 = __i0 << 8 | (unsigned char)__b2;
+ __i0 = __i0 << 8 | (unsigned char)__b1;
+ __i0 = __i0 << 8 | (unsigned char)__b0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Similar, but with the arguments in reverse order. */
+static __inline __m64
+_mm_setr_pi32 (int __i0, int __i1)
+{
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+static __inline __m64
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+static __inline __m64
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+{
+ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+static __inline __m64
+_mm_set1_pi32 (int __i)
+{
+ return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+static __inline __m64
+_mm_set1_pi16 (short __w)
+{
+ unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
+ return _mm_set1_pi32 (__i);
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+static __inline __m64
+_mm_set1_pi8 (char __b)
+{
+ unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
+ unsigned int __i = __w << 16 | __w;
+ return _mm_set1_pi32 (__i);
+}
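+
+/* Worked example (illustrative): _mm_set1_pi8 (0x41) first builds the
+ word 0x4141, widens it to 0x41414141, and broadcasts that into both
+ halves, so every byte lane of the result holds 0x41. */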
+
+/* Convert an integer to a __m64 object. */
+static __inline __m64
+_m_from_int (int __a)
+{
+ return (__m64)__a;
+}
+
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_packusdw _mm_packs_pu32
+#define _m_packssqd _mm_packs_pi64
+#define _m_packusqd _mm_packs_pu64
+#define _mm_packs_si64 _mm_packs_pi64
+#define _mm_packs_su64 _mm_packs_pu64
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_punpckehsbw _mm_unpackeh_pi8
+#define _m_punpckehswd _mm_unpackeh_pi16
+#define _m_punpckehsdq _mm_unpackeh_pi32
+#define _m_punpckehubw _mm_unpackeh_pu8
+#define _m_punpckehuwd _mm_unpackeh_pu16
+#define _m_punpckehudq _mm_unpackeh_pu32
+#define _m_punpckelsbw _mm_unpackel_pi8
+#define _m_punpckelswd _mm_unpackel_pi16
+#define _m_punpckelsdq _mm_unpackel_pi32
+#define _m_punpckelubw _mm_unpackel_pu8
+#define _m_punpckeluwd _mm_unpackel_pu16
+#define _m_punpckeludq _mm_unpackel_pu32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddsd _mm_adds_pi32
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_paddusd _mm_adds_pu32
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubsd _mm_subs_pi32
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_psubusd _mm_subs_pu32
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmadduwd _mm_madd_pu16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_pmacsw _mm_mac_pi16
+#define _m_pmacuw _mm_mac_pu16
+#define _m_pmacszw _mm_macz_pi16
+#define _m_pmacuzw _mm_macz_pu16
+#define _m_paccb _mm_acc_pu8
+#define _m_paccw _mm_acc_pu16
+#define _m_paccd _mm_acc_pu32
+#define _m_pmia _mm_mia_si64
+#define _m_pmiaph _mm_miaph_si64
+#define _m_pmiabb _mm_miabb_si64
+#define _m_pmiabt _mm_miabt_si64
+#define _m_pmiatb _mm_miatb_si64
+#define _m_pmiatt _mm_miatt_si64
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psraq _mm_sra_si64
+#define _m_psraqi _mm_srai_si64
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_prorw _mm_ror_pi16
+#define _m_prorwi _mm_rori_pi16
+#define _m_prord _mm_ror_pi32
+#define _m_prordi _mm_rori_pi32
+#define _m_prorq _mm_ror_si64
+#define _m_prorqi _mm_rori_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtub _mm_cmpgt_pu8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtuw _mm_cmpgt_pu16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+#define _m_pcmpgtud _mm_cmpgt_pu32
+#define _m_pextrb _mm_extract_pi8
+#define _m_pextrw _mm_extract_pi16
+#define _m_pextrd _mm_extract_pi32
+#define _m_pextrub _mm_extract_pu8
+#define _m_pextruw _mm_extract_pu16
+#define _m_pextrud _mm_extract_pu32
+#define _m_pinsrb _mm_insert_pi8
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pinsrd _mm_insert_pi32
+#define _m_pmaxsb _mm_max_pi8
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxsd _mm_max_pi32
+#define _m_pmaxub _mm_max_pu8
+#define _m_pmaxuw _mm_max_pu16
+#define _m_pmaxud _mm_max_pu32
+#define _m_pminsb _mm_min_pi8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminsd _mm_min_pi32
+#define _m_pminub _mm_min_pu8
+#define _m_pminuw _mm_min_pu16
+#define _m_pminud _mm_min_pu32
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmovmskw _mm_movemask_pi16
+#define _m_pmovmskd _mm_movemask_pi32
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_pavg2b _mm_avg2_pu8
+#define _m_pavg2w _mm_avg2_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_psadwd _mm_sad_pu16
+#define _m_psadzbw _mm_sadz_pu8
+#define _m_psadzwd _mm_sadz_pu16
+#define _m_paligniq _mm_align_si64
+#define _m_cvt_si2pi _mm_cvtsi64_m64
+#define _m_cvt_pi2si _mm_cvtm64_si64
+
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon-docgen.ml b/gcc-4.2.1-5666.3/gcc/config/arm/neon-docgen.ml
new file mode 100644
index 000000000..2ac0cbdbc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon-docgen.ml
@@ -0,0 +1,323 @@
+(* APPLE LOCAL file v7 support. Merge from Codesourcery *)
+(* ARM NEON documentation generator.
+
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. *)
+
+open Neon
+
+(* The combined "ops" and "reinterp" table. *)
+let ops_reinterp = reinterp @ ops
+
+(* Helper functions for extracting things from the "ops" table. *)
+let single_opcode desired_opcode () =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (opcode, _, _, _, _, _) ->
+ if opcode = desired_opcode then row :: got_so_far
+ else got_so_far
+ ) [] ops_reinterp
+
+let multiple_opcodes desired_opcodes () =
+ List.fold_left (fun got_so_far ->
+ fun desired_opcode ->
+ (single_opcode desired_opcode ()) @ got_so_far)
+ [] desired_opcodes
+
+let ldx_opcode number () =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (opcode, _, _, _, _, _) ->
+ match opcode with
+ Vldx n | Vldx_lane n | Vldx_dup n when n = number ->
+ row :: got_so_far
+ | _ -> got_so_far
+ ) [] ops_reinterp
+
+let stx_opcode number () =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (opcode, _, _, _, _, _) ->
+ match opcode with
+ Vstx n | Vstx_lane n when n = number ->
+ row :: got_so_far
+ | _ -> got_so_far
+ ) [] ops_reinterp
+
+let tbl_opcode () =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (opcode, _, _, _, _, _) ->
+ match opcode with
+ Vtbl _ -> row :: got_so_far
+ | _ -> got_so_far
+ ) [] ops_reinterp
+
+let tbx_opcode () =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (opcode, _, _, _, _, _) ->
+ match opcode with
+ Vtbx _ -> row :: got_so_far
+ | _ -> got_so_far
+ ) [] ops_reinterp
+
+(* The groups of intrinsics. *)
+let intrinsic_groups =
+ [ "Addition", single_opcode Vadd;
+ "Multiplication", single_opcode Vmul;
+ "Multiply-accumulate", single_opcode Vmla;
+ "Multiply-subtract", single_opcode Vmls;
+ "Subtraction", single_opcode Vsub;
+ "Comparison (equal-to)", single_opcode Vceq;
+ "Comparison (greater-than-or-equal-to)", single_opcode Vcge;
+ "Comparison (less-than-or-equal-to)", single_opcode Vcle;
+ "Comparison (greater-than)", single_opcode Vcgt;
+ "Comparison (less-than)", single_opcode Vclt;
+ "Comparison (absolute greater-than-or-equal-to)", single_opcode Vcage;
+ "Comparison (absolute less-than-or-equal-to)", single_opcode Vcale;
+ "Comparison (absolute greater-than)", single_opcode Vcagt;
+ "Comparison (absolute less-than)", single_opcode Vcalt;
+ "Test bits", single_opcode Vtst;
+ "Absolute difference", single_opcode Vabd;
+ "Absolute difference and accumulate", single_opcode Vaba;
+ "Maximum", single_opcode Vmax;
+ "Minimum", single_opcode Vmin;
+ "Pairwise add", single_opcode Vpadd;
+ "Pairwise add, widen and accumulate", single_opcode Vpada;
+ "Folding maximum", single_opcode Vpmax;
+ "Folding minimum", single_opcode Vpmin;
+ "Reciprocal step", multiple_opcodes [Vrecps; Vrsqrts];
+ "Vector shift left", single_opcode Vshl;
+ "Vector shift left by constant", single_opcode Vshl_n;
+ "Vector shift right by constant", single_opcode Vshr_n;
+ "Vector shift right by constant and accumulate", single_opcode Vsra_n;
+ "Vector shift right and insert", single_opcode Vsri;
+ "Vector shift left and insert", single_opcode Vsli;
+ "Absolute value", single_opcode Vabs;
+ "Negation", single_opcode Vneg;
+ "Bitwise not", single_opcode Vmvn;
+ "Count leading sign bits", single_opcode Vcls;
+ "Count leading zeros", single_opcode Vclz;
+ "Count number of set bits", single_opcode Vcnt;
+ "Reciprocal estimate", single_opcode Vrecpe;
+ "Reciprocal square-root estimate", single_opcode Vrsqrte;
+ "Get lanes from a vector", single_opcode Vget_lane;
+ "Set lanes in a vector", single_opcode Vset_lane;
+ "Create vector from literal bit pattern", single_opcode Vcreate;
+ "Set all lanes to the same value",
+ multiple_opcodes [Vdup_n; Vmov_n; Vdup_lane];
+ "Combining vectors", single_opcode Vcombine;
+ "Splitting vectors", multiple_opcodes [Vget_high; Vget_low];
+ "Conversions", multiple_opcodes [Vcvt; Vcvt_n];
+ "Move, narrowing", single_opcode Vmovn;
+ "Move, long", single_opcode Vmovl;
+ "Table lookup", tbl_opcode;
+ "Extended table lookup", tbx_opcode;
+ "Multiply, lane", single_opcode Vmul_lane;
+ "Long multiply, lane", single_opcode Vmull_lane;
+ "Saturating doubling long multiply, lane", single_opcode Vqdmull_lane;
+ "Saturating doubling multiply high, lane", single_opcode Vqdmulh_lane;
+ "Multiply-accumulate, lane", single_opcode Vmla_lane;
+ "Multiply-subtract, lane", single_opcode Vmls_lane;
+ "Vector multiply by scalar", single_opcode Vmul_n;
+ "Vector long multiply by scalar", single_opcode Vmull_n;
+ "Vector saturating doubling long multiply by scalar",
+ single_opcode Vqdmull_n;
+ "Vector saturating doubling multiply high by scalar",
+ single_opcode Vqdmulh_n;
+ "Vector multiply-accumulate by scalar", single_opcode Vmla_n;
+ "Vector multiply-subtract by scalar", single_opcode Vmls_n;
+ "Vector extract", single_opcode Vext;
+ "Reverse elements", multiple_opcodes [Vrev64; Vrev32; Vrev16];
+ "Bit selection", single_opcode Vbsl;
+ "Transpose elements", single_opcode Vtrn;
+ "Zip elements", single_opcode Vzip;
+ "Unzip elements", single_opcode Vuzp;
+ "Element/structure loads, VLD1 variants", ldx_opcode 1;
+ "Element/structure stores, VST1 variants", stx_opcode 1;
+ "Element/structure loads, VLD2 variants", ldx_opcode 2;
+ "Element/structure stores, VST2 variants", stx_opcode 2;
+ "Element/structure loads, VLD3 variants", ldx_opcode 3;
+ "Element/structure stores, VST3 variants", stx_opcode 3;
+ "Element/structure loads, VLD4 variants", ldx_opcode 4;
+ "Element/structure stores, VST4 variants", stx_opcode 4;
+ "Logical operations (AND)", single_opcode Vand;
+ "Logical operations (OR)", single_opcode Vorr;
+ "Logical operations (exclusive OR)", single_opcode Veor;
+ "Logical operations (AND-NOT)", single_opcode Vbic;
+ "Logical operations (OR-NOT)", single_opcode Vorn;
+ "Reinterpret casts", single_opcode Vreinterp ]
+
+(* Given an intrinsic shape, produce a string to document the corresponding
+ operand shapes. *)
+let rec analyze_shape shape =
+ let rec n_things n thing =
+ match n with
+ 0 -> []
+ | n -> thing :: (n_things (n - 1) thing)
+ in
+ let rec analyze_shape_elt reg_no elt =
+ match elt with
+ Dreg -> "@var{d" ^ (string_of_int reg_no) ^ "}"
+ | Qreg -> "@var{q" ^ (string_of_int reg_no) ^ "}"
+ | Corereg -> "@var{r" ^ (string_of_int reg_no) ^ "}"
+ | Immed -> "#@var{0}"
+ | VecArray (1, elt) ->
+ let elt_regexp = analyze_shape_elt 0 elt in
+ "@{" ^ elt_regexp ^ "@}"
+ | VecArray (n, elt) ->
+ let rec f m =
+ match m with
+ 0 -> []
+ | m -> (analyze_shape_elt (m - 1) elt) :: (f (m - 1))
+ in
+ let ops = List.rev (f n) in
+ "@{" ^ (commas (fun x -> x) ops "") ^ "@}"
+ | (PtrTo elt | CstPtrTo elt) ->
+ "[" ^ (analyze_shape_elt reg_no elt) ^ "]"
+ | Element_of_dreg -> (analyze_shape_elt reg_no Dreg) ^ "[@var{0}]"
+ | Element_of_qreg -> (analyze_shape_elt reg_no Qreg) ^ "[@var{0}]"
+ | All_elements_of_dreg -> (analyze_shape_elt reg_no Dreg) ^ "[]"
+ in
+ match shape with
+ All (n, elt) -> commas (analyze_shape_elt 0) (n_things n elt) ""
+ | Long -> (analyze_shape_elt 0 Qreg) ^ ", " ^ (analyze_shape_elt 0 Dreg) ^
+ ", " ^ (analyze_shape_elt 0 Dreg)
+ | Long_noreg elt -> (analyze_shape_elt 0 elt) ^ ", " ^
+ (analyze_shape_elt 0 elt)
+ | Wide -> (analyze_shape_elt 0 Qreg) ^ ", " ^ (analyze_shape_elt 0 Qreg) ^
+ ", " ^ (analyze_shape_elt 0 Dreg)
+ | Wide_noreg elt -> analyze_shape (Long_noreg elt)
+ | Narrow -> (analyze_shape_elt 0 Dreg) ^ ", " ^ (analyze_shape_elt 0 Qreg) ^
+ ", " ^ (analyze_shape_elt 0 Qreg)
+ | Use_operands elts -> commas (analyze_shape_elt 0) (Array.to_list elts) ""
+ | By_scalar Dreg ->
+ analyze_shape (Use_operands [| Dreg; Dreg; Element_of_dreg |])
+ | By_scalar Qreg ->
+ analyze_shape (Use_operands [| Qreg; Qreg; Element_of_dreg |])
+ | By_scalar _ -> assert false
+ | Wide_lane ->
+ analyze_shape (Use_operands [| Qreg; Dreg; Element_of_dreg |])
+ | Wide_scalar ->
+ analyze_shape (Use_operands [| Qreg; Dreg; Element_of_dreg |])
+ | Pair_result elt ->
+ let elt_regexp = analyze_shape_elt 0 elt in
+ let elt_regexp' = analyze_shape_elt 1 elt in
+ elt_regexp ^ ", " ^ elt_regexp'
+ | Unary_scalar _ -> "FIXME Unary_scalar"
+ | Binary_imm elt -> analyze_shape (Use_operands [| elt; elt; Immed |])
+ | Narrow_imm -> analyze_shape (Use_operands [| Dreg; Qreg; Immed |])
+ | Long_imm -> analyze_shape (Use_operands [| Qreg; Dreg; Immed |])
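+
+(* Worked example (illustrative): a shape such as
+ Use_operands [| Dreg; Dreg; Immed |] should come out as something like
+ "@var{d0}, @var{d0}, #@var{0}" -- the register number restarts at 0 for
+ each operand because the documentation only needs placeholders. *)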
+
+(* Document a single intrinsic. *)
+let describe_intrinsic first chan
+ (elt_ty, (_, features, shape, name, munge, _)) =
+ let c_arity, new_elt_ty = munge shape elt_ty in
+ let c_types = strings_of_arity c_arity in
+ Printf.fprintf chan "@itemize @bullet\n";
+ let item_code = if first then "@item" else "@itemx" in
+ Printf.fprintf chan "%s %s %s_%s (" item_code (List.hd c_types)
+ (intrinsic_name name) (string_of_elt elt_ty);
+ Printf.fprintf chan "%s)\n" (commas (fun ty -> ty) (List.tl c_types) "");
+ if not (List.exists (fun feature -> feature = No_op) features) then
+ begin
+ let print_one_insn name =
+ Printf.fprintf chan "@code{";
+ let no_suffix = (new_elt_ty = NoElts) in
+ let name_with_suffix =
+ if no_suffix then name
+ else name ^ "." ^ (string_of_elt_dots new_elt_ty)
+ in
+ let possible_operands = analyze_all_shapes features shape
+ analyze_shape
+ in
+ let rec print_one_possible_operand op =
+ Printf.fprintf chan "%s %s}" name_with_suffix op
+ in
+ (* If the intrinsic expands to multiple instructions, we assume
+ they are all of the same form. *)
+ print_one_possible_operand (List.hd possible_operands)
+ in
+ let rec print_insns names =
+ match names with
+ [] -> ()
+ | [name] -> print_one_insn name
+ | name::names -> (print_one_insn name;
+ Printf.fprintf chan " @emph{or} ";
+ print_insns names)
+ in
+ let insn_names = get_insn_names features name in
+ Printf.fprintf chan "@*@emph{Form of expected instruction(s):} ";
+ print_insns insn_names;
+ Printf.fprintf chan "\n"
+ end;
+ Printf.fprintf chan "@end itemize\n";
+ Printf.fprintf chan "\n\n"
+
+(* Document a group of intrinsics. *)
+let document_group chan (group_title, group_extractor) =
+ (* Extract the rows in question from the ops table and then turn them
+ into a list of intrinsics. *)
+ let intrinsics =
+ List.fold_left (fun got_so_far ->
+ fun row ->
+ match row with
+ (_, _, _, _, _, elt_tys) ->
+ List.fold_left (fun got_so_far' ->
+ fun elt_ty ->
+ (elt_ty, row) :: got_so_far')
+ got_so_far elt_tys
+ ) [] (group_extractor ())
+ in
+ (* Emit the title for this group. *)
+ Printf.fprintf chan "@subsubsection %s\n\n" group_title;
+ (* Emit a description of each intrinsic. *)
+ List.iter (describe_intrinsic true chan) intrinsics;
+ (* Close this group. *)
+ Printf.fprintf chan "\n\n"
+
+let gnu_header chan =
+ List.iter (fun s -> Printf.fprintf chan "%s\n" s) [
+ "@c Copyright (C) 2006 Free Software Foundation, Inc.";
+ "@c This is part of the GCC manual.";
+ "@c For copying conditions, see the file gcc.texi.";
+ "";
+ "@c This file is generated automatically using gcc/config/arm/neon-docgen.ml";
+ "@c Please do not edit manually."]
+
+(* Program entry point. *)
+let _ =
+ if Array.length Sys.argv <> 2 then
+ failwith "Usage: neon-docgen <output filename>"
+ else
+ let file = Sys.argv.(1) in
+ try
+ let chan = open_out file in
+ gnu_header chan;
+ List.iter (document_group chan) intrinsic_groups;
+ close_out chan
+ with Sys_error sys ->
+ failwith ("Could not create output file " ^ file ^ ": " ^ sys)
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon-gen.ml b/gcc-4.2.1-5666.3/gcc/config/arm/neon-gen.ml
new file mode 100644
index 000000000..c567e86f9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon-gen.ml
@@ -0,0 +1,424 @@
+(* APPLE LOCAL file v7 support. Merge from Codesourcery *)
+(* Auto-generate ARM Neon intrinsics header file.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ This is an O'Caml program. The O'Caml compiler is available from:
+
+ http://caml.inria.fr/
+
+ Or from your favourite OS's friendly packaging system. Tested with version
+ 3.09.2, though other versions will probably work too.
+
+ Compile with:
+ ocamlc -c neon.ml
+ ocamlc -o neon-gen neon.cmo neon-gen.ml
+
+ Run with:
+ ./neon-gen > arm_neon.h
+*)
+
+open Neon
+
+(* The format codes used in the following functions are documented at:
+ http://caml.inria.fr/pub/docs/manual-ocaml/libref/Format.html\
+ #6_printflikefunctionsforprettyprinting
+ (one line, remove the backslash.)
+*)
+
+(* The following functions approximate GNU indentation style. *)
+let start_function () =
+ Format.printf "@[<v 0>";
+ ref 0
+
+let end_function nesting =
+ match !nesting with
+ 0 -> Format.printf "@;@;@]"
+ | _ -> failwith ("Bad nesting (ending function at level "
+ ^ (string_of_int !nesting) ^ ")")
+
+let open_braceblock nesting =
+ begin match !nesting with
+ 0 -> Format.printf "@,@<0>{@[<v 2>@,"
+ | _ -> Format.printf "@,@[<v 2> @<0>{@[<v 2>@,"
+ end;
+ incr nesting
+
+let close_braceblock nesting =
+ decr nesting;
+ match !nesting with
+ 0 -> Format.printf "@]@,@<0>}"
+ | _ -> Format.printf "@]@,@<0>}@]"
+
+let print_function arity fnname body =
+ let ffmt = start_function () in
+ Format.printf "__extension__ static __inline ";
+ let inl = "__attribute__ ((__always_inline__))" in
+ begin match arity with
+ Arity0 ret ->
+ Format.printf "%s %s@,%s (void)" (string_of_vectype ret) inl fnname
+ | Arity1 (ret, arg0) ->
+ Format.printf "%s %s@,%s (%s __a)" (string_of_vectype ret) inl fnname
+ (string_of_vectype arg0)
+ | Arity2 (ret, arg0, arg1) ->
+ Format.printf "%s %s@,%s (%s __a, %s __b)"
+ (string_of_vectype ret) inl fnname (string_of_vectype arg0)
+ (string_of_vectype arg1)
+ | Arity3 (ret, arg0, arg1, arg2) ->
+ Format.printf "%s %s@,%s (%s __a, %s __b, %s __c)"
+ (string_of_vectype ret) inl fnname (string_of_vectype arg0)
+ (string_of_vectype arg1) (string_of_vectype arg2)
+ | Arity4 (ret, arg0, arg1, arg2, arg3) ->
+ Format.printf "%s %s@,%s (%s __a, %s __b, %s __c, %s __d)"
+ (string_of_vectype ret) inl fnname (string_of_vectype arg0)
+ (string_of_vectype arg1) (string_of_vectype arg2)
+ (string_of_vectype arg3)
+ end;
+ open_braceblock ffmt;
+ let rec print_lines = function
+ [] -> ()
+ | [line] -> Format.printf "%s" line
+ | line::lines -> Format.printf "%s@," line; print_lines lines in
+ print_lines body;
+ close_braceblock ffmt;
+ end_function ffmt
+
+let return_by_ptr features = List.mem ReturnPtr features
+
+let union_string num elts base =
+ let itype = inttype_for_array num elts in
+ let iname = string_of_inttype itype
+ and sname = string_of_vectype (T_arrayof (num, elts)) in
+ Printf.sprintf "union { %s __i; %s __o; } %s" sname iname base
+
+let rec signed_ctype = function
+ T_uint8x8 | T_poly8x8 -> T_int8x8
+ | T_uint8x16 | T_poly8x16 -> T_int8x16
+ | T_uint16x4 | T_poly16x4 -> T_int16x4
+ | T_uint16x8 | T_poly16x8 -> T_int16x8
+ | T_uint32x2 -> T_int32x2
+ | T_uint32x4 -> T_int32x4
+ | T_uint64x1 -> T_int64x1
+ | T_uint64x2 -> T_int64x2
+ (* Cast to types defined by mode in arm.c, not random types pulled in from
+ the <stdint.h> header in use. This fixes incompatible pointer errors when
+ compiling with C++. *)
+ | T_uint8 | T_int8 -> T_intQI
+ | T_uint16 | T_int16 -> T_intHI
+ | T_uint32 | T_int32 -> T_intSI
+ | T_uint64 | T_int64 -> T_intDI
+ | T_poly8 -> T_intQI
+ | T_poly16 -> T_intHI
+ | T_arrayof (n, elt) -> T_arrayof (n, signed_ctype elt)
+ | T_ptrto elt -> T_ptrto (signed_ctype elt)
+ | T_const elt -> T_const (signed_ctype elt)
+ | x -> x
+
+let add_cast ctype cval =
+ let stype = signed_ctype ctype in
+ if ctype <> stype then
+ Printf.sprintf "(%s) %s" (string_of_vectype stype) cval
+ else
+ cval
+
+let cast_for_return to_ty = "(" ^ (string_of_vectype to_ty) ^ ")"
+
+(* Return a tuple of a list of declarations to go at the start of the function,
+ and a list of statements needed to return THING. *)
+let return arity return_by_ptr thing =
+ match arity with
+ Arity0 (ret) | Arity1 (ret, _) | Arity2 (ret, _, _) | Arity3 (ret, _, _, _)
+ | Arity4 (ret, _, _, _, _) ->
+ match ret with
+ T_arrayof (num, vec) ->
+ if return_by_ptr then
+ let sname = string_of_vectype ret in
+ [Printf.sprintf "%s __rv;" sname],
+ [thing ^ ";"; "return __rv;"]
+ else
+ let uname = union_string num vec "__rv" in
+ [uname ^ ";"], ["__rv.__o = " ^ thing ^ ";"; "return __rv.__i;"]
+ | T_void -> [], [thing ^ ";"]
+ | _ ->
+ [], ["return " ^ (cast_for_return ret) ^ thing ^ ";"]
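+
+(* Illustrative: for an array-of-vectors return without ReturnPtr, the
+ generated C body has the form (type names here are examples only)
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = <builtin call>;
+ return __rv.__i;
+ i.e. the union puns the opaque builtin result into the user-visible
+ struct of vectors. *)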
+
+let rec element_type ctype =
+ match ctype with
+ T_arrayof (_, v) -> element_type v
+ | _ -> ctype
+
+let params return_by_ptr ps =
+ let pdecls = ref [] in
+ let ptype t p =
+ match t with
+ T_arrayof (num, elts) ->
+ let uname = union_string num elts (p ^ "u") in
+ let decl = Printf.sprintf "%s = { %s };" uname p in
+ pdecls := decl :: !pdecls;
+ p ^ "u.__o"
+ | _ -> add_cast t p in
+ let plist = match ps with
+ Arity0 _ -> []
+ | Arity1 (_, t1) -> [ptype t1 "__a"]
+ | Arity2 (_, t1, t2) -> [ptype t1 "__a"; ptype t2 "__b"]
+ | Arity3 (_, t1, t2, t3) -> [ptype t1 "__a"; ptype t2 "__b"; ptype t3 "__c"]
+ | Arity4 (_, t1, t2, t3, t4) ->
+ [ptype t1 "__a"; ptype t2 "__b"; ptype t3 "__c"; ptype t4 "__d"] in
+ match ps with
+ Arity0 ret | Arity1 (ret, _) | Arity2 (ret, _, _) | Arity3 (ret, _, _, _)
+ | Arity4 (ret, _, _, _, _) ->
+ if return_by_ptr then
+ !pdecls, add_cast (T_ptrto (element_type ret)) "&__rv.val[0]" :: plist
+ else
+ !pdecls, plist
+
+let modify_params features plist =
+ let is_flipped =
+ List.exists (function Flipped _ -> true | _ -> false) features in
+ if is_flipped then
+ match plist with
+ [ a; b ] -> [ b; a ]
+ | _ ->
+ failwith ("Don't know how to flip args " ^ (String.concat ", " plist))
+ else
+ plist
+
+(* !!! Decide whether to add an extra information word based on the shape
+ form. *)
+let extra_word shape features paramlist bits =
+ let use_word =
+ match shape with
+ All _ | Long | Long_noreg _ | Wide | Wide_noreg _ | Narrow
+ | By_scalar _ | Wide_scalar | Wide_lane | Binary_imm _ | Long_imm
+ | Narrow_imm -> true
+ | _ -> List.mem InfoWord features
+ in
+ if use_word then
+ paramlist @ [string_of_int bits]
+ else
+ paramlist
+
+(* Bit 0 represents signed (1) vs unsigned (0), or float (1) vs poly (0).
+ Bit 1 represents rounding (1) vs none (0).
+ Bit 2 represents floats & polynomials (1), or ordinary integers (0). *)
+let infoword_value elttype features =
+ let bits02 =
+ match elt_class elttype with
+ Signed | ConvClass (Signed, _) | ConvClass (_, Signed) -> 0b001
+ | Poly -> 0b100
+ | Float -> 0b101
+ | _ -> 0b000
+ and rounding_bit = if List.mem Rounding features then 0b010 else 0b000 in
+ bits02 lor rounding_bit
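+
+(* Worked example (illustrative): a Float element with the Rounding
+ feature gets bits02 = 0b101 and rounding_bit = 0b010, so the
+ information word passed to the builtin is 0b111. *)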
+
+(* "Cast" type operations will throw an exception in mode_of_elt (actually in
+ elt_width, called from there). Deal with that here, and generate a suffix
+ with multiple modes (<to><from>). *)
+let rec mode_suffix elttype shape =
+ try
+ let mode = mode_of_elt elttype shape in
+ string_of_mode mode
+ with MixedMode (dst, src) ->
+ let dstmode = mode_of_elt dst shape
+ and srcmode = mode_of_elt src shape in
+ string_of_mode dstmode ^ string_of_mode srcmode
+
+let print_variant opcode features shape name (ctype, asmtype, elttype) =
+ let bits = infoword_value elttype features in
+ let modesuf = mode_suffix elttype shape in
+ let return_by_ptr = return_by_ptr features in
+ let pdecls, paramlist = params return_by_ptr ctype in
+ let paramlist' = modify_params features paramlist in
+ let paramlist'' = extra_word shape features paramlist' bits in
+ let parstr = String.concat ", " paramlist'' in
+ let builtin = Printf.sprintf "__builtin_neon_%s%s (%s)"
+ (builtin_name features name) modesuf parstr in
+ let rdecls, stmts = return ctype return_by_ptr builtin in
+ let body = pdecls @ rdecls @ stmts
+ and fnname = (intrinsic_name name) ^ "_" ^ (string_of_elt elttype) in
+ print_function ctype fnname body
+
+(* When this function processes the element types in the ops table, it rewrites
+ them in a list of tuples (a,b,c):
+ a : C type as an "arity", e.g. Arity1 (T_poly8x8, T_poly8x8)
+ b : Asm type : a single, processed element type, e.g. P16. This is the
+ type which should be attached to the asm opcode.
+ c : Variant type : the unprocessed type for this variant (e.g. in add
+ instructions which don't care about the sign, b might be i16 and c
+ might be s16.)
+*)
+
+let print_op (opcode, features, shape, name, munge, types) =
+ let sorted_types = List.sort compare types in
+ let munged_types = List.map
+ (fun elt -> let c, asm = munge shape elt in c, asm, elt) sorted_types in
+ List.iter
+ (fun variant -> print_variant opcode features shape name variant)
+ munged_types
+
+let print_ops ops =
+ List.iter print_op ops
+
+(* Output type definitions. Table entries are:
+ cbase : "C" name for the type.
+ abase : "ARM" base name for the type (i.e. int in int8x8_t).
+ esize : element size.
+ enum : element count.
+ We can't really distinguish between polynomial types and integer types in
+ the C type system, so the user may be able to make mistakes without
+ warnings from the compiler.
+ FIXME: It's probably better to use stdint.h names here.
+*)
+
+let deftypes () =
+ let typeinfo = [
+ (* Doubleword vector types. *)
+ "__builtin_neon_qi", "int", 8, 8;
+ "__builtin_neon_hi", "int", 16, 4;
+ "__builtin_neon_si", "int", 32, 2;
+ "__builtin_neon_di", "int", 64, 1;
+ "__builtin_neon_sf", "float", 32, 2;
+ "__builtin_neon_poly8", "poly", 8, 8;
+ "__builtin_neon_poly16", "poly", 16, 4;
+ "__builtin_neon_uqi", "uint", 8, 8;
+ "__builtin_neon_uhi", "uint", 16, 4;
+ "__builtin_neon_usi", "uint", 32, 2;
+ "__builtin_neon_udi", "uint", 64, 1;
+
+ (* Quadword vector types. *)
+ "__builtin_neon_qi", "int", 8, 16;
+ "__builtin_neon_hi", "int", 16, 8;
+ "__builtin_neon_si", "int", 32, 4;
+ "__builtin_neon_di", "int", 64, 2;
+ "__builtin_neon_sf", "float", 32, 4;
+ "__builtin_neon_poly8", "poly", 8, 16;
+ "__builtin_neon_poly16", "poly", 16, 8;
+ "__builtin_neon_uqi", "uint", 8, 16;
+ "__builtin_neon_uhi", "uint", 16, 8;
+ "__builtin_neon_usi", "uint", 32, 4;
+ "__builtin_neon_udi", "uint", 64, 2
+ ] in
+ List.iter
+ (fun (cbase, abase, esize, enum) ->
+ let attr =
+ match enum with
+ 1 -> ""
+ | _ -> Printf.sprintf "\t__attribute__ ((__vector_size__ (%d)))"
+ (esize * enum / 8) in
+ Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr)
+ typeinfo;
+ Format.print_newline ();
+ (* Extra types not in <stdint.h>. *)
+ Format.printf "typedef __builtin_neon_sf float32_t;\n";
+ Format.printf "typedef __builtin_neon_poly8 poly8_t;\n";
+ Format.printf "typedef __builtin_neon_poly16 poly16_t;\n"
+
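+(* Illustrative only: the first doubleword entry above expands (modulo
+   whitespace) to
+     typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
+   since 8 bits * 8 elements / 8 = 8 bytes.  *)
+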
+(* Output structs containing arrays, for load & store instructions etc. *)
+
+let arrtypes () =
+ let typeinfo = [
+ "int", 8; "int", 16;
+ "int", 32; "int", 64;
+ "uint", 8; "uint", 16;
+ "uint", 32; "uint", 64;
+ "float", 32; "poly", 8;
+ "poly", 16
+ ] in
+ let writestruct elname elsize regsize arrsize =
+ let elnum = regsize / elsize in
+ let structname =
+ Printf.sprintf "%s%dx%dx%d_t" elname elsize elnum arrsize in
+ let sfmt = start_function () in
+ Format.printf "typedef struct %s" structname;
+ open_braceblock sfmt;
+ Format.printf "%s%dx%d_t val[%d];" elname elsize elnum arrsize;
+ close_braceblock sfmt;
+ Format.printf " %s;" structname;
+ end_function sfmt;
+ in
+ for n = 2 to 4 do
+ List.iter
+ (fun (elname, elsize) ->
+ writestruct elname elsize 64 n;
+ writestruct elname elsize 128 n)
+ typeinfo
+ done
+
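+(* Illustrative only: writestruct "int" 8 64 2 emits, spread over several
+   lines by the pretty-printer, roughly
+     typedef struct int8x8x2_t { int8x8_t val[2]; } int8x8x2_t;  *)
+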
+let print_lines = List.iter (fun s -> Format.printf "%s@\n" s)
+
+(* Do it. *)
+
+let _ =
+ print_lines [
+"/* ARM NEON intrinsics include file. This file is generated automatically";
+" using neon-gen.ml. Please do not edit manually.";
+"";
+" Copyright (C) 2006, 2007 Free Software Foundation, Inc.";
+" Contributed by CodeSourcery.";
+"";
+" This file is part of GCC.";
+"";
+" GCC is free software; you can redistribute it and/or modify it";
+" under the terms of the GNU General Public License as published";
+" by the Free Software Foundation; either version 2, or (at your";
+" option) any later version.";
+"";
+" GCC is distributed in the hope that it will be useful, but WITHOUT";
+" ANY WARRANTY; without even the implied warranty of MERCHANTABILITY";
+" or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public";
+" License for more details.";
+"";
+" You should have received a copy of the GNU General Public License";
+" along with GCC; see the file COPYING. If not, write to the";
+" Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,";
+" MA 02110-1301, USA. */";
+"";
+"/* As a special exception, if you include this header file into source";
+" files compiled by GCC, this header file does not by itself cause";
+" the resulting executable to be covered by the GNU General Public";
+" License. This exception does not however invalidate any other";
+" reasons why the executable file might be covered by the GNU General";
+" Public License. */";
+"";
+"#ifndef _GCC_ARM_NEON_H";
+"#define _GCC_ARM_NEON_H 1";
+"";
+"#ifndef __ARM_NEON__";
+"#error You must enable NEON instructions (e.g. -mfloat-abi=softfp -mfpu=neon) to use arm_neon.h";
+"#else";
+"";
+"#ifdef __cplusplus";
+"extern \"C\" {";
+"#endif";
+"";
+"#include <stdint.h>";
+""];
+ deftypes ();
+ arrtypes ();
+ Format.print_newline ();
+ print_ops ops;
+ Format.print_newline ();
+ print_ops reinterp;
+ print_lines [
+"#ifdef __cplusplus";
+"}";
+"#endif";
+"#endif";
+"#endif"]
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon-schedgen.ml b/gcc-4.2.1-5666.3/gcc/config/arm/neon-schedgen.ml
new file mode 100644
index 000000000..69f797fd3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon-schedgen.ml
@@ -0,0 +1,498 @@
+(* APPLE LOCAL file v7 support. Merge from Codesourcery *)
+(* Emission of the core of the Cortex-A8 NEON scheduling description.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+*)
+
+(* This scheduling description generator works as follows.
+ - Each group of instructions has source and destination requirements
+ specified. The source requirements may be specified using
+ Source (the stage at which all source operands not otherwise
+ described are read), Source_m (the stage at which Rm operands are
+ read), Source_n (likewise for Rn) and Source_d (likewise for Rd).
+ - For each group of instructions the earliest stage where a source
+ operand may be required is calculated.
+ - Each group of instructions is selected in turn as a producer.
+ The latencies between this group and every other group are then
+ calculated, yielding up to four values for each combination:
+ 1. Producer -> consumer Rn latency
+ 2. Producer -> consumer Rm latency
+ 3. Producer -> consumer Rd (as a source) latency
+ 4. Producer -> consumer worst-case latency.
+ Value 4 is calculated from the destination availability requirements
+ of the consumer and the earliest source availability requirements
+ of the producer.
+ - The largest Value 4 calculated for the current producer is the
+ worst-case latency, L, for that instruction group. This value is written
+ out in a define_insn_reservation for the producer group.
+ - For each producer and consumer pair, the latencies calculated above
+ are collated. The average (of up to four values) is calculated and
+ if this average is different from the worst-case latency, an
+ unguarded define_bypass construction is issued for that pair.
+ (For each pair only one define_bypass construction will be emitted,
+ and at present we do not emit specific guards.)
+*)
+
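+(* Worked example of the latency arithmetic (using values from the table
+   below, not normative): "neon_int_1" produces its result at N3; for a
+   consumer such as "neon_int_2", whose earliest source requirement is
+   Source_m at N1, the worst-case latency is 3 - 1 + 1 = 3 cycles.  *)
+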
+open Utils
+
+let n1 = 1 and n2 = 2 and n3 = 3 and n4 = 4 and n5 = 5 and n6 = 6
+ and n7 = 7 and n8 = 8 and n9 = 9
+
+type availability = Source of int
+ | Source_n of int
+ | Source_m of int
+ | Source_d of int
+ | Dest of int
+ | Dest_n_after of int * int
+
+type guard = Guard_none | Guard_only_m | Guard_only_n | Guard_only_d
+
+(* Reservation behaviours. All but the last row here correspond to one
+ pipeline each. Each constructor will correspond to one
+ define_reservation. *)
+type reservation =
+ Mul | Mul_2cycle | Mul_4cycle
+| Shift | Shift_2cycle
+| ALU | ALU_2cycle
+| Fmul | Fmul_2cycle
+| Fadd | Fadd_2cycle
+(* | VFP *)
+| Permute of int
+| Ls of int
+| Fmul_then_fadd | Fmul_then_fadd_2
+
+(* This table must be kept as short as possible by conflating
+ entries with the same availability behaviour.
+
+ First components: instruction group names
+ Second components: availability requirements, in the order in which
+ they should appear in the comments in the .md file.
+ Third components: reservation info
+*)
+let availability_table = [
+ (* NEON integer ALU instructions. *)
+ (* vbit vbif vbsl vorr vbic vnot vcls vclz vcnt vadd vand vorr
+ veor vbic vorn ddd qqq *)
+ "neon_int_1", [Source n2; Dest n3], ALU;
+ (* vadd vsub qqd vsub ddd qqq *)
+ "neon_int_2", [Source_m n1; Source_n n2; Dest n3], ALU;
+ (* vsum vneg dd qq vadd vsub qdd *)
+ "neon_int_3", [Source n1; Dest n3], ALU;
+ (* vabs vceqz vcgez vcgtz vclez vcltz vadh vradh vsbh vrsbh dqq *)
+ (* vhadd vrhadd vqadd vtst ddd qqq *)
+ "neon_int_4", [Source n2; Dest n4], ALU;
+ (* vabd qdd vhsub vqsub vabd vceq vcge vcgt vmax vmin vfmx vfmn ddd ddd *)
+ "neon_int_5", [Source_m n1; Source_n n2; Dest n4], ALU;
+ (* vqneg vqabs dd qq *)
+ "neon_vqneg_vqabs", [Source n1; Dest n4], ALU;
+ (* vmov vmvn *)
+ "neon_vmov", [Dest n3], ALU;
+ (* vaba *)
+ "neon_vaba", [Source_n n2; Source_m n1; Source_d n3; Dest n6], ALU;
+ "neon_vaba_qqq",
+ [Source_n n2; Source_m n1; Source_d n3; Dest_n_after (1, n6)], ALU_2cycle;
+ (* vsma *)
+ "neon_vsma", [Source_m n1; Source_d n3; Dest n6], ALU;
+
+ (* NEON integer multiply instructions. *)
+ (* vmul, vqdmlh, vqrdmlh *)
+ (* vmul, vqdmul, qdd 16/8 long 32/16 long *)
+ "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long", [Source n2; Dest n6], Mul;
+ "neon_mul_qqq_8_16_32_ddd_32", [Source n2; Dest_n_after (1, n6)], Mul_2cycle;
+ (* vmul, vqdmul again *)
+ "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar",
+ [Source_n n2; Source_m n1; Dest_n_after (1, n6)], Mul_2cycle;
+ (* vmla, vmls *)
+ "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long",
+ [Source_n n2; Source_m n2; Source_d n3; Dest n6], Mul;
+ "neon_mla_qqq_8_16",
+ [Source_n n2; Source_m n2; Source_d n3; Dest_n_after (1, n6)], Mul_2cycle;
+ "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long",
+ [Source_n n2; Source_m n1; Source_d n3; Dest_n_after (1, n6)], Mul_2cycle;
+ "neon_mla_qqq_32_qqd_32_scalar",
+ [Source_n n2; Source_m n1; Source_d n3; Dest_n_after (3, n6)], Mul_4cycle;
+ (* vmul, vqdmulh, vqrdmulh *)
+ (* vmul, vqdmul *)
+ "neon_mul_ddd_16_scalar_32_16_long_scalar",
+ [Source_n n2; Source_m n1; Dest n6], Mul;
+ "neon_mul_qqd_32_scalar",
+ [Source_n n2; Source_m n1; Dest_n_after (3, n6)], Mul_4cycle;
+ (* vmla, vmls *)
+ (* vmla, vmla, vqdmla, vqdmls *)
+ "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar",
+ [Source_n n2; Source_m n1; Source_d n3; Dest n6], Mul;
+
+ (* NEON integer shift instructions. *)
+ (* vshr/vshl immediate, vshr_narrow, vshl_vmvh, vsli_vsri_ddd *)
+ "neon_shift_1", [Source n1; Dest n3], Shift;
+ (* vqshl, vrshr immediate; vqshr, vqmov, vrshr, vqrshr narrow;
+ vqshl_vrshl_vqrshl_ddd *)
+ "neon_shift_2", [Source n1; Dest n4], Shift;
+ (* vsli, vsri and vshl for qqq *)
+ "neon_shift_3", [Source n1; Dest_n_after (1, n3)], Shift_2cycle;
+ "neon_vshl_ddd", [Source n1; Dest n1], Shift;
+ "neon_vqshl_vrshl_vqrshl_qqq", [Source n1; Dest_n_after (1, n4)],
+ Shift_2cycle;
+ "neon_vsra_vrsra", [Source_m n1; Source_d n3; Dest n6], Shift;
+
+ (* NEON floating-point instructions. *)
+ (* vadd, vsub, vabd, vmul, vceq, vcge, vcgt, vcage, vcagt, vmax, vmin *)
+ (* vabs, vneg, vceqz, vcgez, vcgtz, vclez, vcltz, vrecpe, vrsqrte, vcvt *)
+ "neon_fp_vadd_ddd_vabs_dd", [Source n2; Dest n5], Fadd;
+ "neon_fp_vadd_qqq_vabs_qq", [Source n2; Dest_n_after (1, n5)],
+ Fadd_2cycle;
+ (* vsum, vfmx, vfmn *)
+ "neon_fp_vsum", [Source n1; Dest n5], Fadd;
+ "neon_fp_vmul_ddd", [Source_n n2; Source_m n1; Dest n5], Fmul;
+ "neon_fp_vmul_qqd", [Source_n n2; Source_m n1; Dest_n_after (1, n5)],
+ Fmul_2cycle;
+ (* vmla, vmls *)
+ "neon_fp_vmla_ddd",
+ [Source_n n2; Source_m n2; Source_d n3; Dest n9], Fmul_then_fadd;
+ "neon_fp_vmla_qqq",
+ [Source_n n2; Source_m n2; Source_d n3; Dest_n_after (1, n9)],
+ Fmul_then_fadd_2;
+ "neon_fp_vmla_ddd_scalar",
+ [Source_n n2; Source_m n1; Source_d n3; Dest n9], Fmul_then_fadd;
+ "neon_fp_vmla_qqq_scalar",
+ [Source_n n2; Source_m n1; Source_d n3; Dest_n_after (1, n9)],
+ Fmul_then_fadd_2;
+ "neon_fp_vrecps_vrsqrts_ddd", [Source n2; Dest n9], Fmul_then_fadd;
+ "neon_fp_vrecps_vrsqrts_qqq", [Source n2; Dest_n_after (1, n9)],
+ Fmul_then_fadd_2;
+
+ (* NEON byte permute instructions. *)
+ (* vmov; vtrn and vswp for dd; vzip for dd; vuzp for dd; vrev; vext for dd *)
+ "neon_bp_simple", [Source n1; Dest n2], Permute 1;
+ (* vswp for qq; vext for qqq; vtbl with {Dn} or {Dn, Dn1};
+ similarly for vtbx *)
+ "neon_bp_2cycle", [Source n1; Dest_n_after (1, n2)], Permute 2;
+ (* all the rest *)
+ "neon_bp_3cycle", [Source n1; Dest_n_after (2, n2)], Permute 3;
+
+ (* NEON load/store instructions. *)
+ "neon_ldr", [Dest n1], Ls 1;
+ "neon_str", [Source n1], Ls 1;
+ "neon_vld1_1_2_regs", [Dest_n_after (1, n1)], Ls 2;
+ "neon_vld1_3_4_regs", [Dest_n_after (2, n1)], Ls 3;
+ "neon_vld2_2_regs_vld1_vld2_all_lanes", [Dest_n_after (1, n2)], Ls 2;
+ "neon_vld2_4_regs", [Dest_n_after (2, n2)], Ls 3;
+ "neon_vld3_vld4", [Dest_n_after (3, n2)], Ls 4;
+ "neon_vst1_1_2_regs_vst2_2_regs", [Source n1], Ls 2;
+ "neon_vst1_3_4_regs", [Source n1], Ls 3;
+ "neon_vst2_4_regs_vst3_vst4", [Source n1], Ls 4;
+ "neon_vst3_vst4", [Source n1], Ls 4;
+ "neon_vld1_vld2_lane", [Source n1; Dest_n_after (2, n2)], Ls 3;
+ "neon_vld3_vld4_lane", [Source n1; Dest_n_after (4, n2)], Ls 5;
+ "neon_vst1_vst2_lane", [Source n1], Ls 2;
+ "neon_vst3_vst4_lane", [Source n1], Ls 3;
+ "neon_vld3_vld4_all_lanes", [Dest_n_after (1, n2)], Ls 3;
+
+ (* NEON register transfer instructions. *)
+ "neon_mcr", [Dest n2], Permute 1;
+ "neon_mcr_2_mcrr", [Dest n2], Permute 2;
+ (* MRC instructions are in the .tpl file. *)
+]
+
+(* Augment the tuples in the availability table with an extra component
+ that describes the earliest stage where a source operand may be
+ required. (It is also possible that an entry in the table has no
+ source requirements.) *)
+let calculate_sources =
+ List.map (fun (name, avail, res) ->
+ let earliest_stage =
+ List.fold_left
+ (fun cur -> fun info ->
+ match info with
+ Source stage
+ | Source_n stage
+ | Source_m stage
+ | Source_d stage ->
+ (match cur with
+ None -> Some stage
+ | Some stage' when stage < stage' -> Some stage
+ | _ -> cur)
+ | _ -> cur) None avail
+ in
+ (name, avail, res, earliest_stage))
+
+(* Find the stage, if any, at the end of which a group produces a result. *)
+let find_dest (attr, avail, _, _) =
+ try
+ find_with_result
+ (fun av -> match av with
+ Dest st -> Some (Some st)
+ | Dest_n_after (after, st) -> Some (Some (after + st))
+ | _ -> None) avail
+ with Not_found -> None
+
+(* Find the worst-case latency between a producer and a consumer. *)
+let worst_case_latency producer (_, _, _, earliest_required) =
+ let dest = find_dest producer in
+ match earliest_required, dest with
+ None, _ ->
+ (* The consumer doesn't have any source requirements. *)
+ None
+ | _, None ->
+ (* The producer doesn't produce any results (e.g. a store insn). *)
+ None
+ | Some consumed, Some produced -> Some (produced - consumed + 1)
+
+(* Helper function for below. *)
+let latency_calc f producer (_, avail, _, _) =
+ try
+ let source_avail = find_with_result f avail in
+ match find_dest producer with
+ None ->
+ (* The producer does not produce a result. *)
+ Some 0
+ | Some produced ->
+ let latency = produced - source_avail + 1 in
+ (* Latencies below zero are raised to zero since we don't have
+ delay slots. *)
+ if latency < 0 then Some 0 else Some latency
+ with Not_found -> None
+
+(* Find any Rm latency between a producer and a consumer. If no
+ Rm source requirement is explicitly specified for the consumer,
+ return "positive infinity". Also return "positive infinity" if
+ the latency matches the supplied worst-case latency for this
+ producer. *)
+let get_m_latency producer consumer =
+ match latency_calc (fun av -> match av with Source_m stage -> Some stage
+ | _ -> None) producer consumer
+ with None -> [] | Some latency -> [(Guard_only_m, latency)]
+
+(* Likewise for Rn. *)
+let get_n_latency producer consumer =
+ match latency_calc (fun av -> match av with Source_n stage -> Some stage
+ | _ -> None) producer consumer
+ with None -> [] | Some latency -> [(Guard_only_n, latency)]
+
+(* Likewise for Rd. *)
+let get_d_latency producer consumer =
+ match
+ latency_calc (fun av -> match av with Source_d stage -> Some stage
+ | _ -> None) producer consumer
+ with None -> [] | Some latency -> [(Guard_only_d, latency)]
+
+(* Given a producer and a consumer, work out the latency of the producer
+ to the consumer in each of the four cases (availability information
+ permitting) identified at the top of this file. Return the
+ consumer, the worst-case unguarded latency and any guarded latencies. *)
+let calculate_latencies producer consumer =
+ let worst = worst_case_latency producer consumer in
+ let m_latency = get_m_latency producer consumer in
+ let n_latency = get_n_latency producer consumer in
+ let d_latency = get_d_latency producer consumer in
+ (consumer, worst, m_latency @ n_latency @ d_latency)
+
+(* Helper function for below. *)
+let pick_latency largest worst guards =
+ let guards =
+ match worst with
+ None -> guards
+ | Some worst -> (Guard_none, worst) :: guards
+ in
+ if List.length guards = 0 then None else
+ let total_latency =
+ List.fold_left (fun acc -> fun (_, latency) -> acc + latency) 0 guards
+ in
+ let average_latency = (float_of_int total_latency) /.
+ (float_of_int (List.length guards)) in
+ let rounded_latency = int_of_float (ceil average_latency) in
+ if rounded_latency = largest then None
+ else Some (Guard_none, rounded_latency)
+
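+(* Worked example for pick_latency above: with worst = Some 5 and
+   guards = [(Guard_only_m, 3)], the average is (5 + 3) / 2 = 4.0, so this
+   returns Some (Guard_none, 4) unless 4 happens to equal LARGEST, in which
+   case no bypass is needed.  *)
+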
+(* Collate all bypasses for a particular producer as required in
+ worst_case_latencies_and_bypasses. (By this stage there is a maximum
+ of one bypass from this producer to any particular consumer listed
+ in LATENCIES.) Use a hash table to collate bypasses with the
+ same latency and guard. *)
+let collate_bypasses (producer_name, _, _, _) largest latencies =
+ let ht = Hashtbl.create 42 in
+ let keys = ref [] in
+ List.iter (
+ fun ((consumer, _, _, _), worst, guards) ->
+ (* Find out which latency to use. Ignoring latencies that match
+ the *overall* worst-case latency for this producer (which will
+ be in define_insn_reservation), we have to examine:
+ 1. the latency with no guard between this producer and this
+ consumer; and
+ 2. any guarded latency. *)
+ let guard_latency_opt = pick_latency largest worst guards in
+ match guard_latency_opt with
+ None -> ()
+ | Some (guard, latency) ->
+ begin
+ (if (try ignore (Hashtbl.find ht (guard, latency)); false
+ with Not_found -> true) then
+ keys := (guard, latency) :: !keys);
+ Hashtbl.add ht (guard, latency) consumer
+ end
+ ) latencies;
+ (* The hash table now has bypasses collated so that ones with the
+ same latency and guard have the same keys. Walk through all the
+ keys, extract the associated bypasses, and concatenate the names
+ of the consumers for each bypass. *)
+ List.map (
+ fun ((guard, latency) as key) ->
+ let consumers = Hashtbl.find_all ht key in
+ (producer_name,
+ String.concat ",\\\n " consumers,
+ latency,
+ guard)
+ ) !keys
+
+(* For every producer, find the worst-case latency between it and
+ *any* consumer. Also determine (if such a thing exists) the
+ lowest-latency bypass from each producer to each consumer. Group
+ the output in such a way that all bypasses with the same producer
+ and latency are together, and so that bypasses with the worst-case
+ latency are ignored. *)
+let worst_case_latencies_and_bypasses =
+ let rec f (worst_acc, bypasses_acc) prev xs =
+ match xs with
+ [] -> (worst_acc, bypasses_acc)
+ | ((producer_name, producer_avail, res_string, _) as producer)::next ->
+ (* For this particular producer, work out the latencies between
+ it and every consumer. *)
+ let latencies =
+ List.fold_left (fun acc -> fun consumer ->
+ (calculate_latencies producer consumer) :: acc)
+ [] (prev @ xs)
+ in
+ (* Now work out what the overall worst case latency was for this
+ particular producer. *)
+ match latencies with
+ [] -> assert false
+ | _ ->
+ let comp_fn (_, l1, _) (_, l2, _) =
+ if l1 > l2 then -1 else if l1 = l2 then 0 else 1
+ in
+ let largest =
+ match List.hd (List.sort comp_fn latencies) with
+ (_, None, _) -> 0 (* Producer has no consumers. *)
+ | (_, Some worst, _) -> worst
+ in
+ (* Having got the largest latency, collect all bypasses for
+ this producer and filter out those with that largest
+ latency. Record the others for later emission. *)
+ let bypasses = collate_bypasses producer largest latencies in
+ (* Go on to process remaining producers, having noted
+ the result for this one. *)
+ f ((producer_name, producer_avail, largest,
+ res_string) :: worst_acc,
+ bypasses @ bypasses_acc)
+ (prev @ [producer]) next
+ in
+ f ([], []) []
+
+(* Emit a helpful comment for a define_insn_reservation. *)
+let write_comment producer avail =
+ let seen_source = ref false in
+ let describe info =
+ let read = if !seen_source then "" else "read " in
+ match info with
+ Source stage ->
+ seen_source := true;
+ Printf.printf "%stheir source operands at N%d" read stage
+ | Source_n stage ->
+ seen_source := true;
+ Printf.printf "%stheir (D|Q)n operands at N%d" read stage
+ | Source_m stage ->
+ seen_source := true;
+ Printf.printf "%stheir (D|Q)m operands at N%d" read stage
+ | Source_d stage ->
+ Printf.printf "%stheir (D|Q)d operands at N%d" read stage
+ | Dest stage ->
+ Printf.printf "produce a result at N%d" stage
+ | Dest_n_after (after, stage) ->
+ Printf.printf "produce a result at N%d on cycle %d" stage (after + 1)
+ in
+ Printf.printf ";; Instructions using this reservation ";
+ let rec f infos x =
+ let sep = if x mod 2 = 1 then "" else "\n;;" in
+ match infos with
+ [] -> assert false
+ | [info] -> describe info; Printf.printf ".\n"
+ | info::(_::[] as infos) ->
+ describe info; Printf.printf ", and%s " sep; f infos (x+1)
+ | info::infos -> describe info; Printf.printf ",%s " sep; f infos (x+1)
+ in
+ f avail 0
+
+(* Emit a define_insn_reservation for each producer. The latency
+ written in will be its worst-case latency. *)
+let emit_insn_reservations =
+ List.iter (
+ fun (producer, avail, latency, reservation) ->
+ write_comment producer avail;
+ Printf.printf "(define_insn_reservation \"%s\" %d\n" producer latency;
+ Printf.printf " (and (eq_attr \"tune\" \"cortexa8\")\n";
+ Printf.printf " (eq_attr \"neon_type\" \"%s\"))\n" producer;
+ let str =
+ match reservation with
+ Mul -> "dp" | Mul_2cycle -> "dp_2" | Mul_4cycle -> "dp_4"
+ | Shift -> "dp" | Shift_2cycle -> "dp_2"
+ | ALU -> "dp" | ALU_2cycle -> "dp_2"
+ | Fmul -> "dp" | Fmul_2cycle -> "dp_2"
+ | Fadd -> "fadd" | Fadd_2cycle -> "fadd_2"
+ | Ls 1 -> "ls"
+ | Ls n -> "ls_" ^ (string_of_int n)
+ | Permute 1 -> "perm"
+ | Permute n -> "perm_" ^ (string_of_int n)
+ | Fmul_then_fadd -> "fmul_then_fadd"
+ | Fmul_then_fadd_2 -> "fmul_then_fadd_2"
+ in
+ Printf.printf " \"cortex_a8_neon_%s\")\n\n" str
+ )
+
+(* Given a guard description, return the name of the C function to
+ be used as the guard for define_bypass. *)
+let guard_fn g =
+ match g with
+ Guard_only_m -> "arm_neon_only_m_dependency"
+ | Guard_only_n -> "arm_neon_only_n_dependency"
+ | Guard_only_d -> "arm_neon_only_d_dependency"
+ | Guard_none -> assert false
+
+(* Emit a define_bypass for each bypass. *)
+let emit_bypasses =
+ List.iter (
+ fun (producer, consumers, latency, guard) ->
+ Printf.printf "(define_bypass %d \"%s\"\n" latency producer;
+ if guard = Guard_none then
+ Printf.printf " \"%s\")\n\n" consumers
+ else
+ begin
+ Printf.printf " \"%s\"\n" consumers;
+ Printf.printf " \"%s\")\n\n" (guard_fn guard)
+ end
+ )
+
+(* Program entry point. *)
+let main =
+ let table = calculate_sources availability_table in
+ let worst_cases, bypasses = worst_case_latencies_and_bypasses table in
+ emit_insn_reservations (List.rev worst_cases);
+ Printf.printf ";; Exceptions to the default latencies.\n\n";
+ emit_bypasses bypasses
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon-testgen.ml b/gcc-4.2.1-5666.3/gcc/config/arm/neon-testgen.ml
new file mode 100644
index 000000000..8929b46a9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon-testgen.ml
@@ -0,0 +1,274 @@
+(* APPLE LOCAL file v7 support. Merge from Codesourcery *)
+(* Auto-generate ARM Neon intrinsics tests.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ This is an O'Caml program. The O'Caml compiler is available from:
+
+ http://caml.inria.fr/
+
+ Or from your favourite OS's friendly packaging system. Tested with version
+ 3.09.2, though other versions will probably work too.
+
+ Compile with:
+ ocamlc -c neon.ml
+ ocamlc -o neon-testgen neon.cmo neon-testgen.ml
+*)
+
+open Neon
+
+type c_type_flags = Pointer | Const
+
+(* Open a test source file. *)
+let open_test_file dir name =
+ try
+ open_out (dir ^ "/" ^ name ^ ".c")
+ with Sys_error str ->
+ failwith ("Could not create test source file " ^ name ^ ": " ^ str)
+
+(* Emit prologue code to a test source file. *)
+let emit_prologue chan test_name =
+ Printf.fprintf chan "/* Test the `%s' ARM Neon intrinsic. */\n" test_name;
+ Printf.fprintf chan "/* This file was autogenerated by neon-testgen. */\n\n";
+ Printf.fprintf chan "/* { dg-do assemble } */\n";
+ Printf.fprintf chan "/* { dg-require-effective-target arm_neon_ok } */\n";
+ Printf.fprintf chan
+ "/* { dg-options \"-save-temps -O0 -mfpu=neon -mfloat-abi=softfp\" } */\n";
+ Printf.fprintf chan "\n#include \"arm_neon.h\"\n\n";
+ Printf.fprintf chan "void test_%s (void)\n{\n" test_name
+
+(* Emit declarations of local variables that are going to be passed
+ to an intrinsic, together with one to take a returned value if needed. *)
+let emit_automatics chan c_types =
+ let emit () =
+ ignore (
+ List.fold_left (fun arg_number -> fun (flags, ty) ->
+ let pointer_bit =
+ if List.mem Pointer flags then "*" else ""
+ in
+ (* Const arguments to builtins are directly
+ written in as constants. *)
+ if not (List.mem Const flags) then
+ Printf.fprintf chan " %s %sarg%d_%s;\n"
+ ty pointer_bit arg_number ty;
+ arg_number + 1)
+ 0 (List.tl c_types))
+ in
+ match c_types with
+ (_, return_ty) :: tys ->
+ if return_ty <> "void" then
+ (* The intrinsic returns a value. *)
+ (Printf.fprintf chan " %s out_%s;\n" return_ty return_ty;
+ emit ())
+ else
+ (* The intrinsic does not return a value. *)
+ emit ()
+ | _ -> assert false
+
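+(* Illustrative only: for an intrinsic such as vadd_s8, where every C type
+   is int8x8_t, emit_automatics above emits roughly
+     int8x8_t out_int8x8_t;
+     int8x8_t arg0_int8x8_t;
+     int8x8_t arg1_int8x8_t;
+   at the top of the generated test function.  *)
+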
+(* Emit code to call an intrinsic. *)
+let emit_call chan const_valuator c_types name elt_ty =
+ (if snd (List.hd c_types) <> "void" then
+ Printf.fprintf chan " out_%s = " (snd (List.hd c_types))
+ else
+ Printf.fprintf chan " ");
+ Printf.fprintf chan "%s_%s (" (intrinsic_name name) (string_of_elt elt_ty);
+ let print_arg chan arg_number (flags, ty) =
+ (* If the argument is of const type, then directly write in the
+ constant now. *)
+ if List.mem Const flags then
+ match const_valuator with
+ None ->
+ if List.mem Pointer flags then
+ Printf.fprintf chan "0"
+ else
+ Printf.fprintf chan "1"
+ | Some f -> Printf.fprintf chan "%s" (string_of_int (f arg_number))
+ else
+ Printf.fprintf chan "arg%d_%s" arg_number ty
+ in
+ let rec print_args arg_number tys =
+ match tys with
+ [] -> ()
+ | [ty] -> print_arg chan arg_number ty
+ | ty::tys ->
+ print_arg chan arg_number ty;
+ Printf.fprintf chan ", ";
+ print_args (arg_number + 1) tys
+ in
+ print_args 0 (List.tl c_types);
+ Printf.fprintf chan ");\n"
+
+(* Emit epilogue code to a test source file. *)
+let emit_epilogue chan features regexps =
+ let no_op = List.exists (fun feature -> feature = No_op) features in
+ Printf.fprintf chan "}\n\n";
+ (if not no_op then
+ List.iter (fun regexp ->
+ Printf.fprintf chan
+ "/* { dg-final { scan-assembler \"%s\" } } */\n" regexp)
+ regexps
+ else
+ ()
+ );
+ Printf.fprintf chan "/* { dg-final { cleanup-saved-temps } } */\n"
+
+(* Check a list of C types to determine which ones are pointers and which
+ ones are const. *)
+let check_types tys =
+ let tys' =
+ List.map (fun ty ->
+ let len = String.length ty in
+ if len > 2 && String.get ty (len - 2) = ' '
+ && String.get ty (len - 1) = '*'
+ then ([Pointer], String.sub ty 0 (len - 2))
+ else ([], ty)) tys
+ in
+ List.map (fun (flags, ty) ->
+ if String.length ty > 6 && String.sub ty 0 6 = "const "
+ then (Const :: flags, String.sub ty 6 ((String.length ty) - 6))
+ else (flags, ty)) tys'
+
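+(* Worked example:
+     check_types ["int8x8_t"; "const int8_t *"]
+   evaluates to
+     [([], "int8x8_t"); ([Const; Pointer], "int8_t")].  *)
+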
+(* Given an intrinsic shape, produce a regexp that will match
+ the right-hand sides of instructions generated by an intrinsic of
+ that shape. *)
+let rec analyze_shape shape =
+ let rec n_things n thing =
+ match n with
+ 0 -> []
+ | n -> thing :: (n_things (n - 1) thing)
+ in
+ let rec analyze_shape_elt elt =
+ match elt with
+ Dreg -> "\\[dD\\]\\[0-9\\]+"
+ | Qreg -> "\\[qQ\\]\\[0-9\\]+"
+ | Corereg -> "\\[rR\\]\\[0-9\\]+"
+ | Immed -> "#\\[0-9\\]+"
+ | VecArray (1, elt) ->
+ let elt_regexp = analyze_shape_elt elt in
+ "((\\\\\\{" ^ elt_regexp ^ "\\\\\\})|(" ^ elt_regexp ^ "))"
+ | VecArray (n, elt) ->
+ let elt_regexp = analyze_shape_elt elt in
+ let alt1 = elt_regexp ^ "-" ^ elt_regexp in
+ let alt2 = commas (fun x -> x) (n_things n elt_regexp) "" in
+ "\\\\\\{((" ^ alt1 ^ ")|(" ^ alt2 ^ "))\\\\\\}"
+ | (PtrTo elt | CstPtrTo elt) ->
+ "\\\\\\[" ^ (analyze_shape_elt elt) ^ "\\\\\\]"
+ | Element_of_dreg -> (analyze_shape_elt Dreg) ^ "\\\\\\[\\[0-9\\]+\\\\\\]"
+ | Element_of_qreg -> (analyze_shape_elt Qreg) ^ "\\\\\\[\\[0-9\\]+\\\\\\]"
+ | All_elements_of_dreg -> (analyze_shape_elt Dreg) ^ "\\\\\\[\\\\\\]"
+ in
+ match shape with
+ All (n, elt) -> commas analyze_shape_elt (n_things n elt) ""
+ | Long -> (analyze_shape_elt Qreg) ^ ", " ^ (analyze_shape_elt Dreg) ^
+ ", " ^ (analyze_shape_elt Dreg)
+ | Long_noreg elt -> (analyze_shape_elt elt) ^ ", " ^ (analyze_shape_elt elt)
+ | Wide -> (analyze_shape_elt Qreg) ^ ", " ^ (analyze_shape_elt Qreg) ^
+ ", " ^ (analyze_shape_elt Dreg)
+ | Wide_noreg elt -> analyze_shape (Long_noreg elt)
+ | Narrow -> (analyze_shape_elt Dreg) ^ ", " ^ (analyze_shape_elt Qreg) ^
+ ", " ^ (analyze_shape_elt Qreg)
+ | Use_operands elts -> commas analyze_shape_elt (Array.to_list elts) ""
+ | By_scalar Dreg ->
+ analyze_shape (Use_operands [| Dreg; Dreg; Element_of_dreg |])
+ | By_scalar Qreg ->
+ analyze_shape (Use_operands [| Qreg; Qreg; Element_of_dreg |])
+ | By_scalar _ -> assert false
+ | Wide_lane ->
+ analyze_shape (Use_operands [| Qreg; Dreg; Element_of_dreg |])
+ | Wide_scalar ->
+ analyze_shape (Use_operands [| Qreg; Dreg; Element_of_dreg |])
+ | Pair_result elt ->
+ let elt_regexp = analyze_shape_elt elt in
+ elt_regexp ^ ", " ^ elt_regexp
+ | Unary_scalar _ -> "FIXME Unary_scalar"
+ | Binary_imm elt -> analyze_shape (Use_operands [| elt; elt; Immed |])
+ | Narrow_imm -> analyze_shape (Use_operands [| Dreg; Qreg; Immed |])
+ | Long_imm -> analyze_shape (Use_operands [| Qreg; Dreg; Immed |])
+
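+(* Illustrative only, assuming commas (from neon.ml) joins with ", ":
+   analyze_shape (All (2, Dreg)) yields the OCaml string literal
+     "\\[dD\\]\\[0-9\\]+, \\[dD\\]\\[0-9\\]+"
+   i.e. a Tcl-quoted regexp matching two D-register operands.  *)
+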
+(* Generate tests for one intrinsic. *)
+let test_intrinsic dir opcode features shape name munge elt_ty =
+ (* Open the test source file. *)
+ let test_name = name ^ (string_of_elt elt_ty) in
+ let chan = open_test_file dir test_name in
+ (* Work out what argument and return types the intrinsic has. *)
+ let c_arity, new_elt_ty = munge shape elt_ty in
+ let c_types = check_types (strings_of_arity c_arity) in
+ (* Extract any constant valuator (a function specifying what constant
+ values are to be written into the intrinsic call) from the features
+ list. *)
+ let const_valuator =
+ try
+ match (List.find (fun feature -> match feature with
+ Const_valuator _ -> true
+ | _ -> false) features) with
+ Const_valuator f -> Some f
+ | _ -> assert false
+ with Not_found -> None
+ in
+ (* Work out what instruction name(s) to expect. *)
+ let insns = get_insn_names features name in
+ let no_suffix = (new_elt_ty = NoElts) in
+ let insns =
+ if no_suffix then insns
+ else List.map (fun insn ->
+ let suffix = string_of_elt_dots new_elt_ty in
+ insn ^ "\\." ^ suffix) insns
+ in
+ (* Construct a regexp to match against the expected instruction name(s). *)
+ let insn_regexp =
+ match insns with
+ [] -> assert false
+ | [insn] -> insn
+ | _ ->
+ let rec calc_regexp insns cur_regexp =
+ match insns with
+ [] -> cur_regexp
+ | [insn] -> cur_regexp ^ "(" ^ insn ^ "))"
+ | insn::insns -> calc_regexp insns (cur_regexp ^ "(" ^ insn ^ ")|")
+ in calc_regexp insns "("
+ in
+ (* Construct regexps to match against the instructions that this
+ intrinsic expands to. Watch out for any writeback character and
+ comments after the instruction. *)
+ let regexps = List.map (fun regexp -> insn_regexp ^ "\\[ \t\\]+" ^ regexp ^
+ "!?\\(\\[ \t\\]+@\\[a-zA-Z0-9 \\]+\\)?\\n")
+ (analyze_all_shapes features shape analyze_shape)
+ in
+ (* Emit file and function prologues. *)
+ emit_prologue chan test_name;
+ (* Emit local variable declarations. *)
+ emit_automatics chan c_types;
+ Printf.fprintf chan "\n";
+ (* Emit the call to the intrinsic. *)
+ emit_call chan const_valuator c_types name elt_ty;
+ (* Emit the function epilogue and the DejaGNU scan-assembler directives. *)
+ emit_epilogue chan features regexps;
+ (* Close the test file. *)
+ close_out chan
+
+(* Generate tests for one element of the "ops" table. *)
+let test_intrinsic_group dir (opcode, features, shape, name, munge, types) =
+ List.iter (test_intrinsic dir opcode features shape name munge) types
+
+(* Program entry point. *)
+let _ =
+ let directory = if Array.length Sys.argv <> 1 then Sys.argv.(1) else "." in
+ List.iter (test_intrinsic_group directory) (reinterp @ ops)
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon.md b/gcc-4.2.1-5666.3/gcc/config/arm/neon.md
new file mode 100644
index 000000000..c0472b858
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon.md
@@ -0,0 +1,4917 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM NEON coprocessor Machine Description
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;; Written by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA.
+
+;; Constants for unspecs.
+(define_constants
+ [(UNSPEC_VPADD 65)
+ (UNSPEC_VPSMIN 66)
+ (UNSPEC_VPUMIN 67)
+ (UNSPEC_VPSMAX 68)
+ (UNSPEC_VPUMAX 69)
+ (UNSPEC_ASHIFT_SIGNED 70)
+ (UNSPEC_ASHIFT_UNSIGNED 71)
+ (UNSPEC_VADD 72)
+ (UNSPEC_VADDL 73)
+ (UNSPEC_VADDW 74)
+ (UNSPEC_VHADD 75)
+ (UNSPEC_VQADD 76)
+ (UNSPEC_VADDHN 77)
+ (UNSPEC_VABS 78)
+ (UNSPEC_VQABS 79)
+ (UNSPEC_VGET_LANE 80)
+ (UNSPEC_VSET_LANE 81)
+ (UNSPEC_VDUP_N 82)
+ (UNSPEC_VCOMBINE 83)
+ (UNSPEC_VGET_HIGH 84)
+ (UNSPEC_VGET_LOW 85)
+ (UNSPEC_VMOVN 87)
+ (UNSPEC_VQMOVN 88)
+ (UNSPEC_VQMOVUN 89)
+ (UNSPEC_VMOVL 90)
+ (UNSPEC_VMUL_LANE 91)
+ (UNSPEC_VMLA_LANE 92)
+ (UNSPEC_VMLAL_LANE 93)
+ (UNSPEC_VQDMLAL_LANE 94)
+ (UNSPEC_VMUL_N 95)
+ (UNSPEC_VCVT 96)
+ (UNSPEC_VEXT 97)
+ (UNSPEC_VREV64 98)
+ (UNSPEC_VREV32 99)
+ (UNSPEC_VREV16 100)
+ (UNSPEC_VBSL 101)
+ (UNSPEC_VLD1 102)
+ (UNSPEC_VLD1_LANE 103)
+ (UNSPEC_VLD1_DUP 104)
+ (UNSPEC_VST1 105)
+ (UNSPEC_VST1_LANE 106)
+ (UNSPEC_VSTRUCTDUMMY 107)
+ (UNSPEC_VLD2 108)
+ (UNSPEC_VLD2_LANE 109)
+ (UNSPEC_VLD2_DUP 110)
+ (UNSPEC_VST2 111)
+ (UNSPEC_VST2_LANE 112)
+ (UNSPEC_VLD3 113)
+ (UNSPEC_VLD3A 114)
+ (UNSPEC_VLD3B 115)
+ (UNSPEC_VLD3_LANE 116)
+ (UNSPEC_VLD3_DUP 117)
+ (UNSPEC_VST3 118)
+ (UNSPEC_VST3A 119)
+ (UNSPEC_VST3B 120)
+ (UNSPEC_VST3_LANE 121)
+ (UNSPEC_VLD4 122)
+ (UNSPEC_VLD4A 123)
+ (UNSPEC_VLD4B 124)
+ (UNSPEC_VLD4_LANE 125)
+ (UNSPEC_VLD4_DUP 126)
+ (UNSPEC_VST4 127)
+ (UNSPEC_VST4A 128)
+ (UNSPEC_VST4B 129)
+ (UNSPEC_VST4_LANE 130)
+ (UNSPEC_VTRN1 131)
+ (UNSPEC_VTRN2 132)
+ (UNSPEC_VTBL 133)
+ (UNSPEC_VTBX 134)
+ (UNSPEC_VAND 135)
+ (UNSPEC_VORR 136)
+ (UNSPEC_VEOR 137)
+ (UNSPEC_VBIC 138)
+ (UNSPEC_VORN 139)
+ (UNSPEC_VCVT_N 140)
+ (UNSPEC_VQNEG 142)
+ (UNSPEC_VMVN 143)
+ (UNSPEC_VCLS 144)
+ (UNSPEC_VCLZ 145)
+ (UNSPEC_VCNT 146)
+ (UNSPEC_VRECPE 147)
+ (UNSPEC_VRSQRTE 148)
+ (UNSPEC_VMUL 149)
+ (UNSPEC_VMLA 150)
+ (UNSPEC_VMLAL 151)
+ (UNSPEC_VMLS 152)
+ (UNSPEC_VMLSL 153)
+ (UNSPEC_VQDMULH 154)
+ (UNSPEC_VQDMLAL 155)
+ (UNSPEC_VQDMLSL 156)
+ (UNSPEC_VMULL 157)
+ (UNSPEC_VQDMULL 158)
+ (UNSPEC_VMLS_LANE 159)
+ (UNSPEC_VMLSL_LANE 160)
+ (UNSPEC_VQDMLSL_LANE 161)
+ (UNSPEC_VDUP_LANE 162)
+ (UNSPEC_VZIP1 163)
+ (UNSPEC_VZIP2 164)
+ (UNSPEC_VUZP1 165)
+ (UNSPEC_VUZP2 166)
+ (UNSPEC_VSRI 167)
+ (UNSPEC_VSLI 168)
+ (UNSPEC_VSRA_N 169)
+ (UNSPEC_VSHL_N 170)
+ (UNSPEC_VQSHL_N 171)
+ (UNSPEC_VQSHLU_N 172)
+ (UNSPEC_VSHLL_N 173)
+ (UNSPEC_VSHR_N 174)
+ (UNSPEC_VSHRN_N 175)
+ (UNSPEC_VQSHRN_N 176)
+ (UNSPEC_VQSHRUN_N 177)
+ (UNSPEC_VSUB 178)
+ (UNSPEC_VSUBL 179)
+ (UNSPEC_VSUBW 180)
+ (UNSPEC_VQSUB 181)
+ (UNSPEC_VHSUB 182)
+ (UNSPEC_VSUBHN 183)
+ (UNSPEC_VCEQ 184)
+ (UNSPEC_VCGE 185)
+ (UNSPEC_VCGT 186)
+ (UNSPEC_VCAGE 187)
+ (UNSPEC_VCAGT 188)
+ (UNSPEC_VTST 189)
+ (UNSPEC_VABD 190)
+ (UNSPEC_VABDL 191)
+ (UNSPEC_VABA 192)
+ (UNSPEC_VABAL 193)
+ (UNSPEC_VMAX 194)
+ (UNSPEC_VMIN 195)
+ (UNSPEC_VPADDL 196)
+ (UNSPEC_VPADAL 197)
+ (UNSPEC_VSHL 198)
+ (UNSPEC_VQSHL 199)
+ (UNSPEC_VPMAX 200)
+ (UNSPEC_VPMIN 201)
+ (UNSPEC_VRECPS 202)
+ (UNSPEC_VRSQRTS 203)
+ (UNSPEC_VMULL_LANE 204)
+ (UNSPEC_VQDMULL_LANE 205)
+ (UNSPEC_VQDMULH_LANE 206)])
+
+
+;; Double-width vector modes.
+(define_mode_macro VD [V8QI V4HI V2SI V2SF])
+
+;; Double-width vector modes plus 64-bit elements.
+(define_mode_macro VDX [V8QI V4HI V2SI V2SF DI])
+
+;; Same, without floating-point elements.
+(define_mode_macro VDI [V8QI V4HI V2SI])
+
+;; Quad-width vector modes.
+(define_mode_macro VQ [V16QI V8HI V4SI V4SF])
+
+;; Quad-width vector modes plus 64-bit elements.
+(define_mode_macro VQX [V16QI V8HI V4SI V4SF V2DI])
+
+;; Same, without floating-point elements.
+(define_mode_macro VQI [V16QI V8HI V4SI])
+
+;; Same, with TImode added, for moves.
+(define_mode_macro VQXMOV [V16QI V8HI V4SI V4SF V2DI TI])
+
+;; Opaque structure types wider than TImode.
+(define_mode_macro VSTRUCT [EI OI CI XI])
+
+;; Number of instructions needed to load/store struct elements. FIXME!
+(define_mode_attr V_slen [(EI "2") (OI "2") (CI "3") (XI "4")])
+
+;; Opaque structure types used in table lookups (except vtbl1/vtbx1).
+(define_mode_macro VTAB [TI EI OI])
+
+;; vtbl<n> suffix for above modes.
+(define_mode_attr VTAB_n [(TI "2") (EI "3") (OI "4")])
+
+;; Widenable modes.
+(define_mode_macro VW [V8QI V4HI V2SI])
+
+;; Narrowable modes.
+(define_mode_macro VN [V8HI V4SI V2DI])
+
+;; All supported vector modes (except singleton DImode).
+(define_mode_macro VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DI])
+
+;; All supported vector modes (except those with 64-bit integer elements).
+(define_mode_macro VDQW [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF])
+
+;; Supported integer vector modes (not 64-bit elements).
+(define_mode_macro VDQIW [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Supported integer vector modes (not singleton DI).
+(define_mode_macro VDQI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; Vector modes, including 64-bit integer elements.
+(define_mode_macro VDQX [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF DI V2DI])
+
+;; Vector modes including 64-bit integer elements, but no floats.
+(define_mode_macro VDQIX [V8QI V16QI V4HI V8HI V2SI V4SI DI V2DI])
+
+;; Vector modes for float->int conversions.
+(define_mode_macro VCVTF [V2SF V4SF])
+
+;; Vector modes for int->float conversions.
+(define_mode_macro VCVTI [V2SI V4SI])
+
+;; Vector modes for doubleword multiply-accumulate, etc. insns.
+(define_mode_macro VMD [V4HI V2SI V2SF])
+
+;; Vector modes for quadword multiply-accumulate, etc. insns.
+(define_mode_macro VMQ [V8HI V4SI V4SF])
+
+;; Above modes combined.
+(define_mode_macro VMDQ [V4HI V2SI V2SF V8HI V4SI V4SF])
+
+;; As VMD, but integer modes only.
+(define_mode_macro VMDI [V4HI V2SI])
+
+;; As VMQ, but integer modes only.
+(define_mode_macro VMQI [V8HI V4SI])
+
+;; Above modes combined.
+(define_mode_macro VMDQI [V4HI V2SI V8HI V4SI])
+
+;; Modes with 8-bit and 16-bit elements.
+(define_mode_macro VX [V8QI V4HI V16QI V8HI])
+
+;; Modes with 8-bit elements.
+(define_mode_macro VE [V8QI V16QI])
+
+;; Modes with 64-bit elements only.
+(define_mode_macro V64 [DI V2DI])
+
+;; Modes with 32-bit elements only.
+(define_mode_macro V32 [V2SI V2SF V4SI V4SF])
+
+;; (Opposite) mode to convert to/from for above conversions.
+(define_mode_attr V_CVTTO [(V2SI "V2SF") (V2SF "V2SI")
+ (V4SI "V4SF") (V4SF "V4SI")])
+
+;; Define element mode for each vector mode.
+(define_mode_attr V_elem [(V8QI "QI") (V16QI "QI")
+ (V4HI "HI") (V8HI "HI")
+ (V2SI "SI") (V4SI "SI")
+ (V2SF "SF") (V4SF "SF")
+ (DI "DI") (V2DI "DI")])
+
+;; Mode of pair of elements for each vector mode, to define transfer
+;; size for structure lane/dup loads and stores.
+(define_mode_attr V_two_elem [(V8QI "HI") (V16QI "HI")
+ (V4HI "SI") (V8HI "SI")
+ (V2SI "V2SI") (V4SI "V2SI")
+ (V2SF "V2SF") (V4SF "V2SF")
+ (DI "V2DI") (V2DI "V2DI")])
+
+;; Similar, for three elements.
+;; ??? Should we define extra modes so that sizes of all three-element
+;; accesses can be accurately represented?
+(define_mode_attr V_three_elem [(V8QI "SI") (V16QI "SI")
+ (V4HI "V4HI") (V8HI "V4HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (V2SF "V4SF") (V4SF "V4SF")
+ (DI "EI") (V2DI "EI")])
+
+;; Similar, for four elements.
+(define_mode_attr V_four_elem [(V8QI "SI") (V16QI "SI")
+ (V4HI "V4HI") (V8HI "V4HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (V2SF "V4SF") (V4SF "V4SF")
+ (DI "OI") (V2DI "OI")])
+
+;; Register width from element mode
+(define_mode_attr V_reg [(V8QI "P") (V16QI "q")
+ (V4HI "P") (V8HI "q")
+ (V2SI "P") (V4SI "q")
+ (V2SF "P") (V4SF "q")
+ (DI "P") (V2DI "q")])
+
+;; Wider modes with the same number of elements.
+(define_mode_attr V_widen [(V8QI "V8HI") (V4HI "V4SI") (V2SI "V2DI")])
+
+;; Narrower modes with the same number of elements.
+(define_mode_attr V_narrow [(V8HI "V8QI") (V4SI "V4HI") (V2DI "V2SI")])
+
+;; Modes with half the number of equal-sized elements.
+(define_mode_attr V_HALF [(V16QI "V8QI") (V8HI "V4HI")
+ (V4SI "V2SI") (V4SF "V2SF")
+ (V2DI "DI")])
+
+;; Same, but lower-case.
+(define_mode_attr V_half [(V16QI "v8qi") (V8HI "v4hi")
+ (V4SI "v2si") (V4SF "v2sf")
+ (V2DI "di")])
+
+;; Modes with twice the number of equal-sized elements.
+(define_mode_attr V_DOUBLE [(V8QI "V16QI") (V4HI "V8HI")
+ (V2SI "V4SI") (V2SF "V4SF")
+ (DI "V2DI")])
+
+;; Same, but lower-case.
+(define_mode_attr V_double [(V8QI "v16qi") (V4HI "v8hi")
+ (V2SI "v4si") (V2SF "v4sf")
+ (DI "v2di")])
+
+;; Modes with double-width elements.
+(define_mode_attr V_double_width [(V8QI "V4HI") (V16QI "V8HI")
+ (V4HI "V2SI") (V8HI "V4SI")
+ (V2SI "DI") (V4SI "V2DI")])
+
+;; Mode of result of comparison operations (and bit-select operand 1).
+(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (DI "DI") (V2DI "V2DI")])
+
+;; Get element type from double-width mode, for operations where we don't care
+;; about signedness.
+(define_mode_attr V_if_elem [(V8QI "i8") (V16QI "i8")
+ (V4HI "i16") (V8HI "i16")
+ (V2SI "i32") (V4SI "i32")
+ (DI "i64") (V2DI "i64")
+ (V2SF "f32") (V4SF "f32")])
+
+;; Same, but for operations which work on signed values.
+(define_mode_attr V_s_elem [(V8QI "s8") (V16QI "s8")
+ (V4HI "s16") (V8HI "s16")
+ (V2SI "s32") (V4SI "s32")
+ (DI "s64") (V2DI "s64")
+ (V2SF "f32") (V4SF "f32")])
+
+;; Same, but for operations which work on unsigned values.
+(define_mode_attr V_u_elem [(V8QI "u8") (V16QI "u8")
+ (V4HI "u16") (V8HI "u16")
+ (V2SI "u32") (V4SI "u32")
+ (DI "u64") (V2DI "u64")
+ (V2SF "f32") (V4SF "f32")])
+
+;; Element types for extraction of unsigned scalars.
+(define_mode_attr V_uf_sclr [(V8QI "u8") (V16QI "u8")
+ (V4HI "u16") (V8HI "u16")
+ (V2SI "32") (V4SI "32")
+ (V2SF "32") (V4SF "32")])
+
+(define_mode_attr V_sz_elem [(V8QI "8") (V16QI "8")
+ (V4HI "16") (V8HI "16")
+ (V2SI "32") (V4SI "32")
+ (DI "64") (V2DI "64")
+ (V2SF "32") (V4SF "32")])
+
+;; Element sizes for duplicating ARM registers to all elements of a vector.
+(define_mode_attr VD_dup [(V8QI "8") (V4HI "16") (V2SI "32") (V2SF "32")])
+
+;; Opaque integer types for results of pair-forming intrinsics (vtrn, etc.)
+(define_mode_attr V_PAIR [(V8QI "TI") (V16QI "OI")
+ (V4HI "TI") (V8HI "OI")
+ (V2SI "TI") (V4SI "OI")
+ (V2SF "TI") (V4SF "OI")
+ (DI "TI") (V2DI "OI")])
+
+;; Same, but lower-case.
+(define_mode_attr V_pair [(V8QI "ti") (V16QI "oi")
+ (V4HI "ti") (V8HI "oi")
+ (V2SI "ti") (V4SI "oi")
+ (V2SF "ti") (V4SF "oi")
+ (DI "ti") (V2DI "oi")])
+
+;; Operations on two halves of a quadword vector.
+(define_code_macro vqh_ops [plus smin smax umin umax])
+
+;; Same, without unsigned variants (for use with *SFmode pattern).
+(define_code_macro vqhs_ops [plus smin smax])
+
+;; Assembler mnemonics for above codes.
+(define_code_attr VQH_mnem [(plus "vadd") (smin "vmin") (smax "vmax")
+ (umin "vmin") (umax "vmax")])
+
+;; Signs of above, where relevant.
+(define_code_attr VQH_sign [(plus "i") (smin "s") (smax "s") (umin "u")
+ (umax "u")])
+
+;; Extra suffix on some 64-bit insn names (to avoid collision with standard
+;; names which we don't want to define).
+(define_mode_attr V_suf64 [(V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2SF "") (V4SF "")
+ (DI "_neon") (V2DI "")])
+
+;; Scalars to be presented to scalar multiplication instructions
+;; must satisfy the following constraints.
+;; 1. If the mode specifies 16-bit elements, the scalar must be in D0-D7.
+;; 2. If the mode specifies 32-bit elements, the scalar must be in D0-D15.
+;; This mode attribute is used to obtain the correct register constraints.
+(define_mode_attr scalar_mul_constraint [(V4HI "x") (V2SI "t") (V2SF "t")
+ (V8HI "x") (V4SI "t") (V4SF "t")])
+
+;; Attribute used to permit string comparisons against <VQH_mnem> in
+;; neon_type attribute definitions.
+(define_attr "vqh_mnem" "vadd,vmin,vmax" (const_string "vadd"))
+
+;; Classification of NEON instructions for scheduling purposes.
+;; Do not set this attribute and the "type" attribute together in
+;; any one instruction pattern.
+(define_attr "neon_type"
+ "neon_int_1,\
+ neon_int_2,\
+ neon_int_3,\
+ neon_int_4,\
+ neon_int_5,\
+ neon_vqneg_vqabs,\
+ neon_vmov,\
+ neon_vaba,\
+ neon_vsma,\
+ neon_vaba_qqq,\
+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mul_qqq_8_16_32_ddd_32,\
+ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,\
+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,\
+ neon_mla_qqq_8_16,\
+ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,\
+ neon_mla_qqq_32_qqd_32_scalar,\
+ neon_mul_ddd_16_scalar_32_16_long_scalar,\
+ neon_mul_qqd_32_scalar,\
+ neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,\
+ neon_shift_1,\
+ neon_shift_2,\
+ neon_shift_3,\
+ neon_vshl_ddd,\
+ neon_vqshl_vrshl_vqrshl_qqq,\
+ neon_vsra_vrsra,\
+ neon_fp_vadd_ddd_vabs_dd,\
+ neon_fp_vadd_qqq_vabs_qq,\
+ neon_fp_vsum,\
+ neon_fp_vmul_ddd,\
+ neon_fp_vmul_qqd,\
+ neon_fp_vmla_ddd,\
+ neon_fp_vmla_qqq,\
+ neon_fp_vmla_ddd_scalar,\
+ neon_fp_vmla_qqq_scalar,\
+ neon_fp_vrecps_vrsqrts_ddd,\
+ neon_fp_vrecps_vrsqrts_qqq,\
+ neon_bp_simple,\
+ neon_bp_2cycle,\
+ neon_bp_3cycle,\
+ neon_ldr,\
+ neon_str,\
+ neon_vld1_1_2_regs,\
+ neon_vld1_3_4_regs,\
+ neon_vld2_2_regs_vld1_vld2_all_lanes,\
+ neon_vld2_4_regs,\
+ neon_vld3_vld4,\
+ neon_vst1_1_2_regs_vst2_2_regs,\
+ neon_vst1_3_4_regs,\
+ neon_vst2_4_regs_vst3_vst4,\
+ neon_vst3_vst4,\
+ neon_vld1_vld2_lane,\
+ neon_vld3_vld4_lane,\
+ neon_vst1_vst2_lane,\
+ neon_vst3_vst4_lane,\
+ neon_vld3_vld4_all_lanes,\
+ neon_mcr,\
+ neon_mcr_2_mcrr,\
+ neon_mrc,\
+ neon_mrrc,\
+ neon_ldm_2,\
+ neon_stm_2,\
+ none"
+ (const_string "none"))
+
+;; Predicates used for setting the above attribute.
+
+(define_mode_attr Is_float_mode [(V8QI "false") (V16QI "false")
+ (V4HI "false") (V8HI "false")
+ (V2SI "false") (V4SI "false")
+ (V2SF "true") (V4SF "true")
+ (DI "false") (V2DI "false")])
+
+(define_mode_attr Scalar_mul_8_16 [(V8QI "true") (V16QI "true")
+ (V4HI "true") (V8HI "true")
+ (V2SI "false") (V4SI "false")
+ (V2SF "false") (V4SF "false")
+ (DI "false") (V2DI "false")])
+
+
+(define_mode_attr Is_d_reg [(V8QI "true") (V16QI "false")
+ (V4HI "true") (V8HI "false")
+ (V2SI "true") (V4SI "false")
+ (V2SF "true") (V4SF "false")
+ (DI "true") (V2DI "false")])
+
+(define_mode_attr V_mode_nunits [(V8QI "8") (V16QI "16")
+ (V4HI "4") (V8HI "8")
+ (V2SI "2") (V4SI "4")
+ (V2SF "2") (V4SF "4")
+ (DI "1") (V2DI "2")])
+
+;; FIXME: Attributes are probably borked.
+(define_insn "*neon_mov<mode>"
+ [(set (match_operand:VD 0 "nonimmediate_operand"
+ "=w,Uv,w, w, ?r,?w,?r,?r, ?Us")
+ (match_operand:VD 1 "general_operand"
+ " w,w, Dn,Uvi, w, r, r, Usi,r"))]
+ "TARGET_NEON"
+{
+ if (which_alternative == 2)
+ {
+ int width, is_valid;
+ static char templ[40];
+
+ is_valid = neon_immediate_valid_for_move (operands[1], <MODE>mode,
+ &operands[1], &width);
+
+ gcc_assert (is_valid != 0);
+
+ if (width == 0)
+ return "vmov.f32\t%P0, %1 @ <mode>";
+ else
+ sprintf (templ, "vmov.i%d\t%%P0, %%1 @ <mode>", width);
+
+ return templ;
+ }
+
+ /* FIXME: If the memory layout is changed in big-endian mode, output_move_vfp
+ below must be changed to output_move_neon (which will use the
+ element/structure loads/stores), and the constraint changed to 'Un' instead
+ of 'Uv'. */
+
+ switch (which_alternative)
+ {
+ case 0: return "vmov\t%P0, %P1 @ <mode>";
+ case 1: case 3: return output_move_vfp (operands);
+ case 2: gcc_unreachable ();
+ case 4: return "vmov\t%Q0, %R0, %P1 @ <mode>";
+ case 5: return "vmov\t%P0, %Q1, %R1 @ <mode>";
+ default: return output_move_double (operands);
+ }
+}
+ [(set_attr "neon_type" "neon_int_1,*,neon_vmov,*,neon_mrrc,neon_mcr_2_mcrr,*,*,*")
+ (set_attr "type" "*,f_stored,*,f_loadd,*,*,alu,load2,store2")
+ (set_attr "insn" "*,*,*,*,*,*,mov,*,*")
+ (set_attr "length" "4,4,4,4,4,4,8,8,8")
+ (set_attr "pool_range" "*,*,*,1020,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,*,*,1008,*")])
+
+(define_insn "*neon_mov<mode>"
+ [(set (match_operand:VQXMOV 0 "nonimmediate_operand"
+ "=w,Un,w, w, ?r,?w,?r,?r, ?Us")
+ (match_operand:VQXMOV 1 "general_operand"
+ " w,w, Dn,Uni, w, r, r, Usi, r"))]
+ "TARGET_NEON"
+{
+ if (which_alternative == 2)
+ {
+ int width, is_valid;
+ static char templ[40];
+
+ is_valid = neon_immediate_valid_for_move (operands[1], <MODE>mode,
+ &operands[1], &width);
+
+ gcc_assert (is_valid != 0);
+
+ if (width == 0)
+ return "vmov.f32\t%q0, %1 @ <mode>";
+ else
+ sprintf (templ, "vmov.i%d\t%%q0, %%1 @ <mode>", width);
+
+ return templ;
+ }
+
+ switch (which_alternative)
+ {
+ case 0: return "vmov\t%q0, %q1 @ <mode>";
+ case 1: case 3: return output_move_neon (operands);
+ case 2: gcc_unreachable ();
+ case 4: return "vmov\t%Q0, %R0, %e1 @ <mode>\;vmov\t%J0, %K0, %f1";
+ case 5: return "vmov\t%e0, %Q1, %R1 @ <mode>\;vmov\t%f0, %J1, %K1";
+ default: return output_move_quad (operands);
+ }
+}
+ [(set_attr "neon_type" "neon_int_1,neon_stm_2,neon_vmov,neon_ldm_2,\
+ neon_mrrc,neon_mcr_2_mcrr,*,*,*")
+ (set_attr "type" "*,*,*,*,*,*,alu,load4,store4")
+ (set_attr "insn" "*,*,*,*,*,*,mov,*,*")
+ (set_attr "length" "4,8,4,8,8,8,16,8,16")
+ (set_attr "pool_range" "*,*,*,1020,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,*,*,1008,*")])
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ "TARGET_NEON"
+{
+})
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "nonimmediate_operand" "")
+ (match_operand:VSTRUCT 1 "general_operand" ""))]
+ "TARGET_NEON"
+{
+})
+
+;; APPLE LOCAL begin 6160917
+(define_expand "reload_in<mode>"
+ [(parallel [(match_operand:VDQW 0 "s_register_operand" "=w")
+ (match_operand:VDQW 1 "neon_reload_mem_operand" "m")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_NEON"
+ "
+{
+ neon_reload_in (operands, <MODE>mode);
+ DONE;
+}")
+
+(define_expand "reload_out<mode>"
+ [(parallel [(match_operand:VDQW 0 "neon_reload_mem_operand" "=m")
+ (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_NEON"
+ "
+{
+ neon_reload_out (operands, <MODE>mode);
+ DONE;
+}")
+;; APPLE LOCAL end 6160917
+
+(define_insn "*neon_mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "nonimmediate_operand" "=w,Ut,w")
+ (match_operand:VSTRUCT 1 "general_operand" " w,w, Ut"))]
+ "TARGET_NEON"
+{
+ switch (which_alternative)
+ {
+ case 0: return "#";
+ case 1: case 2: return output_move_neon (operands);
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "length" "<V_slen>,<V_slen>,<V_slen>")])
+
+(define_split
+ [(set (match_operand:EI 0 "s_register_operand" "")
+ (match_operand:EI 1 "s_register_operand" ""))]
+ "TARGET_NEON && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (TImode, rdest);
+ src[0] = gen_rtx_REG (TImode, rsrc);
+ dest[1] = gen_rtx_REG (DImode, rdest + 4);
+ src[1] = gen_rtx_REG (DImode, rsrc + 4);
+
+ neon_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_split
+ [(set (match_operand:OI 0 "s_register_operand" "")
+ (match_operand:OI 1 "s_register_operand" ""))]
+ "TARGET_NEON && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (TImode, rdest);
+ src[0] = gen_rtx_REG (TImode, rsrc);
+ dest[1] = gen_rtx_REG (TImode, rdest + 4);
+ src[1] = gen_rtx_REG (TImode, rsrc + 4);
+
+ neon_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_split
+ [(set (match_operand:CI 0 "s_register_operand" "")
+ (match_operand:CI 1 "s_register_operand" ""))]
+ "TARGET_NEON && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[3], src[3];
+
+ dest[0] = gen_rtx_REG (TImode, rdest);
+ src[0] = gen_rtx_REG (TImode, rsrc);
+ dest[1] = gen_rtx_REG (TImode, rdest + 4);
+ src[1] = gen_rtx_REG (TImode, rsrc + 4);
+ dest[2] = gen_rtx_REG (TImode, rdest + 8);
+ src[2] = gen_rtx_REG (TImode, rsrc + 8);
+
+ neon_disambiguate_copy (operands, dest, src, 3);
+})
+
+(define_split
+ [(set (match_operand:XI 0 "s_register_operand" "")
+ (match_operand:XI 1 "s_register_operand" ""))]
+ "TARGET_NEON && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[4], src[4];
+
+ dest[0] = gen_rtx_REG (TImode, rdest);
+ src[0] = gen_rtx_REG (TImode, rsrc);
+ dest[1] = gen_rtx_REG (TImode, rdest + 4);
+ src[1] = gen_rtx_REG (TImode, rsrc + 4);
+ dest[2] = gen_rtx_REG (TImode, rdest + 8);
+ src[2] = gen_rtx_REG (TImode, rsrc + 8);
+ dest[3] = gen_rtx_REG (TImode, rdest + 12);
+ src[3] = gen_rtx_REG (TImode, rsrc + 12);
+
+ neon_disambiguate_copy (operands, dest, src, 4);
+})
+
+; FIXME: Set/extract/init quads.
+
+(define_insn "vec_set<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "+w")
+ (vec_merge:VD
+ (match_operand:VD 3 "s_register_operand" "0")
+ (vec_duplicate:VD
+ (match_operand:<V_elem> 1 "s_register_operand" "r"))
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "i"))))]
+ "TARGET_NEON"
+ "vmov%?.<V_uf_sclr>\t%P0[%c2], %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_mcr")]
+)
+
+(define_insn "vec_set<mode>"
+ [(set (match_operand:VQ 0 "s_register_operand" "+w")
+ (vec_merge:VQ
+ (match_operand:VQ 3 "s_register_operand" "0")
+ (vec_duplicate:VQ
+ (match_operand:<V_elem> 1 "s_register_operand" "r"))
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "i"))))]
+ "TARGET_NEON"
+{
+ int half_elts = GET_MODE_NUNITS (<MODE>mode) / 2;
+ int elt = INTVAL (operands[2]) % half_elts;
+ int hi = (INTVAL (operands[2]) / half_elts) * 2;
+ int regno = REGNO (operands[0]);
+
+ operands[0] = gen_rtx_REG (<V_HALF>mode, regno + hi);
+ operands[2] = GEN_INT (elt);
+
+ return "vmov%?.<V_uf_sclr>\t%P0[%c2], %1";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_mcr")]
+)
+
+(define_insn "vec_setv2di"
+ [(set (match_operand:V2DI 0 "s_register_operand" "+w")
+ (vec_merge:V2DI
+ (match_operand:V2DI 3 "s_register_operand" "0")
+ (vec_duplicate:V2DI
+ (match_operand:DI 1 "s_register_operand" "r"))
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "immediate_operand" "i"))))]
+ "TARGET_NEON"
+{
+  int regno = REGNO (operands[0]) + 2 * INTVAL (operands[2]);
+
+ operands[0] = gen_rtx_REG (DImode, regno);
+
+ return "vmov%?.64\t%P0, %Q1, %R1";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_mcr_2_mcrr")]
+)
+
+(define_insn "vec_extract<mode>"
+ [(set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (vec_select:<V_elem>
+ (match_operand:VD 1 "s_register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+ "TARGET_NEON"
+ "vmov%?.<V_uf_sclr>\t%0, %P1[%c2]"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "vec_extract<mode>"
+ [(set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (vec_select:<V_elem>
+ (match_operand:VQ 1 "s_register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+ "TARGET_NEON"
+{
+ int half_elts = GET_MODE_NUNITS (<MODE>mode) / 2;
+ int elt = INTVAL (operands[2]) % half_elts;
+ int hi = (INTVAL (operands[2]) / half_elts) * 2;
+ int regno = REGNO (operands[1]);
+
+ operands[1] = gen_rtx_REG (<V_HALF>mode, regno + hi);
+ operands[2] = GEN_INT (elt);
+
+ return "vmov%?.<V_uf_sclr>\t%0, %P1[%c2]";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "vec_extractv2di"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (vec_select:DI
+ (match_operand:V2DI 1 "s_register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
+ "TARGET_NEON"
+{
+  int regno = REGNO (operands[1]) + 2 * INTVAL (operands[2]);
+
+ operands[1] = gen_rtx_REG (DImode, regno);
+
+ return "vmov%?.64\t%Q0, %R0, %P1";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_int_1")]
+)
+
+(define_expand "vec_init<mode>"
+ [(match_operand:VDQ 0 "s_register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_NEON"
+{
+ neon_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+;; Doubleword and quadword arithmetic.
+
+;; NOTE: vadd/vsub and some other instructions also support 64-bit integer
+;; element size, which we could potentially use for "long long" operations. We
+;; don't want to do this at present though, because moving values from the
+;; vector unit to the ARM core is currently slow and 64-bit addition (etc.) is
+;; easy to do with ARM instructions anyway.
+
+(define_insn "*add<mode>3_neon"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (plus:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (match_operand:VDQ 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_1")))]
+)
+
+(define_insn "*sub<mode>3_neon"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (minus:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (match_operand:VDQ 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vsub.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_2")))]
+)
+
+(define_insn "*mul<mode>3_neon"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (mult:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (match_operand:VDQ 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmul.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (if_then_else
+ (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32"))
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_qqq_8_16_32_ddd_32")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32")))))]
+)
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w,w")
+ (ior:VDQ (match_operand:VDQ 1 "s_register_operand" "w,0")
+ (match_operand:VDQ 2 "neon_logic_op2" "w,Dl")))]
+ "TARGET_NEON"
+{
+ switch (which_alternative)
+ {
+ case 0: return "vorr\t%<V_reg>0, %<V_reg>1, %<V_reg>2";
+ case 1: return neon_output_logic_immediate ("vorr", &operands[2],
+ <MODE>mode, 0, VALID_NEON_QREG_MODE (<MODE>mode));
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "iordi3_neon"
+ [(set (match_operand:DI 0 "s_register_operand" "=w,w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w,0")
+ (match_operand:DI 2 "neon_logic_op2" "w,Dl")]
+ UNSPEC_VORR))]
+ "TARGET_NEON"
+{
+ switch (which_alternative)
+ {
+ case 0: return "vorr\t%P0, %P1, %P2";
+ case 1: return neon_output_logic_immediate ("vorr", &operands[2],
+ DImode, 0, VALID_NEON_QREG_MODE (DImode));
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+;; The concrete forms of the Neon immediate-logic instructions are vbic and
+;; vorr. We support the pseudo-instruction vand instead, because that
+;; corresponds to the canonical form the middle-end expects to use for
+;; immediate bitwise-ANDs.
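+;;
+;; For example (an illustrative sketch, not verbatim compiler output): an
+;; AND that masks each SImode lane with 0xffffff00 can be emitted as
+;;   vbic.i32  q0, #0x000000ff
+;; since VBIC (immediate) clears the bits set in the immediate, i.e. ANDs
+;; with its complement.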
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w,w")
+ (and:VDQ (match_operand:VDQ 1 "s_register_operand" "w,0")
+ (match_operand:VDQ 2 "neon_inv_logic_op2" "w,DL")))]
+ "TARGET_NEON"
+{
+ switch (which_alternative)
+ {
+ case 0: return "vand\t%<V_reg>0, %<V_reg>1, %<V_reg>2";
+ case 1: return neon_output_logic_immediate ("vand", &operands[2],
+ <MODE>mode, 1, VALID_NEON_QREG_MODE (<MODE>mode));
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "anddi3_neon"
+ [(set (match_operand:DI 0 "s_register_operand" "=w,w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w,0")
+ (match_operand:DI 2 "neon_inv_logic_op2" "w,DL")]
+ UNSPEC_VAND))]
+ "TARGET_NEON"
+{
+ switch (which_alternative)
+ {
+ case 0: return "vand\t%P0, %P1, %P2";
+ case 1: return neon_output_logic_immediate ("vand", &operands[2],
+ DImode, 1, VALID_NEON_QREG_MODE (DImode));
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "orn<mode>3_neon"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (ior:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (not:VDQ (match_operand:VDQ 2 "s_register_operand" "w"))))]
+ "TARGET_NEON"
+ "vorn\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "orndi3_neon"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:DI 2 "s_register_operand" "w")]
+ UNSPEC_VORN))]
+ "TARGET_NEON"
+ "vorn\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "bic<mode>3_neon"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (and:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (not:VDQ (match_operand:VDQ 2 "s_register_operand" "w"))))]
+ "TARGET_NEON"
+ "vbic\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "bicdi3_neon"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:DI 2 "s_register_operand" "w")]
+ UNSPEC_VBIC))]
+ "TARGET_NEON"
+ "vbic\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (xor:VDQ (match_operand:VDQ 1 "s_register_operand" "w")
+ (match_operand:VDQ 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "veor\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "xordi3_neon"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:DI 2 "s_register_operand" "w")]
+ UNSPEC_VEOR))]
+ "TARGET_NEON"
+ "veor\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (not:VDQ (match_operand:VDQ 1 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmvn\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (abs:VDQW (match_operand:VDQW 1 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vabs.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_3")))]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (neg:VDQW (match_operand:VDQW 1 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vneg.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_3")))]
+)
+
+(define_insn "*umin<mode>3_neon"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (umin:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmin.<V_u_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "*umax<mode>3_neon"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (umax:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmax.<V_u_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "*smin<mode>3_neon"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (smin:VDQW (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmin.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "*smax<mode>3_neon"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (smax:VDQW (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vmax.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+; TODO: V2DI shifts are currently disabled because there are bugs in the
+; generic vectorizer code. It ends up creating a V2DI constructor with
+; SImode elements.
+
+(define_insn "ashl<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vshl.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_vshl_ddd")
+ (const_string "neon_shift_3")))]
+)
+
+; Used for implementing arithmetic shift-right, which is a left-shift by a
+; negative amount, with signed operands. This is essentially the same as
+; ashl<mode>3 above, but using an unspec in case GCC tries anything tricky
+; with negative shift amounts.
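+;
+; For example (a sketch): an arithmetic right shift of a V4SI value by a
+; vector of counts expands to a vneg of the count vector followed by
+; "vshl.s32" using the negated counts; see the ashr<mode>3 expander below.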
+
+(define_insn "ashl<mode>3_signed"
+ [(set (match_operand:VDQI 0 "s_register_operand" "=w")
+ (unspec:VDQI [(match_operand:VDQI 1 "s_register_operand" "w")
+ (match_operand:VDQI 2 "s_register_operand" "w")]
+ UNSPEC_ASHIFT_SIGNED))]
+ "TARGET_NEON"
+ "vshl.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_vshl_ddd")
+ (const_string "neon_shift_3")))]
+)
+
+; Used for implementing logical shift-right, which is a left-shift by a negative
+; amount, with unsigned operands.
+
+(define_insn "ashl<mode>3_unsigned"
+ [(set (match_operand:VDQI 0 "s_register_operand" "=w")
+ (unspec:VDQI [(match_operand:VDQI 1 "s_register_operand" "w")
+ (match_operand:VDQI 2 "s_register_operand" "w")]
+ UNSPEC_ASHIFT_UNSIGNED))]
+ "TARGET_NEON"
+ "vshl.<V_u_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_vshl_ddd")
+ (const_string "neon_shift_3")))]
+)
+
+(define_expand "ashr<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "")
+ (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand" "")
+ (match_operand:VDQIW 2 "s_register_operand" "")))]
+ "TARGET_NEON"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_ashl<mode>3_signed (operands[0], operands[1], neg));
+
+ DONE;
+})
+
+(define_expand "lshr<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "")
+ (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand" "")
+ (match_operand:VDQIW 2 "s_register_operand" "")))]
+ "TARGET_NEON"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_ashl<mode>3_unsigned (operands[0], operands[1], neg));
+
+ DONE;
+})
+
+;; Widening operations
+
+;; FIXME: I'm not sure if sign/zero_extend are legal to use on vector modes.
+
+(define_insn "widen_ssum<mode>3"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (plus:<V_widen> (sign_extend:<V_widen>
+ (match_operand:VW 1 "s_register_operand" "%w"))
+ (match_operand:<V_widen> 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vaddw.<V_s_elem>\t%q0, %q2, %P1"
+ [(set_attr "neon_type" "neon_int_3")]
+)
+
+(define_insn "widen_usum<mode>3"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (plus:<V_widen> (zero_extend:<V_widen>
+ (match_operand:VW 1 "s_register_operand" "%w"))
+ (match_operand:<V_widen> 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vaddw.<V_u_elem>\t%q0, %q2, %P1"
+ [(set_attr "neon_type" "neon_int_3")]
+)
+
+;; VEXT can be used to synthesize coarse whole-vector shifts with 8-bit
+;; shift-count granularity. That's good enough for the middle-end's current
+;; needs.
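+;;
+;; For example (a sketch, not verbatim output): vec_shr of a 64-bit vector
+;; by 16 bits becomes "vext.8 Dd, Dm, Dzero, #2", where Dzero names some
+;; scratch register holding zero; the result takes its low bytes from the
+;; top of the source and zero-fills the vacated bytes.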
+
+(define_expand "vec_shr_<mode>"
+ [(match_operand:VDQ 0 "s_register_operand" "")
+ (match_operand:VDQ 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_multiple_of_8_operand" "")]
+ "TARGET_NEON"
+{
+ rtx zero_reg;
+ HOST_WIDE_INT num_bits = INTVAL (operands[2]);
+ const int width = GET_MODE_BITSIZE (<MODE>mode);
+ const enum machine_mode bvecmode = (width == 128) ? V16QImode : V8QImode;
+ rtx (*gen_ext) (rtx, rtx, rtx, rtx) =
+ (width == 128) ? gen_neon_vextv16qi : gen_neon_vextv8qi;
+
+ if (num_bits == width)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+
+ zero_reg = force_reg (bvecmode, CONST0_RTX (bvecmode));
+ operands[0] = gen_lowpart (bvecmode, operands[0]);
+ operands[1] = gen_lowpart (bvecmode, operands[1]);
+
+ emit_insn (gen_ext (operands[0], operands[1], zero_reg,
+ GEN_INT (num_bits / BITS_PER_UNIT)));
+ DONE;
+})
+
+(define_expand "vec_shl_<mode>"
+ [(match_operand:VDQ 0 "s_register_operand" "")
+ (match_operand:VDQ 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_multiple_of_8_operand" "")]
+ "TARGET_NEON"
+{
+ rtx zero_reg;
+ HOST_WIDE_INT num_bits = INTVAL (operands[2]);
+ const int width = GET_MODE_BITSIZE (<MODE>mode);
+ const enum machine_mode bvecmode = (width == 128) ? V16QImode : V8QImode;
+ rtx (*gen_ext) (rtx, rtx, rtx, rtx) =
+ (width == 128) ? gen_neon_vextv16qi : gen_neon_vextv8qi;
+
+ if (num_bits == 0)
+ {
+ emit_move_insn (operands[0], CONST0_RTX (<MODE>mode));
+ DONE;
+ }
+
+ num_bits = width - num_bits;
+
+ zero_reg = force_reg (bvecmode, CONST0_RTX (bvecmode));
+ operands[0] = gen_lowpart (bvecmode, operands[0]);
+ operands[1] = gen_lowpart (bvecmode, operands[1]);
+
+ emit_insn (gen_ext (operands[0], zero_reg, operands[1],
+ GEN_INT (num_bits / BITS_PER_UNIT)));
+ DONE;
+})
+
+;; Helpers for quad-word reduction operations
+
+; Add (or smin, smax...) the low N/2 elements of the N-element vector
+; operand[1] to the high N/2 elements of the same vector. Put the result in
+; operand[0], an N/2-element vector.
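+;
+; For example (a sketch): with a V4SI value {a,b,c,d} in q0, the v4si
+; pattern below emits "vadd.i32 Dd, d0, d1" (modulo register allocation),
+; yielding the two-element result {a+c, b+d}.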
+
+(define_insn "quad_halves_<code>v4si"
+ [(set (match_operand:V2SI 0 "s_register_operand" "=w")
+ (vqh_ops:V2SI
+ (vec_select:V2SI (match_operand:V4SI 1 "s_register_operand" "w")
+ (parallel [(const_int 0) (const_int 1)]))
+ (vec_select:V2SI (match_dup 1)
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_NEON"
+ "<VQH_mnem>.<VQH_sign>32\t%P0, %e1, %f1"
+ [(set_attr "vqh_mnem" "<VQH_mnem>")
+ (set (attr "neon_type")
+ (if_then_else (eq_attr "vqh_mnem" "vadd")
+ (const_string "neon_int_1") (const_string "neon_int_5")))]
+)
+
+(define_insn "quad_halves_<code>v4sf"
+ [(set (match_operand:V2SF 0 "s_register_operand" "=w")
+ (vqhs_ops:V2SF
+ (vec_select:V2SF (match_operand:V4SF 1 "s_register_operand" "w")
+ (parallel [(const_int 0) (const_int 1)]))
+ (vec_select:V2SF (match_dup 1)
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_NEON"
+ "<VQH_mnem>.f32\t%P0, %e1, %f1"
+ [(set_attr "vqh_mnem" "<VQH_mnem>")
+ (set (attr "neon_type")
+ (if_then_else (eq_attr "vqh_mnem" "vadd")
+ (const_string "neon_int_1") (const_string "neon_int_5")))]
+)
+
+(define_insn "quad_halves_<code>v8hi"
+ [(set (match_operand:V4HI 0 "s_register_operand" "+w")
+ (vqh_ops:V4HI
+ (vec_select:V4HI (match_operand:V8HI 1 "s_register_operand" "w")
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)]))
+ (vec_select:V4HI (match_dup 1)
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))))]
+ "TARGET_NEON"
+ "<VQH_mnem>.<VQH_sign>16\t%P0, %e1, %f1"
+ [(set_attr "vqh_mnem" "<VQH_mnem>")
+ (set (attr "neon_type")
+ (if_then_else (eq_attr "vqh_mnem" "vadd")
+ (const_string "neon_int_1") (const_string "neon_int_5")))]
+)
+
+(define_insn "quad_halves_<code>v16qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "+w")
+ (vqh_ops:V8QI
+ (vec_select:V8QI (match_operand:V16QI 1 "s_register_operand" "w")
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))
+ (vec_select:V8QI (match_dup 1)
+ (parallel [(const_int 8) (const_int 9)
+ (const_int 10) (const_int 11)
+ (const_int 12) (const_int 13)
+ (const_int 14) (const_int 15)]))))]
+ "TARGET_NEON"
+ "<VQH_mnem>.<VQH_sign>8\t%P0, %e1, %f1"
+ [(set_attr "vqh_mnem" "<VQH_mnem>")
+ (set (attr "neon_type")
+ (if_then_else (eq_attr "vqh_mnem" "vadd")
+ (const_string "neon_int_1") (const_string "neon_int_5")))]
+)
+
+; FIXME: We wouldn't need the following insns if we could write subregs of
+; vector registers. Make an attempt at removing unnecessary moves, though
+; we're really at the mercy of the register allocator.
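+;
+; Each move_lo_quad pattern below copies operand 1 into the low D half of
+; the quad destination ("vmov %e0, %P1"); when the register allocator has
+; already placed source and destination in the same register, the template
+; is empty and no instruction is emitted.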
+
+(define_insn "move_lo_quad_v4si"
+ [(set (match_operand:V4SI 0 "s_register_operand" "+w")
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "s_register_operand" "w")
+ (vec_select:V2SI (match_dup 0)
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src)
+ return "vmov\t%e0, %P1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "move_lo_quad_v4sf"
+ [(set (match_operand:V4SF 0 "s_register_operand" "+w")
+ (vec_concat:V4SF
+ (match_operand:V2SF 1 "s_register_operand" "w")
+ (vec_select:V2SF (match_dup 0)
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src)
+ return "vmov\t%e0, %P1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "move_lo_quad_v8hi"
+ [(set (match_operand:V8HI 0 "s_register_operand" "+w")
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "s_register_operand" "w")
+ (vec_select:V4HI (match_dup 0)
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src)
+ return "vmov\t%e0, %P1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "move_lo_quad_v16qi"
+ [(set (match_operand:V16QI 0 "s_register_operand" "+w")
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "s_register_operand" "w")
+ (vec_select:V8QI (match_dup 0)
+ (parallel [(const_int 8) (const_int 9)
+ (const_int 10) (const_int 11)
+ (const_int 12) (const_int 13)
+ (const_int 14) (const_int 15)]))))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src)
+ return "vmov\t%e0, %P1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+;; Reduction operations
+
+(define_expand "reduc_splus_<mode>"
+ [(match_operand:VD 0 "s_register_operand" "")
+ (match_operand:VD 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_pairwise_reduce (operands[0], operands[1], <MODE>mode,
+ &gen_neon_vpadd_internal<mode>);
+ DONE;
+})
+
+(define_expand "reduc_splus_<mode>"
+ [(match_operand:VQ 0 "s_register_operand" "")
+ (match_operand:VQ 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx step1 = gen_reg_rtx (<V_HALF>mode);
+ rtx res_d = gen_reg_rtx (<V_HALF>mode);
+
+ emit_insn (gen_quad_halves_plus<mode> (step1, operands[1]));
+ emit_insn (gen_reduc_splus_<V_half> (res_d, step1));
+ emit_insn (gen_move_lo_quad_<mode> (operands[0], res_d));
+
+ DONE;
+})
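+
+; The quad-word reduction above works in two steps: quad_halves_plus folds
+; the high half of the vector onto the low half, reduc_splus_<V_half> then
+; reduces that doubleword pairwise, and move_lo_quad places the result back
+; into the quad destination.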
+
+(define_insn "reduc_splus_v2di"
+ [(set (match_operand:V2DI 0 "s_register_operand" "=w")
+ (unspec:V2DI [(match_operand:V2DI 1 "s_register_operand" "w")]
+ UNSPEC_VPADD))]
+ "TARGET_NEON"
+ "vadd.i64\t%e0, %e1, %f1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+;; NEON does not distinguish between signed and unsigned addition except on
+;; widening operations.
+(define_expand "reduc_uplus_<mode>"
+ [(match_operand:VDQI 0 "s_register_operand" "")
+ (match_operand:VDQI 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_reduc_splus_<mode> (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "reduc_smin_<mode>"
+ [(match_operand:VD 0 "s_register_operand" "")
+ (match_operand:VD 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_pairwise_reduce (operands[0], operands[1], <MODE>mode,
+ &gen_neon_vpsmin<mode>);
+ DONE;
+})
+
+(define_expand "reduc_smin_<mode>"
+ [(match_operand:VQ 0 "s_register_operand" "")
+ (match_operand:VQ 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx step1 = gen_reg_rtx (<V_HALF>mode);
+ rtx res_d = gen_reg_rtx (<V_HALF>mode);
+
+ emit_insn (gen_quad_halves_smin<mode> (step1, operands[1]));
+ emit_insn (gen_reduc_smin_<V_half> (res_d, step1));
+ emit_insn (gen_move_lo_quad_<mode> (operands[0], res_d));
+
+ DONE;
+})
+
+(define_expand "reduc_smax_<mode>"
+ [(match_operand:VD 0 "s_register_operand" "")
+ (match_operand:VD 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_pairwise_reduce (operands[0], operands[1], <MODE>mode,
+ &gen_neon_vpsmax<mode>);
+ DONE;
+})
+
+(define_expand "reduc_smax_<mode>"
+ [(match_operand:VQ 0 "s_register_operand" "")
+ (match_operand:VQ 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx step1 = gen_reg_rtx (<V_HALF>mode);
+ rtx res_d = gen_reg_rtx (<V_HALF>mode);
+
+ emit_insn (gen_quad_halves_smax<mode> (step1, operands[1]));
+ emit_insn (gen_reduc_smax_<V_half> (res_d, step1));
+ emit_insn (gen_move_lo_quad_<mode> (operands[0], res_d));
+
+ DONE;
+})
+
+(define_expand "reduc_umin_<mode>"
+ [(match_operand:VDI 0 "s_register_operand" "")
+ (match_operand:VDI 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_pairwise_reduce (operands[0], operands[1], <MODE>mode,
+ &gen_neon_vpumin<mode>);
+ DONE;
+})
+
+(define_expand "reduc_umin_<mode>"
+ [(match_operand:VQI 0 "s_register_operand" "")
+ (match_operand:VQI 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx step1 = gen_reg_rtx (<V_HALF>mode);
+ rtx res_d = gen_reg_rtx (<V_HALF>mode);
+
+ emit_insn (gen_quad_halves_umin<mode> (step1, operands[1]));
+ emit_insn (gen_reduc_umin_<V_half> (res_d, step1));
+ emit_insn (gen_move_lo_quad_<mode> (operands[0], res_d));
+
+ DONE;
+})
+
+(define_expand "reduc_umax_<mode>"
+ [(match_operand:VDI 0 "s_register_operand" "")
+ (match_operand:VDI 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_pairwise_reduce (operands[0], operands[1], <MODE>mode,
+ &gen_neon_vpumax<mode>);
+ DONE;
+})
+
+(define_expand "reduc_umax_<mode>"
+ [(match_operand:VQI 0 "s_register_operand" "")
+ (match_operand:VQI 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ rtx step1 = gen_reg_rtx (<V_HALF>mode);
+ rtx res_d = gen_reg_rtx (<V_HALF>mode);
+
+ emit_insn (gen_quad_halves_umax<mode> (step1, operands[1]));
+ emit_insn (gen_reduc_umax_<V_half> (res_d, step1));
+ emit_insn (gen_move_lo_quad_<mode> (operands[0], res_d));
+
+ DONE;
+})
+
+(define_insn "neon_vpadd_internal<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")]
+ UNSPEC_VPADD))]
+ "TARGET_NEON"
+ "vpadd.<V_if_elem>\t%P0, %P1, %P2"
+ ;; Assume this schedules like vadd.
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_1")))]
+)
+
+(define_insn "neon_vpsmin<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")]
+ UNSPEC_VPSMIN))]
+ "TARGET_NEON"
+ "vpmin.<V_s_elem>\t%P0, %P1, %P2"
+ ;; Assume this schedules like vmin.
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vpsmax<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")]
+ UNSPEC_VPSMAX))]
+ "TARGET_NEON"
+ "vpmax.<V_s_elem>\t%P0, %P1, %P2"
+ ;; Assume this schedules like vmax.
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vpumin<mode>"
+ [(set (match_operand:VDI 0 "s_register_operand" "=w")
+ (unspec:VDI [(match_operand:VDI 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ UNSPEC_VPUMIN))]
+ "TARGET_NEON"
+ "vpmin.<V_u_elem>\t%P0, %P1, %P2"
+ ;; Assume this schedules like umin.
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "neon_vpumax<mode>"
+ [(set (match_operand:VDI 0 "s_register_operand" "=w")
+ (unspec:VDI [(match_operand:VDI 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")]
+ UNSPEC_VPUMAX))]
+ "TARGET_NEON"
+ "vpmax.<V_u_elem>\t%P0, %P1, %P2"
+ ;; Assume this schedules like umax.
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+;; Saturating arithmetic
+
+; NOTE: Neon supports many more saturating variants of instructions than the
+; following, but these are all that GCC currently understands.
+; FIXME: Actually, GCC doesn't know how to create saturating add/sub by itself
+; yet either, although these patterns may be used by intrinsics when they're
+; added.
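+;
+; For example (a sketch): "vqadd.s8 d0, d1, d2" saturates each lane, so a
+; lane computing 0x7f + 0x01 yields 0x7f rather than wrapping to 0x80.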
+
+(define_insn "*ss_add<mode>_neon"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (ss_plus:VD (match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vqadd.<V_s_elem>\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "*us_add<mode>_neon"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (us_plus:VD (match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vqadd.<V_u_elem>\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "*ss_sub<mode>_neon"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (ss_minus:VD (match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vqsub.<V_s_elem>\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "*us_sub<mode>_neon"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (us_minus:VD (match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")))]
+ "TARGET_NEON"
+ "vqsub.<V_u_elem>\t%P0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+; FIXME: These instructions aren't supported in GCC 4.1, but are documented
+; for the current trunk. Uncomment when this code is merged to a GCC version
+; which supports them.
+
+;(define_insn "*ss_neg<mode>_neon"
+; [(set (match_operand:VD 0 "s_register_operand" "=w")
+;	(ss_neg:VD (match_operand:VD 1 "s_register_operand" "w")))]
+; "TARGET_NEON"
+; "vqneg.<V_s_elem>\t%P0, %P1")
+
+;(define_insn "*ss_ashift<mode>_neon"
+; [(set (match_operand:VD 0 "s_register_operand" "=w")
+; (ss_ashift:VD (match_operand:VD 1 "s_register_operand" "w")
+; (match_operand:VD 2 "s_register_operand" "w")))]
+; "TARGET_NEON"
+; "vqshl.<V_s_elem>\t%P0, %P1, %P2")
+
+;; Patterns for builtins.
+
+; good for plain vadd, vaddq.
+
+(define_insn "neon_vadd<mode>"
+ [(set (match_operand:VDQX 0 "s_register_operand" "=w")
+ (unspec:VDQX [(match_operand:VDQX 1 "s_register_operand" "w")
+ (match_operand:VDQX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VADD))]
+ "TARGET_NEON"
+ "vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_1")))]
+)
+
+; operand 3 represents in bits:
+; bit 0: signed (vs unsigned).
+; bit 1: rounding (vs none).
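+;
+; In the templates below, %T3 prints the type letter ("s" or "u") from bit 0
+; and %O3 prints the optional rounding "r" from bit 1; e.g. (a sketch) an
+; info word of 3 on neon_vhadd<mode> would select "vrhadd.s<size>".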
+
+(define_insn "neon_vaddl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VDI 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VADDL))]
+ "TARGET_NEON"
+ "vaddl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_3")]
+)
+
+(define_insn "neon_vaddw<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VADDW))]
+ "TARGET_NEON"
+ "vaddw.%T3%#<V_sz_elem>\t%q0, %q1, %P2"
+ [(set_attr "neon_type" "neon_int_2")]
+)
+
+; vhadd and vrhadd.
+
+(define_insn "neon_vhadd<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VHADD))]
+ "TARGET_NEON"
+ "v%O3hadd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "neon_vqadd<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQADD))]
+ "TARGET_NEON"
+ "vqadd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "neon_vaddhn<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:VN 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VADDHN))]
+ "TARGET_NEON"
+ "v%O3addhn.<V_if_elem>\t%P0, %q1, %q2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "neon_vmul<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VMUL))]
+ "TARGET_NEON"
+ "vmul.%F3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (if_then_else
+ (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32"))
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_qqq_8_16_32_ddd_32")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32")))))]
+)
+
+(define_insn "neon_vmla<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "0")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:VDQW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMLA))]
+ "TARGET_NEON"
+ "vmla.<V_if_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vmla_ddd")
+ (const_string "neon_fp_vmla_qqq"))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (if_then_else
+ (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_qqq_8_16")
+ (const_string "neon_mla_qqq_32_qqd_32_scalar")))))]
+)
+
+(define_insn "neon_vmlal<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VW 2 "s_register_operand" "w")
+ (match_operand:VW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMLAL))]
+ "TARGET_NEON"
+ "vmlal.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vmls<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "0")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:VDQW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMLS))]
+ "TARGET_NEON"
+ "vmls.<V_if_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vmla_ddd")
+ (const_string "neon_fp_vmla_qqq"))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (if_then_else
+ (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))
+ (if_then_else
+ (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_qqq_8_16")
+ (const_string "neon_mla_qqq_32_qqd_32_scalar")))))]
+)
+
+(define_insn "neon_vmlsl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VW 2 "s_register_operand" "w")
+ (match_operand:VW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMLSL))]
+ "TARGET_NEON"
+ "vmlsl.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vqdmulh<mode>"
+ [(set (match_operand:VMDQI 0 "s_register_operand" "=w")
+ (unspec:VMDQI [(match_operand:VMDQI 1 "s_register_operand" "w")
+ (match_operand:VMDQI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQDMULH))]
+ "TARGET_NEON"
+ "vq%O3dmulh.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32"))
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_qqq_8_16_32_ddd_32")
+ (const_string "neon_mul_qqq_8_16_32_ddd_32"))))]
+)
+
+(define_insn "neon_vqdmlal<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VQDMLAL))]
+ "TARGET_NEON"
+ "vqdmlal.<V_s_elem>\t%q0, %P2, %P3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vqdmlsl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VQDMLSL))]
+ "TARGET_NEON"
+ "vqdmlsl.<V_s_elem>\t%q0, %P2, %P3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vmull<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
+ (match_operand:VW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VMULL))]
+ "TARGET_NEON"
+ "vmull.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")))]
+)
+
+(define_insn "neon_vqdmull<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQDMULL))]
+ "TARGET_NEON"
+ "vqdmull.<V_s_elem>\t%q0, %P1, %P2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")))]
+)
+
+(define_insn "neon_vsub<mode>"
+ [(set (match_operand:VDQX 0 "s_register_operand" "=w")
+ (unspec:VDQX [(match_operand:VDQX 1 "s_register_operand" "w")
+ (match_operand:VDQX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSUB))]
+ "TARGET_NEON"
+ "vsub.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_2")))]
+)
+
+(define_insn "neon_vsubl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VDI 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSUBL))]
+ "TARGET_NEON"
+ "vsubl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_2")]
+)
+
+(define_insn "neon_vsubw<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "w")
+ (match_operand:VDI 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSUBW))]
+ "TARGET_NEON"
+ "vsubw.%T3%#<V_sz_elem>\t%q0, %q1, %P2"
+ [(set_attr "neon_type" "neon_int_2")]
+)
+
+(define_insn "neon_vqsub<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSUB))]
+ "TARGET_NEON"
+ "vqsub.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "neon_vhsub<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VHSUB))]
+ "TARGET_NEON"
+ "vhsub.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "neon_vsubhn<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:VN 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSUBHN))]
+ "TARGET_NEON"
+ "v%O3subhn.<V_if_elem>\t%P0, %q1, %q2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "neon_vceq<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
+ (unspec:<V_cmp_result> [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCEQ))]
+ "TARGET_NEON"
+ "vceq.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vcge<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
+ (unspec:<V_cmp_result> [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCGE))]
+ "TARGET_NEON"
+ "vcge.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vcgt<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
+ (unspec:<V_cmp_result> [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCGT))]
+ "TARGET_NEON"
+ "vcgt.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vcage<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
+ (unspec:<V_cmp_result> [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCAGE))]
+ "TARGET_NEON"
+ "vacge.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vcagt<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "s_register_operand" "=w")
+ (unspec:<V_cmp_result> [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCAGT))]
+ "TARGET_NEON"
+ "vacgt.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vtst<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:VDQIW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VTST))]
+ "TARGET_NEON"
+ "vtst.<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "neon_type" "neon_int_4")]
+)
+
+(define_insn "neon_vabd<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VABD))]
+ "TARGET_NEON"
+ "vabd.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vabdl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
+ (match_operand:VW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VABDL))]
+ "TARGET_NEON"
+ "vabdl.%T3%#<V_sz_elem>\t%q0, %P1, %P2"
+ [(set_attr "neon_type" "neon_int_5")]
+)
+
+(define_insn "neon_vaba<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "0")
+ (match_operand:VDQIW 2 "s_register_operand" "w")
+ (match_operand:VDQIW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VABA))]
+ "TARGET_NEON"
+ "vaba.%T4%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %<V_reg>3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_vaba") (const_string "neon_vaba_qqq")))]
+)
+
+(define_insn "neon_vabal<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VW 2 "s_register_operand" "w")
+ (match_operand:VW 3 "s_register_operand" "w")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VABAL))]
+ "TARGET_NEON"
+ "vabal.%T4%#<V_sz_elem>\t%q0, %P2, %P3"
+ [(set_attr "neon_type" "neon_vaba")]
+)
+
+(define_insn "neon_vmax<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VMAX))]
+ "TARGET_NEON"
+ "vmax.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vmin<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VMIN))]
+ "TARGET_NEON"
+ "vmin.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_int_5")))]
+)
+
+(define_expand "neon_vpadd<mode>"
+ [(match_operand:VD 0 "s_register_operand" "=w")
+ (match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neon_vpadd_internal<mode> (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_insn "neon_vpaddl<mode>"
+ [(set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
+ (unspec:<V_double_width> [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VPADDL))]
+ "TARGET_NEON"
+ "vpaddl.%T2%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ ;; Assume this schedules like vaddl.
+ [(set_attr "neon_type" "neon_int_3")]
+)
+
+(define_insn "neon_vpadal<mode>"
+ [(set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
+ (unspec:<V_double_width> [(match_operand:<V_double_width> 1 "s_register_operand" "0")
+ (match_operand:VDQIW 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VPADAL))]
+ "TARGET_NEON"
+ "vpadal.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
+ ;; Assume this schedules like vpadd.
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "neon_vpmax<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VPMAX))]
+ "TARGET_NEON"
+ "vpmax.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ ;; Assume this schedules like vmax.
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vpmin<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:VD 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VPMIN))]
+ "TARGET_NEON"
+ "vpmin.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ ;; Assume this schedules like vmin.
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_int_5")))]
+)
+
+(define_insn "neon_vrecps<mode>"
+ [(set (match_operand:VCVTF 0 "s_register_operand" "=w")
+ (unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VRECPS))]
+ "TARGET_NEON"
+ "vrecps.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vrecps_vrsqrts_ddd")
+ (const_string "neon_fp_vrecps_vrsqrts_qqq")))]
+)
+
+(define_insn "neon_vrsqrts<mode>"
+ [(set (match_operand:VCVTF 0 "s_register_operand" "=w")
+ (unspec:VCVTF [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:VCVTF 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VRSQRTS))]
+ "TARGET_NEON"
+ "vrsqrts.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vrecps_vrsqrts_ddd")
+ (const_string "neon_fp_vrecps_vrsqrts_qqq")))]
+)
+
+(define_insn "neon_vabs<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VABS))]
+ "TARGET_NEON"
+ "vabs.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+     (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (if_then_else
+ (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq"))
+ (const_string "neon_vqneg_vqabs")))]
+)
+
+(define_insn "neon_vqabs<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQABS))]
+ "TARGET_NEON"
+ "vqabs.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_vqneg_vqabs")]
+)
+
+(define_expand "neon_vneg<mode>"
+ [(match_operand:VDQW 0 "s_register_operand" "")
+ (match_operand:VDQW 1 "s_register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neg<mode>2 (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "neon_vqneg<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQNEG))]
+ "TARGET_NEON"
+ "vqneg.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_vqneg_vqabs")]
+)
+
+(define_insn "neon_vcls<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VCLS))]
+ "TARGET_NEON"
+ "vcls.<V_s_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "neon_vclz<mode>"
+ [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
+ (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VCLZ))]
+ "TARGET_NEON"
+ "vclz.<V_if_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "neon_vcnt<mode>"
+ [(set (match_operand:VE 0 "s_register_operand" "=w")
+ (unspec:VE [(match_operand:VE 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VCNT))]
+ "TARGET_NEON"
+ "vcnt.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_insn "neon_vrecpe<mode>"
+ [(set (match_operand:V32 0 "s_register_operand" "=w")
+ (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VRECPE))]
+ "TARGET_NEON"
+ "vrecpe.<V_u_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vrsqrte<mode>"
+ [(set (match_operand:V32 0 "s_register_operand" "=w")
+ (unspec:V32 [(match_operand:V32 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VRSQRTE))]
+ "TARGET_NEON"
+ "vrsqrte.<V_u_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_expand "neon_vmvn<mode>"
+ [(match_operand:VDQIW 0 "s_register_operand" "")
+ (match_operand:VDQIW 1 "s_register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_one_cmpl<mode>2 (operands[0], operands[1]));
+ DONE;
+})
+
+;; FIXME: 32-bit element sizes are a bit funky (should be output as .32 not
+;; .u32), but the assembler should cope with that.
+
+(define_insn "neon_vget_lane<mode>"
+ [(set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VGET_LANE))]
+ "TARGET_NEON"
+ "vmov%?.%t3%#<V_sz_elem>\t%0, %P1[%c2]"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+; Operand 2 (lane number) is ignored because we can only extract the zeroth lane
+; with this insn. Operand 3 (info word) is ignored because it does nothing
+; useful with 64-bit elements.
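+; As an illustrative note (assuming the usual arm_neon.h spelling), a call
+; such as vget_lane_s64 (x, 0) therefore reduces to the single
+; "vmov rLO, rHI, dN" emitted below, which transfers both 32-bit halves of
+; the D register into a core register pair.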
+
+(define_insn "neon_vget_lanedi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VGET_LANE))]
+ "TARGET_NEON"
+ "vmov%?\t%Q0, %R0, %P1 @ di"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vget_lane<mode>"
+ [(set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:VQ 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VGET_LANE))]
+ "TARGET_NEON"
+{
+ rtx ops[4];
+ int regno = REGNO (operands[1]);
+ unsigned int halfelts = GET_MODE_NUNITS (<MODE>mode) / 2;
+ unsigned int elt = INTVAL (operands[2]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (<V_HALF>mode, regno + 2 * (elt / halfelts));
+ ops[2] = GEN_INT (elt % halfelts);
+ ops[3] = operands[3];
+ output_asm_insn ("vmov%?.%t3%#<V_sz_elem>\t%0, %P1[%c2]", ops);
+
+ return "";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vget_lanev2di"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (unspec:DI [(match_operand:V2DI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VGET_LANE))]
+ "TARGET_NEON"
+{
+ rtx ops[2];
+ unsigned int regno = REGNO (operands[1]);
+ unsigned int elt = INTVAL (operands[2]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno + 2 * elt);
+ output_asm_insn ("vmov%?\t%Q0, %R0, %P1 @ v2di", ops);
+
+ return "";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vset_lane<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:<V_elem> 1 "s_register_operand" "r")
+ (match_operand:VD 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSET_LANE))]
+ "TARGET_NEON"
+ "vmov%?.<V_sz_elem>\t%P0[%c3], %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+; See neon_vget_lanedi comment for reasons operands 2 & 3 are ignored.
+
+(define_insn "neon_vset_lanedi"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "r")
+ (match_operand:DI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSET_LANE))]
+ "TARGET_NEON"
+ "vmov%?\t%P0, %Q1, %R1 @ di"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vset_lane<mode>"
+ [(set (match_operand:VQ 0 "s_register_operand" "=w")
+ (unspec:VQ [(match_operand:<V_elem> 1 "s_register_operand" "r")
+ (match_operand:VQ 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSET_LANE))]
+ "TARGET_NEON"
+{
+ rtx ops[4];
+ unsigned int regno = REGNO (operands[0]);
+ unsigned int halfelts = GET_MODE_NUNITS (<MODE>mode) / 2;
+ unsigned int elt = INTVAL (operands[3]);
+
+ ops[0] = gen_rtx_REG (<V_HALF>mode, regno + 2 * (elt / halfelts));
+ ops[1] = operands[1];
+ ops[2] = GEN_INT (elt % halfelts);
+ output_asm_insn ("vmov%?.<V_sz_elem>\t%P0[%c2], %1", ops);
+
+ return "";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vset_lanev2di"
+ [(set (match_operand:V2DI 0 "s_register_operand" "=w")
+ (unspec:V2DI [(match_operand:DI 1 "s_register_operand" "r")
+ (match_operand:V2DI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSET_LANE))]
+ "TARGET_NEON"
+{
+ rtx ops[2];
+ unsigned int regno = REGNO (operands[0]);
+ unsigned int elt = INTVAL (operands[3]);
+
+ ops[0] = gen_rtx_REG (DImode, regno + 2 * elt);
+ ops[1] = operands[1];
+ output_asm_insn ("vmov%?\t%P0, %Q1, %R1 @ v2di", ops);
+
+ return "";
+}
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_expand "neon_vcreate<mode>"
+ [(match_operand:VDX 0 "s_register_operand" "")
+ (match_operand:DI 1 "general_operand" "")]
+ "TARGET_NEON"
+{
+ rtx src = gen_lowpart (<MODE>mode, operands[1]);
+ emit_move_insn (operands[0], src);
+ DONE;
+})
+
+(define_insn "neon_vdup_n<mode>"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:<V_elem> 1 "s_register_operand" "r")]
+ UNSPEC_VDUP_N))]
+ "TARGET_NEON"
+ "vdup%?.<V_sz_elem>\t%<V_reg>0, %1"
+ ;; Assume this schedules like vmov.
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vdup_ndi"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "r")]
+ UNSPEC_VDUP_N))]
+ "TARGET_NEON"
+ "vmov%?\t%P0, %Q1, %R1"
+ [(set_attr "predicable" "yes")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vdup_nv2di"
+ [(set (match_operand:V2DI 0 "s_register_operand" "=w")
+ (unspec:V2DI [(match_operand:DI 1 "s_register_operand" "r")]
+ UNSPEC_VDUP_N))]
+ "TARGET_NEON"
+ "vmov%?\t%e0, %Q1, %R1\;vmov%?\t%f0, %Q1, %R1"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "8")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vdup_lane<mode>"
+ [(set (match_operand:VD 0 "s_register_operand" "=w")
+ (unspec:VD [(match_operand:VD 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VDUP_LANE))]
+ "TARGET_NEON"
+ "vdup.<V_sz_elem>\t%P0, %P1[%c2]"
+ ;; Assume this schedules like vmov.
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vdup_lane<mode>"
+ [(set (match_operand:VQ 0 "s_register_operand" "=w")
+ (unspec:VQ [(match_operand:<V_HALF> 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VDUP_LANE))]
+ "TARGET_NEON"
+ "vdup.<V_sz_elem>\t%q0, %P1[%c2]"
+ ;; Assume this schedules like vmov.
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+; Scalar index is ignored, since only zero is valid here.
+(define_expand "neon_vdup_lanedi"
+ [(set (match_operand:DI 0 "s_register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VDUP_LANE))]
+ "TARGET_NEON"
+{
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+; Likewise.
+(define_insn "neon_vdup_lanev2di"
+ [(set (match_operand:V2DI 0 "s_register_operand" "=w")
+ (unspec:V2DI [(match_operand:DI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VDUP_LANE))]
+ "TARGET_NEON"
+ "vmov\t%e0, %P1\;vmov\t%f0, %P1"
+ [(set_attr "length" "8")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+;; In this insn, operand 1 should be the low part, and operand 2 the high
+;; part, of the destination vector.
+;; FIXME: A different implementation of this builtin could make it much
+;; more likely that we wouldn't actually need to output anything (we could make
+;; it so that the reg allocator puts things in the right places magically
+;; instead). Lack of subregs for vectors makes that tricky though, I think.
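+;; As an illustrative example: if the result is allocated to q0 (d0/d1) with
+;; operand 1 already in d0 and operand 2 in d1, no code is emitted at all;
+;; with the two inputs reversed, a single "vswp d0, d1" suffices.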
+
+(define_insn "neon_vcombine<mode>"
+ [(set (match_operand:<V_DOUBLE> 0 "s_register_operand" "=w")
+ (unspec:<V_DOUBLE> [(match_operand:VDX 1 "s_register_operand" "w")
+ (match_operand:VDX 2 "s_register_operand" "w")]
+ UNSPEC_VCOMBINE))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src1 = REGNO (operands[1]);
+ int src2 = REGNO (operands[2]);
+ rtx destlo;
+
+ if (src1 == dest && src2 == dest + 2)
+ return "";
+ else if (src2 == dest && src1 == dest + 2)
+ /* Special case of reversed high/low parts. */
+ return "vswp\t%P1, %P2";
+
+ destlo = gen_rtx_REG (<MODE>mode, dest);
+
+ if (!reg_overlap_mentioned_p (operands[2], destlo))
+ {
+ /* Try to avoid unnecessary moves if part of the result is in the right
+ place already. */
+ if (src1 != dest)
+ output_asm_insn ("vmov\t%e0, %P1", operands);
+ if (src2 != dest + 2)
+ output_asm_insn ("vmov\t%f0, %P2", operands);
+ }
+ else
+ {
+ if (src2 != dest + 2)
+ output_asm_insn ("vmov\t%f0, %P2", operands);
+ if (src1 != dest)
+ output_asm_insn ("vmov\t%e0, %P1", operands);
+ }
+
+ return "";
+}
+ ;; We set the neon_type attribute based on the vmov instructions above.
+ [(set_attr "length" "8")
+ (set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vget_high<mode>"
+ [(set (match_operand:<V_HALF> 0 "s_register_operand" "=w")
+ (unspec:<V_HALF> [(match_operand:VQX 1 "s_register_operand" "w")]
+ UNSPEC_VGET_HIGH))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src + 2)
+ return "vmov\t%P0, %f1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vget_low<mode>"
+ [(set (match_operand:<V_HALF> 0 "s_register_operand" "=w")
+ (unspec:<V_HALF> [(match_operand:VQX 1 "s_register_operand" "w")]
+ UNSPEC_VGET_LOW))]
+ "TARGET_NEON"
+{
+ int dest = REGNO (operands[0]);
+ int src = REGNO (operands[1]);
+
+ if (dest != src)
+ return "vmov\t%P0, %e1";
+ else
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vcvt<mode>"
+ [(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
+ (unspec:<V_CVTTO> [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VCVT))]
+ "TARGET_NEON"
+ "vcvt.%T2%#32.f32\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vcvt<mode>"
+ [(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
+ (unspec:<V_CVTTO> [(match_operand:VCVTI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VCVT))]
+ "TARGET_NEON"
+ "vcvt.f32.%T2%#32\t%<V_reg>0, %<V_reg>1"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vcvt_n<mode>"
+ [(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
+ (unspec:<V_CVTTO> [(match_operand:VCVTF 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCVT_N))]
+ "TARGET_NEON"
+ "vcvt.%T3%#32.f32\t%<V_reg>0, %<V_reg>1, %2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vcvt_n<mode>"
+ [(set (match_operand:<V_CVTTO> 0 "s_register_operand" "=w")
+ (unspec:<V_CVTTO> [(match_operand:VCVTI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VCVT_N))]
+ "TARGET_NEON"
+ "vcvt.f32.%T3%#32\t%<V_reg>0, %<V_reg>1, %2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_fp_vadd_ddd_vabs_dd")
+ (const_string "neon_fp_vadd_qqq_vabs_qq")))]
+)
+
+(define_insn "neon_vmovn<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VMOVN))]
+ "TARGET_NEON"
+ "vmovn.<V_if_elem>\t%P0, %q1"
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vqmovn<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQMOVN))]
+ "TARGET_NEON"
+ "vqmovn.%T2%#<V_sz_elem>\t%P0, %q1"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vqmovun<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VQMOVUN))]
+ "TARGET_NEON"
+ "vqmovun.<V_s_elem>\t%P0, %q1"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vmovl<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VMOVL))]
+ "TARGET_NEON"
+ "vmovl.%T2%#<V_sz_elem>\t%q0, %P1"
+ [(set_attr "neon_type" "neon_shift_1")]
+)
+
+(define_insn "neon_vmul_lane<mode>"
+ [(set (match_operand:VMD 0 "s_register_operand" "=w")
+ (unspec:VMD [(match_operand:VMD 1 "s_register_operand" "w")
+ (match_operand:VMD 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMUL_LANE))]
+ "TARGET_NEON"
+ "vmul.<V_if_elem>\t%P0, %P1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmul_ddd")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_16_scalar_32_16_long_scalar")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar"))))]
+)
+
+(define_insn "neon_vmul_lane<mode>"
+ [(set (match_operand:VMQ 0 "s_register_operand" "=w")
+ (unspec:VMQ [(match_operand:VMQ 1 "s_register_operand" "w")
+ (match_operand:<V_HALF> 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMUL_LANE))]
+ "TARGET_NEON"
+ "vmul.<V_if_elem>\t%q0, %q1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmul_qqd")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
+ (const_string "neon_mul_qqd_32_scalar"))))]
+)
+
+(define_insn "neon_vmull_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
+ (match_operand:VMDI 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VMULL_LANE))]
+ "TARGET_NEON"
+ "vmull.%T4%#<V_sz_elem>\t%q0, %P1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_16_scalar_32_16_long_scalar")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")))]
+)
+
+(define_insn "neon_vqdmull_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VMDI 1 "s_register_operand" "w")
+ (match_operand:VMDI 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VQDMULL_LANE))]
+ "TARGET_NEON"
+ "vqdmull.<V_s_elem>\t%q0, %P1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_16_scalar_32_16_long_scalar")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")))]
+)
+
+(define_insn "neon_vqdmulh_lane<mode>"
+ [(set (match_operand:VMQI 0 "s_register_operand" "=w")
+ (unspec:VMQI [(match_operand:VMQI 1 "s_register_operand" "w")
+ (match_operand:<V_HALF> 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VQDMULH_LANE))]
+ "TARGET_NEON"
+ "vq%O4dmulh.%T4%#<V_sz_elem>\t%q0, %q1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
+ (const_string "neon_mul_qqd_32_scalar")))]
+)
+
+(define_insn "neon_vqdmulh_lane<mode>"
+ [(set (match_operand:VMDI 0 "s_register_operand" "=w")
+ (unspec:VMDI [(match_operand:VMDI 1 "s_register_operand" "w")
+ (match_operand:VMDI 2 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VQDMULH_LANE))]
+ "TARGET_NEON"
+ "vq%O4dmulh.%T4%#<V_sz_elem>\t%P0, %P1, %P2[%c3]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mul_ddd_16_scalar_32_16_long_scalar")
+ (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")))]
+)
+
+(define_insn "neon_vmla_lane<mode>"
+ [(set (match_operand:VMD 0 "s_register_operand" "=w")
+ (unspec:VMD [(match_operand:VMD 1 "s_register_operand" "0")
+ (match_operand:VMD 2 "s_register_operand" "w")
+ (match_operand:VMD 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLA_LANE))]
+ "TARGET_NEON"
+ "vmla.<V_if_elem>\t%P0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmla_ddd_scalar")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))))]
+)
+
+(define_insn "neon_vmla_lane<mode>"
+ [(set (match_operand:VMQ 0 "s_register_operand" "=w")
+ (unspec:VMQ [(match_operand:VMQ 1 "s_register_operand" "0")
+ (match_operand:VMQ 2 "s_register_operand" "w")
+ (match_operand:<V_HALF> 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLA_LANE))]
+ "TARGET_NEON"
+ "vmla.<V_if_elem>\t%q0, %q2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmla_qqq_scalar")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+ (const_string "neon_mla_qqq_32_qqd_32_scalar"))))]
+)
+
+(define_insn "neon_vmlal_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLAL_LANE))]
+ "TARGET_NEON"
+ "vmlal.%T5%#<V_sz_elem>\t%q0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vqdmlal_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VQDMLAL_LANE))]
+ "TARGET_NEON"
+ "vqdmlal.<V_s_elem>\t%q0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vmls_lane<mode>"
+ [(set (match_operand:VMD 0 "s_register_operand" "=w")
+ (unspec:VMD [(match_operand:VMD 1 "s_register_operand" "0")
+ (match_operand:VMD 2 "s_register_operand" "w")
+ (match_operand:VMD 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLS_LANE))]
+ "TARGET_NEON"
+ "vmls.<V_if_elem>\t%P0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmla_ddd_scalar")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long"))))]
+)
+
+(define_insn "neon_vmls_lane<mode>"
+ [(set (match_operand:VMQ 0 "s_register_operand" "=w")
+ (unspec:VMQ [(match_operand:VMQ 1 "s_register_operand" "0")
+ (match_operand:VMQ 2 "s_register_operand" "w")
+ (match_operand:<V_HALF> 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLS_LANE))]
+ "TARGET_NEON"
+ "vmls.<V_if_elem>\t%q0, %q2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_float_mode>") (const_int 0))
+ (const_string "neon_fp_vmla_qqq_scalar")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
+ (const_string "neon_mla_qqq_32_qqd_32_scalar"))))]
+)
+
+(define_insn "neon_vmlsl_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VMLSL_LANE))]
+ "TARGET_NEON"
+ "vmlsl.%T5%#<V_sz_elem>\t%q0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+(define_insn "neon_vqdmlsl_lane<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:<V_widen> 1 "s_register_operand" "0")
+ (match_operand:VMDI 2 "s_register_operand" "w")
+ (match_operand:VMDI 3 "s_register_operand"
+ "<scalar_mul_constraint>")
+ (match_operand:SI 4 "immediate_operand" "i")
+ (match_operand:SI 5 "immediate_operand" "i")]
+ UNSPEC_VQDMLSL_LANE))]
+ "TARGET_NEON"
+ "vqdmlsl.<V_s_elem>\t%q0, %P2, %P3[%c4]"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Scalar_mul_8_16>") (const_int 0))
+ (const_string "neon_mla_ddd_16_scalar_qdd_32_16_long_scalar")
+ (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")))]
+)
+
+; FIXME: For the "_n" multiply/multiply-accumulate insns, we copy a value in a
+; core register into a temp register, then use a scalar taken from that. This
+; isn't an optimal solution if e.g. the scalar has just been read from memory
+; or extracted from another vector. The latter case it's currently better to
+; use the "_lane" variant, and the former case can probably be implemented
+; using vld1_lane, but that hasn't been done yet.
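+; As a sketch of the expansion (taking neon_vmul_n<mode> below as the
+; example): the expander loads the scalar into lane 0 of a fresh vector
+; temporary with gen_neon_vset_lane<mode>, then emits gen_neon_vmul_lane<mode>
+; against that zero lane. The other "_n" expanders follow the same shape.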
+
+(define_expand "neon_vmul_n<mode>"
+ [(match_operand:VMD 0 "s_register_operand" "")
+ (match_operand:VMD 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vmul_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vmul_n<mode>"
+ [(match_operand:VMQ 0 "s_register_operand" "")
+ (match_operand:VMQ 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<V_HALF>mode);
+ emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vmul_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vmull_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:VMDI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vmull_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, operands[3]));
+ DONE;
+})
+
+(define_expand "neon_vqdmull_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:VMDI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vqdmull_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, const0_rtx));
+ DONE;
+})
+
+(define_expand "neon_vqdmulh_n<mode>"
+ [(match_operand:VMDI 0 "s_register_operand" "")
+ (match_operand:VMDI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vqdmulh_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, operands[3]));
+ DONE;
+})
+
+(define_expand "neon_vqdmulh_n<mode>"
+ [(match_operand:VMQI 0 "s_register_operand" "")
+ (match_operand:VMQI 1 "s_register_operand" "")
+ (match_operand:<V_elem> 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<V_HALF>mode);
+ emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[2], tmp, const0_rtx));
+ emit_insn (gen_neon_vqdmulh_lane<mode> (operands[0], operands[1], tmp,
+ const0_rtx, operands[3]));
+ DONE;
+})
+
+(define_expand "neon_vmla_n<mode>"
+ [(match_operand:VMD 0 "s_register_operand" "")
+ (match_operand:VMD 1 "s_register_operand" "")
+ (match_operand:VMD 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmla_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vmla_n<mode>"
+ [(match_operand:VMQ 0 "s_register_operand" "")
+ (match_operand:VMQ 1 "s_register_operand" "")
+ (match_operand:VMQ 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<V_HALF>mode);
+ emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmla_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vmlal_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmlal_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vqdmlal_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vqdmlal_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vmls_n<mode>"
+ [(match_operand:VMD 0 "s_register_operand" "")
+ (match_operand:VMD 1 "s_register_operand" "")
+ (match_operand:VMD 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmls_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vmls_n<mode>"
+ [(match_operand:VMQ 0 "s_register_operand" "")
+ (match_operand:VMQ 1 "s_register_operand" "")
+ (match_operand:VMQ 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<V_HALF>mode);
+ emit_insn (gen_neon_vset_lane<V_half> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmls_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vmlsl_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vmlsl_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_expand "neon_vqdmlsl_n<mode>"
+ [(match_operand:<V_widen> 0 "s_register_operand" "")
+ (match_operand:<V_widen> 1 "s_register_operand" "")
+ (match_operand:VMDI 2 "s_register_operand" "")
+ (match_operand:<V_elem> 3 "s_register_operand" "")
+ (match_operand:SI 4 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neon_vset_lane<mode> (tmp, operands[3], tmp, const0_rtx));
+ emit_insn (gen_neon_vqdmlsl_lane<mode> (operands[0], operands[1], operands[2],
+ tmp, const0_rtx, operands[4]));
+ DONE;
+})
+
+(define_insn "neon_vext<mode>"
+ [(set (match_operand:VDQX 0 "s_register_operand" "=w")
+ (unspec:VDQX [(match_operand:VDQX 1 "s_register_operand" "w")
+ (match_operand:VDQX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VEXT))]
+ "TARGET_NEON"
+ "vext.<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2, %3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_bp_simple")
+ (const_string "neon_bp_2cycle")))]
+)
+
+(define_insn "neon_vrev64<mode>"
+ [(set (match_operand:VDQ 0 "s_register_operand" "=w")
+ (unspec:VDQ [(match_operand:VDQ 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VREV64))]
+ "TARGET_NEON"
+ "vrev64.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vrev32<mode>"
+ [(set (match_operand:VX 0 "s_register_operand" "=w")
+ (unspec:VX [(match_operand:VX 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VREV32))]
+ "TARGET_NEON"
+ "vrev32.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+(define_insn "neon_vrev16<mode>"
+ [(set (match_operand:VE 0 "s_register_operand" "=w")
+ (unspec:VE [(match_operand:VE 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VREV16))]
+ "TARGET_NEON"
+ "vrev16.<V_sz_elem>\t%<V_reg>0, %<V_reg>1"
+ [(set_attr "neon_type" "neon_bp_simple")]
+)
+
+; vbsl_* intrinsics may compile to any of vbsl/vbif/vbit depending on register
+; allocation. For an intrinsic of the form:
+; rD = vbsl_* (rS, rN, rM)
+; we can use any of:
+; vbsl rS, rN, rM (if D = S)
+; vbit rD, rN, rS (if D = M, so 1-bits in rS choose bits from rN, else rM)
+; vbif rD, rM, rS (if D = N, so 0-bits in rS choose bits from rM, else rN)
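+; As a worked example of the semantics: each result bit comes from rN where
+; the corresponding bit of the selector rS is 1, and from rM where it is 0;
+; the three encodings above differ only in which operand is tied to rD.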
+
+(define_insn "neon_vbsl<mode>_internal"
+ [(set (match_operand:VDQX 0 "s_register_operand" "=w,w,w")
+ (unspec:VDQX [(match_operand:VDQX 1 "s_register_operand" " 0,w,w")
+ (match_operand:VDQX 2 "s_register_operand" " w,w,0")
+ (match_operand:VDQX 3 "s_register_operand" " w,0,w")]
+ UNSPEC_VBSL))]
+ "TARGET_NEON"
+ "@
+ vbsl\t%<V_reg>0, %<V_reg>2, %<V_reg>3
+ vbit\t%<V_reg>0, %<V_reg>2, %<V_reg>1
+ vbif\t%<V_reg>0, %<V_reg>3, %<V_reg>1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+
+(define_expand "neon_vbsl<mode>"
+ [(set (match_operand:VDQX 0 "s_register_operand" "")
+ (unspec:VDQX [(match_operand:<V_cmp_result> 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "s_register_operand" "")
+ (match_operand:VDQX 3 "s_register_operand" "")]
+ UNSPEC_VBSL))]
+ "TARGET_NEON"
+{
+ /* We can't alias operands together if they have different modes. */
+ operands[1] = gen_lowpart (<MODE>mode, operands[1]);
+})
+
+(define_insn "neon_vshl<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSHL))]
+ "TARGET_NEON"
+ "v%O3shl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_vshl_ddd")
+ (const_string "neon_shift_3")))]
+)
+
+(define_insn "neon_vqshl<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSHL))]
+ "TARGET_NEON"
+ "vq%O3shl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_shift_2")
+ (const_string "neon_vqshl_vrshl_vqrshl_qqq")))]
+)
+
+(define_insn "neon_vshr_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSHR_N))]
+ "TARGET_NEON"
+ "v%O3shr.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2"
+ [(set_attr "neon_type" "neon_shift_1")]
+)
+
+(define_insn "neon_vshrn_n<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSHRN_N))]
+ "TARGET_NEON"
+ "v%O3shrn.<V_if_elem>\t%P0, %q1, %2"
+ [(set_attr "neon_type" "neon_shift_1")]
+)
+
+(define_insn "neon_vqshrn_n<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSHRN_N))]
+ "TARGET_NEON"
+ "vq%O3shrn.%T3%#<V_sz_elem>\t%P0, %q1, %2"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vqshrun_n<mode>"
+ [(set (match_operand:<V_narrow> 0 "s_register_operand" "=w")
+ (unspec:<V_narrow> [(match_operand:VN 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSHRUN_N))]
+ "TARGET_NEON"
+ "vq%O3shrun.%T3%#<V_sz_elem>\t%P0, %q1, %2"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vshl_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSHL_N))]
+ "TARGET_NEON"
+ "vshl.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %2"
+ [(set_attr "neon_type" "neon_shift_1")]
+)
+
+(define_insn "neon_vqshl_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSHL_N))]
+ "TARGET_NEON"
+ "vqshl.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vqshlu_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VQSHLU_N))]
+ "TARGET_NEON"
+ "vqshlu.%T3%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2"
+ [(set_attr "neon_type" "neon_shift_2")]
+)
+
+(define_insn "neon_vshll_n<mode>"
+ [(set (match_operand:<V_widen> 0 "s_register_operand" "=w")
+ (unspec:<V_widen> [(match_operand:VW 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSHLL_N))]
+ "TARGET_NEON"
+ "vshll.%T3%#<V_sz_elem>\t%q0, %P1, %2"
+ [(set_attr "neon_type" "neon_shift_1")]
+)
+
+(define_insn "neon_vsra_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "0")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ UNSPEC_VSRA_N))]
+ "TARGET_NEON"
+ "v%O4sra.%T4%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3"
+ [(set_attr "neon_type" "neon_vsra_vrsra")]
+)
+
+(define_insn "neon_vsri_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "0")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSRI))]
+ "TARGET_NEON"
+ "vsri.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_shift_1")
+ (const_string "neon_shift_3")))]
+)
+
+(define_insn "neon_vsli_n<mode>"
+ [(set (match_operand:VDQIX 0 "s_register_operand" "=w")
+ (unspec:VDQIX [(match_operand:VDQIX 1 "s_register_operand" "0")
+ (match_operand:VDQIX 2 "s_register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VSLI))]
+ "TARGET_NEON"
+ "vsli.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_shift_1")
+ (const_string "neon_shift_3")))]
+)
+
+(define_insn "neon_vtbl1v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:V8QI 1 "s_register_operand" "w")
+ (match_operand:V8QI 2 "s_register_operand" "w")]
+ UNSPEC_VTBL))]
+ "TARGET_NEON"
+ "vtbl.8\t%P0, {%P1}, %P2"
+ [(set_attr "neon_type" "neon_bp_2cycle")]
+)
+
+(define_insn "neon_vtbl2v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:TI 1 "s_register_operand" "w")
+ (match_operand:V8QI 2 "s_register_operand" "w")]
+ UNSPEC_VTBL))]
+ "TARGET_NEON"
+{
+ rtx ops[4];
+ int tabbase = REGNO (operands[1]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = operands[2];
+ output_asm_insn ("vtbl.8\t%P0, {%P1, %P2}, %P3", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_2cycle")]
+)
+
+(define_insn "neon_vtbl3v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:EI 1 "s_register_operand" "w")
+ (match_operand:V8QI 2 "s_register_operand" "w")]
+ UNSPEC_VTBL))]
+ "TARGET_NEON"
+{
+ rtx ops[5];
+ int tabbase = REGNO (operands[1]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = gen_rtx_REG (V8QImode, tabbase + 4);
+ ops[4] = operands[2];
+ output_asm_insn ("vtbl.8\t%P0, {%P1, %P2, %P3}, %P4", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_3cycle")]
+)
+
+(define_insn "neon_vtbl4v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:OI 1 "s_register_operand" "w")
+ (match_operand:V8QI 2 "s_register_operand" "w")]
+ UNSPEC_VTBL))]
+ "TARGET_NEON"
+{
+ rtx ops[6];
+ int tabbase = REGNO (operands[1]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = gen_rtx_REG (V8QImode, tabbase + 4);
+ ops[4] = gen_rtx_REG (V8QImode, tabbase + 6);
+ ops[5] = operands[2];
+ output_asm_insn ("vtbl.8\t%P0, {%P1, %P2, %P3, %P4}, %P5", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_3cycle")]
+)
+
+(define_insn "neon_vtbx1v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:V8QI 1 "s_register_operand" "0")
+ (match_operand:V8QI 2 "s_register_operand" "w")
+ (match_operand:V8QI 3 "s_register_operand" "w")]
+ UNSPEC_VTBX))]
+ "TARGET_NEON"
+ "vtbx.8\t%P0, {%P2}, %P3"
+ [(set_attr "neon_type" "neon_bp_2cycle")]
+)
+
+(define_insn "neon_vtbx2v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:V8QI 1 "s_register_operand" "0")
+ (match_operand:TI 2 "s_register_operand" "w")
+ (match_operand:V8QI 3 "s_register_operand" "w")]
+ UNSPEC_VTBX))]
+ "TARGET_NEON"
+{
+ rtx ops[4];
+ int tabbase = REGNO (operands[2]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = operands[3];
+ output_asm_insn ("vtbx.8\t%P0, {%P1, %P2}, %P3", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_2cycle")]
+)
+
+(define_insn "neon_vtbx3v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:V8QI 1 "s_register_operand" "0")
+ (match_operand:EI 2 "s_register_operand" "w")
+ (match_operand:V8QI 3 "s_register_operand" "w")]
+ UNSPEC_VTBX))]
+ "TARGET_NEON"
+{
+ rtx ops[5];
+ int tabbase = REGNO (operands[2]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = gen_rtx_REG (V8QImode, tabbase + 4);
+ ops[4] = operands[3];
+ output_asm_insn ("vtbx.8\t%P0, {%P1, %P2, %P3}, %P4", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_3cycle")]
+)
+
+(define_insn "neon_vtbx4v8qi"
+ [(set (match_operand:V8QI 0 "s_register_operand" "=w")
+ (unspec:V8QI [(match_operand:V8QI 1 "s_register_operand" "0")
+ (match_operand:OI 2 "s_register_operand" "w")
+ (match_operand:V8QI 3 "s_register_operand" "w")]
+ UNSPEC_VTBX))]
+ "TARGET_NEON"
+{
+ rtx ops[6];
+ int tabbase = REGNO (operands[2]);
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (V8QImode, tabbase);
+ ops[2] = gen_rtx_REG (V8QImode, tabbase + 2);
+ ops[3] = gen_rtx_REG (V8QImode, tabbase + 4);
+ ops[4] = gen_rtx_REG (V8QImode, tabbase + 6);
+ ops[5] = operands[3];
+ output_asm_insn ("vtbx.8\t%P0, {%P1, %P2, %P3, %P4}, %P5", ops);
+
+ return "";
+}
+ [(set_attr "neon_type" "neon_bp_3cycle")]
+)
+
+(define_insn "neon_vtrn<mode>_internal"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "0")]
+ UNSPEC_VTRN1))
+ (set (match_operand:VDQW 2 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 3 "s_register_operand" "2")]
+ UNSPEC_VTRN2))]
+ "TARGET_NEON"
+ "vtrn.<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_bp_simple")
+ (const_string "neon_bp_3cycle")))]
+)
+
+(define_expand "neon_vtrn<mode>"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")]
+ "TARGET_NEON"
+{
+ neon_emit_pair_result_insn (<MODE>mode, gen_neon_vtrn<mode>_internal,
+ operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "neon_vzip<mode>_internal"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "0")]
+ UNSPEC_VZIP1))
+ (set (match_operand:VDQW 2 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 3 "s_register_operand" "2")]
+ UNSPEC_VZIP2))]
+ "TARGET_NEON"
+ "vzip.<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_bp_simple")
+ (const_string "neon_bp_3cycle")))]
+)
+
+(define_expand "neon_vzip<mode>"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")]
+ "TARGET_NEON"
+{
+ neon_emit_pair_result_insn (<MODE>mode, gen_neon_vzip<mode>_internal,
+ operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "neon_vuzp<mode>_internal"
+ [(set (match_operand:VDQW 0 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 1 "s_register_operand" "0")]
+ UNSPEC_VUZP1))
+ (set (match_operand:VDQW 2 "s_register_operand" "=w")
+ (unspec:VDQW [(match_operand:VDQW 3 "s_register_operand" "2")]
+ UNSPEC_VUZP2))]
+ "TARGET_NEON"
+ "vuzp.<V_sz_elem>\t%<V_reg>0, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (ne (symbol_ref "<Is_d_reg>") (const_int 0))
+ (const_string "neon_bp_simple")
+ (const_string "neon_bp_3cycle")))]
+)
+
+(define_expand "neon_vuzp<mode>"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:VDQW 1 "s_register_operand" "w")
+ (match_operand:VDQW 2 "s_register_operand" "w")]
+ "TARGET_NEON"
+{
+ neon_emit_pair_result_insn (<MODE>mode, gen_neon_vuzp<mode>_internal,
+ operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv8qi<mode>"
+ [(match_operand:V8QI 0 "s_register_operand" "")
+ (match_operand:VDX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv4hi<mode>"
+ [(match_operand:V4HI 0 "s_register_operand" "")
+ (match_operand:VDX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv2si<mode>"
+ [(match_operand:V2SI 0 "s_register_operand" "")
+ (match_operand:VDX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv2sf<mode>"
+ [(match_operand:V2SF 0 "s_register_operand" "")
+ (match_operand:VDX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretdi<mode>"
+ [(match_operand:DI 0 "s_register_operand" "")
+ (match_operand:VDX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv16qi<mode>"
+ [(match_operand:V16QI 0 "s_register_operand" "")
+ (match_operand:VQX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv8hi<mode>"
+ [(match_operand:V8HI 0 "s_register_operand" "")
+ (match_operand:VQX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv4si<mode>"
+ [(match_operand:V4SI 0 "s_register_operand" "")
+ (match_operand:VQX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv4sf<mode>"
+ [(match_operand:V4SF 0 "s_register_operand" "")
+ (match_operand:VQX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "neon_vreinterpretv2di<mode>"
+ [(match_operand:V2DI 0 "s_register_operand" "")
+ (match_operand:VQX 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "neon_vld1<mode>"
+ [(set (match_operand:VDQX 0 "s_register_operand" "=w")
+ (unspec:VDQX [(mem:VDQX (match_operand:SI 1 "s_register_operand" "r"))]
+ UNSPEC_VLD1))]
+ "TARGET_NEON"
+ "vld1.<V_sz_elem>\t%h0, [%1]"
+ [(set_attr "neon_type" "neon_vld1_1_2_regs")]
+)
+
+(define_insn "neon_vld1_lane<mode>"
+ [(set (match_operand:VDX 0 "s_register_operand" "=w")
+ (unspec:VDX [(mem:<V_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:VDX 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VLD1_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ if (max == 1)
+ return "vld1.<V_sz_elem>\t%P0, [%1]";
+ else
+ return "vld1.<V_sz_elem>\t{%P0[%c3]}, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_mode_nunits>") (const_string "2"))
+ (const_string "neon_vld1_1_2_regs")
+ (const_string "neon_vld1_vld2_lane")))]
+)
+
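+;; For Q-register modes, the variant below rebases a lane in the upper half
+;; onto the high D register (regno + 2) and reduces the lane index by half
+;; the element count before printing the load.
+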
+(define_insn "neon_vld1_lane<mode>"
+ [(set (match_operand:VQX 0 "s_register_operand" "=w")
+ (unspec:VQX [(mem:<V_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:VQX 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ UNSPEC_VLD1_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ operands[3] = GEN_INT (lane);
+ }
+ operands[0] = gen_rtx_REG (<V_HALF>mode, regno);
+ if (max == 2)
+ return "vld1.<V_sz_elem>\t%P0, [%1]";
+ else
+ return "vld1.<V_sz_elem>\t{%P0[%c3]}, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_mode_nunits>") (const_string "2"))
+ (const_string "neon_vld1_1_2_regs")
+ (const_string "neon_vld1_vld2_lane")))]
+)
+
+(define_insn "neon_vld1_dup<mode>"
+ [(set (match_operand:VDX 0 "s_register_operand" "=w")
+ (unspec:VDX [(mem:<V_elem> (match_operand:SI 1 "s_register_operand" "r"))]
+ UNSPEC_VLD1_DUP))]
+ "TARGET_NEON"
+{
+ if (GET_MODE_NUNITS (<MODE>mode) > 1)
+ return "vld1.<V_sz_elem>\t{%P0[]}, [%1]";
+ else
+ return "vld1.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (gt (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
+ (const_string "neon_vld1_1_2_regs")))]
+)
+
+(define_insn "neon_vld1_dup<mode>"
+ [(set (match_operand:VQX 0 "s_register_operand" "=w")
+ (unspec:VQX [(mem:<V_elem> (match_operand:SI 1 "s_register_operand" "r"))]
+ UNSPEC_VLD1_DUP))]
+ "TARGET_NEON"
+{
+ if (GET_MODE_NUNITS (<MODE>mode) > 2)
+ return "vld1.<V_sz_elem>\t{%e0[], %f0[]}, [%1]";
+ else
+ return "vld1.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (gt (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
+ (const_string "neon_vld1_1_2_regs")))]
+)
+
+(define_insn "neon_vst1<mode>"
+ [(set (mem:VDQX (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:VDQX [(match_operand:VDQX 1 "s_register_operand" "w")]
+ UNSPEC_VST1))]
+ "TARGET_NEON"
+ "vst1.<V_sz_elem>\t%h1, [%0]"
+ [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs")])
+
+(define_insn "neon_vst1_lane<mode>"
+ [(set (mem:<V_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (vec_select:<V_elem>
+ (match_operand:VDX 1 "s_register_operand" "w")
+ (parallel [(match_operand:SI 2 "neon_lane_number" "i")])))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ if (max == 1)
+ return "vst1.<V_sz_elem>\t{%P1}, [%0]";
+ else
+ return "vst1.<V_sz_elem>\t{%P1[%c2]}, [%0]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vst1_1_2_regs_vst2_2_regs")
+ (const_string "neon_vst1_vst2_lane")))])
+
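+;; As with vld1_lane above, the Q-register variant below rebases the lane
+;; onto the appropriate D-register half before printing the store.
+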
+(define_insn "neon_vst1_lane<mode>"
+ [(set (mem:<V_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (vec_select:<V_elem>
+ (match_operand:VQX 1 "s_register_operand" "w")
+ (parallel [(match_operand:SI 2 "neon_lane_number" "i")])))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ operands[2] = GEN_INT (lane);
+ }
+ operands[1] = gen_rtx_REG (<V_HALF>mode, regno);
+ if (max == 2)
+ return "vst1.<V_sz_elem>\t{%P1}, [%0]";
+ else
+ return "vst1.<V_sz_elem>\t{%P1[%c2]}, [%0]";
+}
+ [(set_attr "neon_type" "neon_vst1_vst2_lane")]
+)
+
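+;; NEON has no 64-bit de-interleaving loads, so for 64-bit element sizes the
+;; vld2 pattern below degenerates to a plain vld1.64 of two registers.
+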
+(define_insn "neon_vld2<mode>"
+ [(set (match_operand:TI 0 "s_register_operand" "=w")
+ (unspec:TI [(mem:TI (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD2))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vld1.64\t%h0, [%1]";
+ else
+ return "vld2.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_sz_elem>") (const_string "64"))
+ (const_string "neon_vld1_1_2_regs")
+ (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")))]
+)
+
+(define_insn "neon_vld2<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(mem:OI (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD2))]
+ "TARGET_NEON"
+ "vld2.<V_sz_elem>\t%h0, [%1]"
+ [(set_attr "neon_type" "neon_vld2_2_regs_vld1_vld2_all_lanes")])
+
+(define_insn "neon_vld2_lane<mode>"
+ [(set (match_operand:TI 0 "s_register_operand" "=w")
+ (unspec:TI [(mem:<V_two_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:TI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD2_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[4];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = operands[1];
+ ops[3] = operands[3];
+ output_asm_insn ("vld2.<V_sz_elem>\t{%P0[%c3], %P1[%c3]}, [%2]", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld1_vld2_lane")]
+)
+
+(define_insn "neon_vld2_lane<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(mem:<V_two_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:OI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD2_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[4];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 4);
+ ops[2] = operands[1];
+ ops[3] = GEN_INT (lane);
+ output_asm_insn ("vld2.<V_sz_elem>\t{%P0[%c3], %P1[%c3]}, [%2]", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld1_vld2_lane")]
+)
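+
+;; A worked example of the lane renumbering above, assuming the register
+;; numbering used in this port (consecutive D registers are two register
+;; numbers apart): for a v8hi vld2q (max = 8), lane 5 lies in the upper
+;; halves, so it becomes lane 1 and regno advances by one D register;
+;; the insn then loads {d1[1], d3[1]} rather than {d0[5], d2[5]}.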
+
+(define_insn "neon_vld2_dup<mode>"
+ [(set (match_operand:TI 0 "s_register_operand" "=w")
+ (unspec:TI [(mem:<V_two_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD2_DUP))]
+ "TARGET_NEON"
+{
+ if (GET_MODE_NUNITS (<MODE>mode) > 1)
+ return "vld2.<V_sz_elem>\t{%e0[], %f0[]}, [%1]";
+ else
+ return "vld1.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (gt (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
+ (const_string "neon_vld1_1_2_regs")))]
+)
+
+(define_insn "neon_vst2<mode>"
+ [(set (mem:TI (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:TI [(match_operand:TI 1 "s_register_operand" "w")
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST2))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vst1.64\t%h1, [%0]";
+ else
+ return "vst2.<V_sz_elem>\t%h1, [%0]";
+}
+  [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs")]
+)
+
+(define_insn "neon_vst2<mode>"
+ [(set (mem:OI (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:OI [(match_operand:OI 1 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST2))]
+ "TARGET_NEON"
+ "vst2.<V_sz_elem>\t%h1, [%0]"
+ [(set_attr "neon_type" "neon_vst1_1_2_regs_vst2_2_regs")]
+)
+
+(define_insn "neon_vst2_lane<mode>"
+ [(set (mem:<V_two_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_two_elem>
+ [(match_operand:TI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST2_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[4];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 2);
+ ops[3] = operands[2];
+ output_asm_insn ("vst2.<V_sz_elem>\t{%P1[%c3], %P2[%c3]}, [%0]", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst1_vst2_lane")]
+)
+
+(define_insn "neon_vst2_lane<mode>"
+ [(set (mem:<V_two_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_two_elem>
+ [(match_operand:OI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST2_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[4];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = GEN_INT (lane);
+ output_asm_insn ("vst2.<V_sz_elem>\t{%P1[%c3], %P2[%c3]}, [%0]", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst1_vst2_lane")]
+)
+
+(define_insn "neon_vld3<mode>"
+ [(set (match_operand:EI 0 "s_register_operand" "=w")
+ (unspec:EI [(mem:EI (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vld1.64\t%h0, [%1]";
+ else
+ return "vld3.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_sz_elem>") (const_string "64"))
+ (const_string "neon_vld1_1_2_regs")
+ (const_string "neon_vld3_vld4")))]
+)
+
+(define_expand "neon_vld3<mode>"
+ [(match_operand:CI 0 "s_register_operand" "=w")
+ (match_operand:SI 1 "s_register_operand" "+r")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neon_vld3qa<mode> (operands[0], operands[0],
+ operands[1], operands[1]));
+ emit_insn (gen_neon_vld3qb<mode> (operands[0], operands[0],
+ operands[1], operands[1]));
+ DONE;
+})
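+
+;; The expander above splits a quad-word vld3 into the two halves emitted
+;; by neon_vld3qa and neon_vld3qb below: each loads three D registers (the
+;; even-numbered, then the odd-numbered halves of the three Q registers)
+;; and post-increments the address by 24 bytes, covering all 48 bytes of
+;; the CImode value.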
+
+(define_insn "neon_vld3qa<mode>"
+ [(set (match_operand:CI 0 "s_register_operand" "=w")
+ (unspec:CI [(mem:CI (match_operand:SI 3 "s_register_operand" "2"))
+ (match_operand:CI 1 "s_register_operand" "0")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3A))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (plus:SI (match_dup 3)
+ (const_int 24)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[0]);
+ rtx ops[4];
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 4);
+ ops[2] = gen_rtx_REG (DImode, regno + 8);
+ ops[3] = operands[2];
+ output_asm_insn ("vld3.<V_sz_elem>\t{%P0, %P1, %P2}, [%3]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4")]
+)
+
+(define_insn "neon_vld3qb<mode>"
+ [(set (match_operand:CI 0 "s_register_operand" "=w")
+ (unspec:CI [(mem:CI (match_operand:SI 3 "s_register_operand" "2"))
+ (match_operand:CI 1 "s_register_operand" "0")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3B))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (plus:SI (match_dup 3)
+ (const_int 24)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[0]);
+ rtx ops[4];
+ ops[0] = gen_rtx_REG (DImode, regno + 2);
+ ops[1] = gen_rtx_REG (DImode, regno + 6);
+ ops[2] = gen_rtx_REG (DImode, regno + 10);
+ ops[3] = operands[2];
+ output_asm_insn ("vld3.<V_sz_elem>\t{%P0, %P1, %P2}, [%3]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4")]
+)
+
+(define_insn "neon_vld3_lane<mode>"
+ [(set (match_operand:EI 0 "s_register_operand" "=w")
+ (unspec:EI [(mem:<V_three_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:EI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = operands[1];
+ ops[4] = operands[3];
+ output_asm_insn ("vld3.<V_sz_elem>\t{%P0[%c4], %P1[%c4], %P2[%c4]}, [%3]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4_lane")]
+)
+
+(define_insn "neon_vld3_lane<mode>"
+ [(set (match_operand:CI 0 "s_register_operand" "=w")
+ (unspec:CI [(mem:<V_three_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:CI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 4);
+ ops[2] = gen_rtx_REG (DImode, regno + 8);
+ ops[3] = operands[1];
+ ops[4] = GEN_INT (lane);
+ output_asm_insn ("vld3.<V_sz_elem>\t{%P0[%c4], %P1[%c4], %P2[%c4]}, [%3]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4_lane")]
+)
+
+(define_insn "neon_vld3_dup<mode>"
+ [(set (match_operand:EI 0 "s_register_operand" "=w")
+ (unspec:EI [(mem:<V_three_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD3_DUP))]
+ "TARGET_NEON"
+{
+ if (GET_MODE_NUNITS (<MODE>mode) > 1)
+ {
+ int regno = REGNO (operands[0]);
+ rtx ops[4];
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = operands[1];
+ output_asm_insn ("vld3.<V_sz_elem>\t{%P0[], %P1[], %P2[]}, [%3]", ops);
+ return "";
+ }
+ else
+ return "vld1.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (gt (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vld3_vld4_all_lanes")
+ (const_string "neon_vld1_1_2_regs")))])
+
+(define_insn "neon_vst3<mode>"
+ [(set (mem:EI (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:EI [(match_operand:EI 1 "s_register_operand" "w")
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST3))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vst1.64\t%h1, [%0]";
+ else
+ return "vst3.<V_sz_elem>\t%h1, [%0]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_sz_elem>") (const_string "64"))
+ (const_string "neon_vst1_1_2_regs_vst2_2_regs")
+ (const_string "neon_vst2_4_regs_vst3_vst4")))])
+
+(define_expand "neon_vst3<mode>"
+ [(match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:CI 1 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neon_vst3qa<mode> (operands[0], operands[0], operands[1]));
+ emit_insn (gen_neon_vst3qb<mode> (operands[0], operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "neon_vst3qa<mode>"
+ [(set (mem:EI (match_operand:SI 1 "s_register_operand" "0"))
+ (unspec:EI [(match_operand:CI 2 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST3A))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1)
+ (const_int 24)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[2]);
+ rtx ops[4];
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 8);
+ output_asm_insn ("vst3.<V_sz_elem>\t{%P1, %P2, %P3}, [%0]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst2_4_regs_vst3_vst4")]
+)
+
+(define_insn "neon_vst3qb<mode>"
+ [(set (mem:EI (match_operand:SI 1 "s_register_operand" "0"))
+ (unspec:EI [(match_operand:CI 2 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST3B))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1)
+ (const_int 24)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[2]);
+ rtx ops[4];
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 6);
+ ops[3] = gen_rtx_REG (DImode, regno + 10);
+ output_asm_insn ("vst3.<V_sz_elem>\t{%P1, %P2, %P3}, [%0]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst2_4_regs_vst3_vst4")]
+)
+
+(define_insn "neon_vst3_lane<mode>"
+ [(set (mem:<V_three_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_three_elem>
+ [(match_operand:EI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST3_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[5];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 2);
+ ops[3] = gen_rtx_REG (DImode, regno + 4);
+ ops[4] = operands[2];
+ output_asm_insn ("vst3.<V_sz_elem>\t{%P1[%c4], %P2[%c4], %P3[%c4]}, [%0]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst3_vst4_lane")]
+)
+
+(define_insn "neon_vst3_lane<mode>"
+ [(set (mem:<V_three_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_three_elem>
+ [(match_operand:CI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST3_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[5];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 8);
+ ops[4] = GEN_INT (lane);
+ output_asm_insn ("vst3.<V_sz_elem>\t{%P1[%c4], %P2[%c4], %P3[%c4]}, [%0]",
+ ops);
+ return "";
+}
+[(set_attr "neon_type" "neon_vst3_vst4_lane")])
+
+(define_insn "neon_vld4<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(mem:OI (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vld1.64\t%h0, [%1]";
+ else
+ return "vld4.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_sz_elem>") (const_string "64"))
+ (const_string "neon_vld1_1_2_regs")
+ (const_string "neon_vld3_vld4")))]
+)
+
+(define_expand "neon_vld4<mode>"
+ [(match_operand:XI 0 "s_register_operand" "=w")
+ (match_operand:SI 1 "s_register_operand" "+r")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neon_vld4qa<mode> (operands[0], operands[0],
+ operands[1], operands[1]));
+ emit_insn (gen_neon_vld4qb<mode> (operands[0], operands[0],
+ operands[1], operands[1]));
+ DONE;
+})
+
+(define_insn "neon_vld4qa<mode>"
+ [(set (match_operand:XI 0 "s_register_operand" "=w")
+ (unspec:XI [(mem:XI (match_operand:SI 3 "s_register_operand" "2"))
+ (match_operand:XI 1 "s_register_operand" "0")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4A))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (plus:SI (match_dup 3)
+ (const_int 32)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 4);
+ ops[2] = gen_rtx_REG (DImode, regno + 8);
+ ops[3] = gen_rtx_REG (DImode, regno + 12);
+ ops[4] = operands[2];
+ output_asm_insn ("vld4.<V_sz_elem>\t{%P0, %P1, %P2, %P3}, [%4]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4")]
+)
+
+(define_insn "neon_vld4qb<mode>"
+ [(set (match_operand:XI 0 "s_register_operand" "=w")
+ (unspec:XI [(mem:XI (match_operand:SI 3 "s_register_operand" "2"))
+ (match_operand:XI 1 "s_register_operand" "0")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4B))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (plus:SI (match_dup 3)
+ (const_int 32)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ ops[0] = gen_rtx_REG (DImode, regno + 2);
+ ops[1] = gen_rtx_REG (DImode, regno + 6);
+ ops[2] = gen_rtx_REG (DImode, regno + 10);
+ ops[3] = gen_rtx_REG (DImode, regno + 14);
+ ops[4] = operands[2];
+ output_asm_insn ("vld4.<V_sz_elem>\t{%P0, %P1, %P2, %P3}, [%4]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4")]
+)
+
+(define_insn "neon_vld4_lane<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(mem:<V_four_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:OI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[6];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 6);
+ ops[4] = operands[1];
+ ops[5] = operands[3];
+ output_asm_insn ("vld4.<V_sz_elem>\t{%P0[%c5], %P1[%c5], %P2[%c5], %P3[%c5]}, [%4]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4_lane")]
+)
+
+(define_insn "neon_vld4_lane<mode>"
+ [(set (match_operand:XI 0 "s_register_operand" "=w")
+ (unspec:XI [(mem:<V_four_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:XI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[3]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[0]);
+ rtx ops[6];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 4);
+ ops[2] = gen_rtx_REG (DImode, regno + 8);
+ ops[3] = gen_rtx_REG (DImode, regno + 12);
+ ops[4] = operands[1];
+ ops[5] = GEN_INT (lane);
+ output_asm_insn ("vld4.<V_sz_elem>\t{%P0[%c5], %P1[%c5], %P2[%c5], %P3[%c5]}, [%4]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vld3_vld4_lane")]
+)
+
+(define_insn "neon_vld4_dup<mode>"
+ [(set (match_operand:OI 0 "s_register_operand" "=w")
+ (unspec:OI [(mem:<V_four_elem> (match_operand:SI 1 "s_register_operand" "r"))
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VLD4_DUP))]
+ "TARGET_NEON"
+{
+ if (GET_MODE_NUNITS (<MODE>mode) > 1)
+ {
+ int regno = REGNO (operands[0]);
+ rtx ops[5];
+ ops[0] = gen_rtx_REG (DImode, regno);
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 6);
+ ops[4] = operands[1];
+ output_asm_insn ("vld4.<V_sz_elem>\t{%P0[], %P1[], %P2[], %P3[]}, [%4]",
+ ops);
+ return "";
+ }
+ else
+ return "vld1.<V_sz_elem>\t%h0, [%1]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (gt (const_string "<V_mode_nunits>") (const_string "1"))
+ (const_string "neon_vld3_vld4_all_lanes")
+ (const_string "neon_vld1_1_2_regs")))]
+)
+
+(define_insn "neon_vst4<mode>"
+ [(set (mem:OI (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:OI [(match_operand:OI 1 "s_register_operand" "w")
+ (unspec:VDX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST4))]
+ "TARGET_NEON"
+{
+ if (<V_sz_elem> == 64)
+ return "vst1.64\t%h1, [%0]";
+ else
+ return "vst4.<V_sz_elem>\t%h1, [%0]";
+}
+ [(set (attr "neon_type")
+ (if_then_else (eq (const_string "<V_sz_elem>") (const_string "64"))
+ (const_string "neon_vst1_1_2_regs_vst2_2_regs")
+ (const_string "neon_vst2_4_regs_vst3_vst4")))]
+)
+
+(define_expand "neon_vst4<mode>"
+ [(match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:XI 1 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_NEON"
+{
+ emit_insn (gen_neon_vst4qa<mode> (operands[0], operands[0], operands[1]));
+ emit_insn (gen_neon_vst4qb<mode> (operands[0], operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "neon_vst4qa<mode>"
+ [(set (mem:OI (match_operand:SI 1 "s_register_operand" "0"))
+ (unspec:OI [(match_operand:XI 2 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST4A))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1)
+ (const_int 32)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[2]);
+ rtx ops[5];
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 8);
+ ops[4] = gen_rtx_REG (DImode, regno + 12);
+ output_asm_insn ("vst4.<V_sz_elem>\t{%P1, %P2, %P3, %P4}, [%0]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst2_4_regs_vst3_vst4")]
+)
+
+(define_insn "neon_vst4qb<mode>"
+ [(set (mem:OI (match_operand:SI 1 "s_register_operand" "0"))
+ (unspec:OI [(match_operand:XI 2 "s_register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST4B))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1)
+ (const_int 32)))]
+ "TARGET_NEON"
+{
+ int regno = REGNO (operands[2]);
+ rtx ops[5];
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno + 2);
+ ops[2] = gen_rtx_REG (DImode, regno + 6);
+ ops[3] = gen_rtx_REG (DImode, regno + 10);
+ ops[4] = gen_rtx_REG (DImode, regno + 14);
+ output_asm_insn ("vst4.<V_sz_elem>\t{%P1, %P2, %P3, %P4}, [%0]!", ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst2_4_regs_vst3_vst4")]
+)
+
+(define_insn "neon_vst4_lane<mode>"
+ [(set (mem:<V_four_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_four_elem>
+ [(match_operand:OI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST4_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[6];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 2);
+ ops[3] = gen_rtx_REG (DImode, regno + 4);
+ ops[4] = gen_rtx_REG (DImode, regno + 6);
+ ops[5] = operands[2];
+ output_asm_insn ("vst4.<V_sz_elem>\t{%P1[%c5], %P2[%c5], %P3[%c5], %P4[%c5]}, [%0]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst3_vst4_lane")]
+)
+
+(define_insn "neon_vst4_lane<mode>"
+ [(set (mem:<V_four_elem> (match_operand:SI 0 "s_register_operand" "r"))
+ (unspec:<V_four_elem>
+ [(match_operand:XI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")
+ (unspec:VMQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_VST4_LANE))]
+ "TARGET_NEON"
+{
+ HOST_WIDE_INT lane = INTVAL (operands[2]);
+ HOST_WIDE_INT max = GET_MODE_NUNITS (<MODE>mode);
+ int regno = REGNO (operands[1]);
+ rtx ops[6];
+ if (lane < 0 || lane >= max)
+ error ("lane out of range");
+ else if (lane >= max / 2)
+ {
+ lane -= max / 2;
+ regno += 2;
+ }
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_REG (DImode, regno);
+ ops[2] = gen_rtx_REG (DImode, regno + 4);
+ ops[3] = gen_rtx_REG (DImode, regno + 8);
+ ops[4] = gen_rtx_REG (DImode, regno + 12);
+ ops[5] = GEN_INT (lane);
+ output_asm_insn ("vst4.<V_sz_elem>\t{%P1[%c5], %P2[%c5], %P3[%c5], %P4[%c5]}, [%0]",
+ ops);
+ return "";
+}
+ [(set_attr "neon_type" "neon_vst3_vst4_lane")]
+)
+
+(define_expand "neon_vand<mode>"
+ [(match_operand:VDQX 0 "s_register_operand" "")
+ (match_operand:VDQX 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "neon_inv_logic_op2" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_and<mode>3<V_suf64> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "neon_vorr<mode>"
+ [(match_operand:VDQX 0 "s_register_operand" "")
+ (match_operand:VDQX 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "neon_logic_op2" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_ior<mode>3<V_suf64> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "neon_veor<mode>"
+ [(match_operand:VDQX 0 "s_register_operand" "")
+ (match_operand:VDQX 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "s_register_operand" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_xor<mode>3<V_suf64> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "neon_vbic<mode>"
+ [(match_operand:VDQX 0 "s_register_operand" "")
+ (match_operand:VDQX 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "neon_logic_op2" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_bic<mode>3_neon (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "neon_vorn<mode>"
+ [(match_operand:VDQX 0 "s_register_operand" "")
+ (match_operand:VDQX 1 "s_register_operand" "")
+ (match_operand:VDQX 2 "neon_inv_logic_op2" "")
+ (match_operand:SI 3 "immediate_operand" "")]
+ "TARGET_NEON"
+{
+ emit_insn (gen_orn<mode>3_neon (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; APPLE LOCAL 6150859 begin use NEON instructions for SF math
+;; When possible, use the NEON instructions for single precision floating
+;; point operations. On NEON CPUs, the VFP instructions are not scoreboarded,
+;; so they perform poorly compared to the NEON ones. We use 32x2 vector
+;; instructions and just ignore the upper values.
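+;;
+;; For example, a scalar addition such as "a + b" on floats is expected to
+;; be emitted as "vadd.f32 d0, d1, d2": the %p operand prefix appears to
+;; print the D register overlapping the SF operand, and lane 1 of the
+;; destination is simply left undefined.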
+
+(define_insn "*addsf3_neon"
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON"
+ "vadd.f32\\t%p0, %p1, %p2"
+ [(set_attr "neon_type" "neon_fp_vadd_ddd_vabs_dd")]
+)
+
+(define_insn "*subsf3_neon"
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON"
+ "vsub.f32\\t%p0, %p1, %p2"
+ [(set_attr "neon_type" "neon_fp_vadd_ddd_vabs_dd")]
+)
+
+(define_insn "*mulsf3_neon"
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON"
+ "vmul.f32\\t%p0, %p1, %p2"
+ [(set_attr "neon_type" "neon_fp_vadd_ddd_vabs_dd")]
+)
+
+;; APPLE LOCAL begin 6197406 disable vmla.f32 and vmls.f32
+;; The multiply-accumulate (vmla.f32) and multiply-subtract (vmls.f32)
+;; instructions cause a pipeline flush, so they are not useful in
+;; general. They are disabled for now.
+;; Multiply-accumulate insns
+;; 0 = 1 * 2 + 0
+; (define_insn "*mulsf3addsf_neon"
+; [(set (match_operand:SF 0 "s_register_operand" "=t")
+; (plus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+; (match_operand:SF 3 "s_register_operand" "t"))
+; (match_operand:SF 1 "s_register_operand" "0")))]
+; "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON"
+; "vmla.f32\\t%p0, %p2, %p3"
+; [(set_attr "neon_type" "neon_fp_vmla_ddd")]
+; )
+
+;; APPLE LOCAL begin 6251664 reversed operands for vmls.f32
+;; 0 = 0 - (1 * 2)
+; (define_insn "*mulsf3subsf_neon"
+; [(set (match_operand:SF 0 "s_register_operand" "=t")
+; (minus:SF (match_operand:SF 1 "s_register_operand" "0")
+; (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+; (match_operand:SF 3 "s_register_operand" "t"))))]
+; "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_NEON"
+; "vmls.f32\\t%p0, %p2, %p3"
+; [(set_attr "neon_type" "neon_fp_vmla_ddd")]
+; )
+;; APPLE LOCAL end 6251664 reversed operands for vmls.f32
+;; APPLE LOCAL end 6197406 disable vmla.f32 and vmls.f32
+;; APPLE LOCAL 6150859 end use NEON instructions for SF math
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/neon.ml b/gcc-4.2.1-5666.3/gcc/config/arm/neon.ml
new file mode 100644
index 000000000..0d12b671c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/neon.ml
@@ -0,0 +1,1827 @@
+(* APPLE LOCAL file v7 support. Merge from Codesourcery *)
+(* Common code for ARM NEON header file, documentation and test case
+ generators.
+
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. *)
+
+(* Shorthand types for vector elements. *)
+type elts = S8 | S16 | S32 | S64 | F32 | U8 | U16 | U32 | U64 | P8 | P16
+ | I8 | I16 | I32 | I64 | B8 | B16 | B32 | B64 | Conv of elts * elts
+ | Cast of elts * elts | NoElts
+
+type eltclass = Signed | Unsigned | Float | Poly | Int | Bits
+ | ConvClass of eltclass * eltclass | NoType
+
+(* These vector types correspond directly to C types. *)
+type vectype = T_int8x8 | T_int8x16
+ | T_int16x4 | T_int16x8
+ | T_int32x2 | T_int32x4
+ | T_int64x1 | T_int64x2
+ | T_uint8x8 | T_uint8x16
+ | T_uint16x4 | T_uint16x8
+ | T_uint32x2 | T_uint32x4
+ | T_uint64x1 | T_uint64x2
+ | T_float32x2 | T_float32x4
+ | T_poly8x8 | T_poly8x16
+ | T_poly16x4 | T_poly16x8
+ | T_immediate of int * int
+ | T_int8 | T_int16
+ | T_int32 | T_int64
+ | T_uint8 | T_uint16
+ | T_uint32 | T_uint64
+ | T_poly8 | T_poly16
+ | T_float32 | T_arrayof of int * vectype
+ | T_ptrto of vectype | T_const of vectype
+ | T_void | T_intQI
+ | T_intHI | T_intSI
+ | T_intDI
+
+(* The meanings of the following are:
+ TImode : "Tetra", two registers (four words).
+ EImode : "hExa", three registers (six words).
+ OImode : "Octa", four registers (eight words).
+ CImode : "dodeCa", six registers (twelve words).
+ XImode : "heXadeca", eight registers (sixteen words).
+*)
+
+type inttype = B_TImode | B_EImode | B_OImode | B_CImode | B_XImode
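+
+(* For instance, a triple of Q registers (as loaded by vld3q) occupies
+   3 * 128 bits = twelve 32-bit words, i.e. six D registers, so it is
+   carried in CImode. *)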
+
+type shape_elt = Dreg | Qreg | Corereg | Immed | VecArray of int * shape_elt
+ | PtrTo of shape_elt | CstPtrTo of shape_elt
+ (* These next ones are used only in the test generator. *)
+ | Element_of_dreg (* Used for "lane" variants. *)
+ | Element_of_qreg (* Likewise. *)
+ | All_elements_of_dreg (* Used for "dup" variants. *)
+
+type shape_form = All of int * shape_elt
+ | Long
+ | Long_noreg of shape_elt
+ | Wide
+ | Wide_noreg of shape_elt
+ | Narrow
+ | Long_imm
+ | Narrow_imm
+ | Binary_imm of shape_elt
+ | Use_operands of shape_elt array
+ | By_scalar of shape_elt
+ | Unary_scalar of shape_elt
+ | Wide_lane
+ | Wide_scalar
+ | Pair_result of shape_elt
+
+type arity = Arity0 of vectype
+ | Arity1 of vectype * vectype
+ | Arity2 of vectype * vectype * vectype
+ | Arity3 of vectype * vectype * vectype * vectype
+ | Arity4 of vectype * vectype * vectype * vectype * vectype
+
+type vecmode = V8QI | V4HI | V2SI | V2SF | DI
+ | V16QI | V8HI | V4SI | V4SF | V2DI
+ | QI | HI | SI | SF
+
+type opcode =
+ (* Binary ops. *)
+ Vadd
+ | Vmul
+ | Vmla
+ | Vmls
+ | Vsub
+ | Vceq
+ | Vcge
+ | Vcgt
+ | Vcle
+ | Vclt
+ | Vcage
+ | Vcagt
+ | Vcale
+ | Vcalt
+ | Vtst
+ | Vabd
+ | Vaba
+ | Vmax
+ | Vmin
+ | Vpadd
+ | Vpada
+ | Vpmax
+ | Vpmin
+ | Vrecps
+ | Vrsqrts
+ | Vshl
+ | Vshr_n
+ | Vshl_n
+ | Vsra_n
+ | Vsri
+ | Vsli
+ (* Logic binops. *)
+ | Vand
+ | Vorr
+ | Veor
+ | Vbic
+ | Vorn
+ | Vbsl
+ (* Ops with scalar. *)
+ | Vmul_lane
+ | Vmla_lane
+ | Vmls_lane
+ | Vmul_n
+ | Vmla_n
+ | Vmls_n
+ | Vmull_n
+ | Vmull_lane
+ | Vqdmull_n
+ | Vqdmull_lane
+ | Vqdmulh_n
+ | Vqdmulh_lane
+ (* Unary ops. *)
+ | Vabs
+ | Vneg
+ | Vcls
+ | Vclz
+ | Vcnt
+ | Vrecpe
+ | Vrsqrte
+ | Vmvn
+ (* Vector extract. *)
+ | Vext
+ (* Reverse elements. *)
+ | Vrev64
+ | Vrev32
+ | Vrev16
+ (* Transposition ops. *)
+ | Vtrn
+ | Vzip
+ | Vuzp
+ (* Loads and stores (VLD1/VST1/VLD2...), elements and structures. *)
+ | Vldx of int
+ | Vstx of int
+ | Vldx_lane of int
+ | Vldx_dup of int
+ | Vstx_lane of int
+ (* Set/extract lanes from a vector. *)
+ | Vget_lane
+ | Vset_lane
+ (* Initialise vector from bit pattern. *)
+ | Vcreate
+ (* Set all lanes to same value. *)
+ | Vdup_n
+ | Vmov_n (* Is this the same as Vdup_n? *)
+ (* Duplicate scalar to all lanes of vector. *)
+ | Vdup_lane
+ (* Combine vectors. *)
+ | Vcombine
+ (* Get quadword high/low parts. *)
+ | Vget_high
+ | Vget_low
+ (* Convert vectors. *)
+ | Vcvt
+ | Vcvt_n
+ (* Narrow/lengthen vectors. *)
+ | Vmovn
+ | Vmovl
+ (* Table lookup. *)
+ | Vtbl of int
+ | Vtbx of int
+ (* Reinterpret casts. *)
+ | Vreinterp
+
+(* Features used for documentation, to distinguish between some instruction
+ variants, and to signal special requirements (e.g. swapping arguments). *)
+
+type features =
+ Halving
+ | Rounding
+ | Saturating
+ | Dst_unsign
+ | High_half
+ | Doubling
+ | Flipped of string (* Builtin name to use with flipped arguments. *)
+ | InfoWord (* Pass an extra word for signedness/rounding etc. (always
+ passed for All _, Long, Wide, Narrow shape_forms). *)
+ | ReturnPtr (* Pass explicit pointer to return value as first argument. *)
+ (* A specification as to the shape of instruction expected upon
+ disassembly, used if it differs from the shape used to build the
+ intrinsic prototype. Multiple entries in the constructor's argument
+ indicate that the intrinsic expands to more than one assembly
+ instruction, each with a corresponding shape specified here. *)
+ | Disassembles_as of shape_form list
+ | Builtin_name of string (* Override the name of the builtin. *)
+ (* Override the name of the instruction. If more than one name
+ is specified, it means that the instruction can have any of those
+ names. *)
+ | Instruction_name of string list
+ (* Mark that the intrinsic yields no instructions, or expands to yield
+ behaviour that the test generator cannot test. *)
+ | No_op
+ (* Mark that the intrinsic has constant arguments that cannot be set
+ to the defaults (zero for pointers and one otherwise) in the test
+ cases. The function supplied must return the integer to be written
+ into the testcase for the argument number (0-based) supplied to it. *)
+ | Const_valuator of (int -> int)
+
+exception MixedMode of elts * elts
+
+let rec elt_width = function
+ S8 | U8 | P8 | I8 | B8 -> 8
+ | S16 | U16 | P16 | I16 | B16 -> 16
+ | S32 | F32 | U32 | I32 | B32 -> 32
+ | S64 | U64 | I64 | B64 -> 64
+ | Conv (a, b) ->
+ let wa = elt_width a and wb = elt_width b in
+ if wa = wb then wa else failwith "element width?"
+ | Cast (a, b) -> raise (MixedMode (a, b))
+ | NoElts -> failwith "No elts"
+
+let rec elt_class = function
+ S8 | S16 | S32 | S64 -> Signed
+ | U8 | U16 | U32 | U64 -> Unsigned
+ | P8 | P16 -> Poly
+ | F32 -> Float
+ | I8 | I16 | I32 | I64 -> Int
+ | B8 | B16 | B32 | B64 -> Bits
+ | Conv (a, b) | Cast (a, b) -> ConvClass (elt_class a, elt_class b)
+ | NoElts -> NoType
+
+let elt_of_class_width c w =
+ match c, w with
+ Signed, 8 -> S8
+ | Signed, 16 -> S16
+ | Signed, 32 -> S32
+ | Signed, 64 -> S64
+ | Float, 32 -> F32
+ | Unsigned, 8 -> U8
+ | Unsigned, 16 -> U16
+ | Unsigned, 32 -> U32
+ | Unsigned, 64 -> U64
+ | Poly, 8 -> P8
+ | Poly, 16 -> P16
+ | Int, 8 -> I8
+ | Int, 16 -> I16
+ | Int, 32 -> I32
+ | Int, 64 -> I64
+ | Bits, 8 -> B8
+ | Bits, 16 -> B16
+ | Bits, 32 -> B32
+ | Bits, 64 -> B64
+ | _ -> failwith "Bad element type"
+
+(* Return unsigned integer element the same width as argument. *)
+let unsigned_of_elt elt =
+ elt_of_class_width Unsigned (elt_width elt)
+
+let signed_of_elt elt =
+ elt_of_class_width Signed (elt_width elt)
+
+(* Return untyped bits element the same width as argument. *)
+let bits_of_elt elt =
+ elt_of_class_width Bits (elt_width elt)
+
+let non_signed_variant = function
+ S8 -> I8
+ | S16 -> I16
+ | S32 -> I32
+ | S64 -> I64
+ | U8 -> I8
+ | U16 -> I16
+ | U32 -> I32
+ | U64 -> I64
+ | x -> x
+
+let poly_unsigned_variant v =
+ let elclass = match elt_class v with
+ Poly -> Unsigned
+ | x -> x in
+ elt_of_class_width elclass (elt_width v)
+
+let widen_elt elt =
+ let w = elt_width elt
+ and c = elt_class elt in
+ elt_of_class_width c (w * 2)
+
+let narrow_elt elt =
+ let w = elt_width elt
+ and c = elt_class elt in
+ elt_of_class_width c (w / 2)
+
+(* If we're trying to find a mode from a "Use_operands" instruction, use the
+ last vector operand as the dominant mode used to invoke the correct builtin.
+ We must stick to this rule in neon.md. *)
+let find_key_operand operands =
+ let rec scan opno =
+ match operands.(opno) with
+ Qreg -> Qreg
+ | Dreg -> Dreg
+ | VecArray (_, Qreg) -> Qreg
+ | VecArray (_, Dreg) -> Dreg
+ | _ -> scan (opno-1)
+ in
+ scan ((Array.length operands) - 1)
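+
+(* For example, find_key_operand [| Dreg; Dreg; Immed |] scans backwards
+   past the immediate and yields Dreg. *)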
+
+let rec mode_of_elt elt shape =
+ let flt = match elt_class elt with
+ Float | ConvClass(_, Float) -> true | _ -> false in
+ let idx =
+ match elt_width elt with
+ 8 -> 0 | 16 -> 1 | 32 -> 2 | 64 -> 3
+ | _ -> failwith "Bad element width"
+ in match shape with
+ All (_, Dreg) | By_scalar Dreg | Pair_result Dreg | Unary_scalar Dreg
+ | Binary_imm Dreg | Long_noreg Dreg | Wide_noreg Dreg ->
+ [| V8QI; V4HI; if flt then V2SF else V2SI; DI |].(idx)
+ | All (_, Qreg) | By_scalar Qreg | Pair_result Qreg | Unary_scalar Qreg
+ | Binary_imm Qreg | Long_noreg Qreg | Wide_noreg Qreg ->
+ [| V16QI; V8HI; if flt then V4SF else V4SI; V2DI |].(idx)
+ | All (_, (Corereg | PtrTo _ | CstPtrTo _)) ->
+ [| QI; HI; if flt then SF else SI; DI |].(idx)
+ | Long | Wide | Wide_lane | Wide_scalar
+ | Long_imm ->
+ [| V8QI; V4HI; V2SI; DI |].(idx)
+ | Narrow | Narrow_imm -> [| V16QI; V8HI; V4SI; V2DI |].(idx)
+ | Use_operands ops -> mode_of_elt elt (All (0, (find_key_operand ops)))
+ | _ -> failwith "invalid shape"
+
+(* Modify an element type dependent on the shape of the instruction and the
+ operand number. *)
+
+let shapemap shape no =
+ let ident = fun x -> x in
+ match shape with
+ All _ | Use_operands _ | By_scalar _ | Pair_result _ | Unary_scalar _
+ | Binary_imm _ -> ident
+ | Long | Long_noreg _ | Wide_scalar | Long_imm ->
+ [| widen_elt; ident; ident |].(no)
+ | Wide | Wide_noreg _ -> [| widen_elt; widen_elt; ident |].(no)
+ | Wide_lane -> [| widen_elt; ident; ident; ident |].(no)
+ | Narrow | Narrow_imm -> [| narrow_elt; ident; ident |].(no)
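+
+(* For example, (shapemap Long 0) is widen_elt, so a "long" operation such
+   as vaddl with S8 input elements produces S16 elements in operand 0. *)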
+
+(* Register type (D/Q) of an operand, based on shape and operand number. *)
+
+let regmap shape no =
+ match shape with
+ All (_, reg) | Long_noreg reg | Wide_noreg reg -> reg
+ | Long -> [| Qreg; Dreg; Dreg |].(no)
+ | Wide -> [| Qreg; Qreg; Dreg |].(no)
+ | Narrow -> [| Dreg; Qreg; Qreg |].(no)
+ | Wide_lane -> [| Qreg; Dreg; Dreg; Immed |].(no)
+ | Wide_scalar -> [| Qreg; Dreg; Corereg |].(no)
+ | By_scalar reg -> [| reg; reg; Dreg; Immed |].(no)
+ | Unary_scalar reg -> [| reg; Dreg; Immed |].(no)
+ | Pair_result reg -> [| VecArray (2, reg); reg; reg |].(no)
+ | Binary_imm reg -> [| reg; reg; Immed |].(no)
+ | Long_imm -> [| Qreg; Dreg; Immed |].(no)
+ | Narrow_imm -> [| Dreg; Qreg; Immed |].(no)
+ | Use_operands these -> these.(no)
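+
+(* For example, regmap Narrow 1 is Qreg: a narrowing operation reads two
+   quad-word operands and writes a double-word result (regmap Narrow 0
+   is Dreg). *)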
+
+let type_for_elt shape elt no =
+ let elt = (shapemap shape no) elt in
+ let reg = regmap shape no in
+ let rec type_for_reg_elt reg elt =
+ match reg with
+ Dreg ->
+ begin match elt with
+ S8 -> T_int8x8
+ | S16 -> T_int16x4
+ | S32 -> T_int32x2
+ | S64 -> T_int64x1
+ | U8 -> T_uint8x8
+ | U16 -> T_uint16x4
+ | U32 -> T_uint32x2
+ | U64 -> T_uint64x1
+ | F32 -> T_float32x2
+ | P8 -> T_poly8x8
+ | P16 -> T_poly16x4
+ | _ -> failwith "Bad elt type"
+ end
+ | Qreg ->
+ begin match elt with
+ S8 -> T_int8x16
+ | S16 -> T_int16x8
+ | S32 -> T_int32x4
+ | S64 -> T_int64x2
+ | U8 -> T_uint8x16
+ | U16 -> T_uint16x8
+ | U32 -> T_uint32x4
+ | U64 -> T_uint64x2
+ | F32 -> T_float32x4
+ | P8 -> T_poly8x16
+ | P16 -> T_poly16x8
+ | _ -> failwith "Bad elt type"
+ end
+ | Corereg ->
+ begin match elt with
+ S8 -> T_int8
+ | S16 -> T_int16
+ | S32 -> T_int32
+ | S64 -> T_int64
+ | U8 -> T_uint8
+ | U16 -> T_uint16
+ | U32 -> T_uint32
+ | U64 -> T_uint64
+ | P8 -> T_poly8
+ | P16 -> T_poly16
+ | F32 -> T_float32
+ | _ -> failwith "Bad elt type"
+ end
+ | Immed ->
+ T_immediate (0, 0)
+ | VecArray (num, sub) ->
+ T_arrayof (num, type_for_reg_elt sub elt)
+ | PtrTo x ->
+ T_ptrto (type_for_reg_elt x elt)
+ | CstPtrTo x ->
+ T_ptrto (T_const (type_for_reg_elt x elt))
+ (* Anything else is solely for the use of the test generator. *)
+ | _ -> assert false
+ in
+ type_for_reg_elt reg elt
+
+(* Return size of a vector type, in bits. *)
+let vectype_size = function
+ T_int8x8 | T_int16x4 | T_int32x2 | T_int64x1
+ | T_uint8x8 | T_uint16x4 | T_uint32x2 | T_uint64x1
+ | T_float32x2 | T_poly8x8 | T_poly16x4 -> 64
+ | T_int8x16 | T_int16x8 | T_int32x4 | T_int64x2
+ | T_uint8x16 | T_uint16x8 | T_uint32x4 | T_uint64x2
+ | T_float32x4 | T_poly8x16 | T_poly16x8 -> 128
+ | _ -> raise Not_found
+
+let inttype_for_array num elttype =
+ let eltsize = vectype_size elttype in
+ let numwords = (num * eltsize) / 32 in
+ match numwords with
+ 4 -> B_TImode
+ | 6 -> B_EImode
+ | 8 -> B_OImode
+ | 12 -> B_CImode
+ | 16 -> B_XImode
+ | _ -> failwith ("no int type for size " ^ string_of_int numwords)
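+
+(* For example, inttype_for_array 3 T_int16x8 is B_CImode:
+   (3 * 128) / 32 = 12 words. *)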
+
+(* These functions return pairs of (internal, external) types, where "internal"
+ types are those seen by GCC, and "external" are those seen by the assembler.
+ These types aren't necessarily the same, since the intrinsics can munge more
+ than one C type into each assembler opcode. *)
+
+let make_sign_invariant func shape elt =
+ let arity, elt' = func shape elt in
+ arity, non_signed_variant elt'
+
+(* Don't restrict any types. *)
+
+let elts_same make_arity shape elt =
+ let vtype = type_for_elt shape elt in
+ make_arity vtype, elt
+
+(* As sign_invar_*, but when sign matters. *)
+let elts_same_io_lane =
+ elts_same (fun vtype -> Arity4 (vtype 0, vtype 0, vtype 1, vtype 2, vtype 3))
+
+let elts_same_io =
+ elts_same (fun vtype -> Arity3 (vtype 0, vtype 0, vtype 1, vtype 2))
+
+let elts_same_2_lane =
+ elts_same (fun vtype -> Arity3 (vtype 0, vtype 1, vtype 2, vtype 3))
+
+let elts_same_3 = elts_same_2_lane
+
+let elts_same_2 =
+ elts_same (fun vtype -> Arity2 (vtype 0, vtype 1, vtype 2))
+
+let elts_same_1 =
+ elts_same (fun vtype -> Arity1 (vtype 0, vtype 1))
+
+(* Use for signed/unsigned invariant operations (i.e. where the operation
+ doesn't depend on the sign of the data). *)
+
+let sign_invar_io_lane = make_sign_invariant elts_same_io_lane
+let sign_invar_io = make_sign_invariant elts_same_io
+let sign_invar_2_lane = make_sign_invariant elts_same_2_lane
+let sign_invar_2 = make_sign_invariant elts_same_2
+let sign_invar_1 = make_sign_invariant elts_same_1
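+
+(* For example, sign_invar_1 (All (2, Dreg)) S8 yields
+   (Arity1 (T_int8x8, T_int8x8), I8): the prototype keeps the signed C
+   types, but the element is canonicalised to I8 so that the signed and
+   unsigned variants share one builtin. *)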
+
+(* Sign-sensitive comparison. *)
+
+let cmp_sign_matters shape elt =
+ let vtype = type_for_elt shape elt
+ and rtype = type_for_elt shape (unsigned_of_elt elt) 0 in
+ Arity2 (rtype, vtype 1, vtype 2), elt
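+
+(* For example, cmp_sign_matters (All (3, Dreg)) S8 yields
+   Arity2 (T_uint8x8, T_int8x8, T_int8x8): comparisons return an unsigned
+   mask of the same width as the inputs. *)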
+
+(* Signed/unsigned invariant comparison. *)
+
+let cmp_sign_invar shape elt =
+ let shape', elt' = cmp_sign_matters shape elt in
+ let elt'' =
+ match non_signed_variant elt' with
+ P8 -> I8
+ | x -> x
+ in
+ shape', elt''
+
+(* Comparison (VTST) where only the element width matters. *)
+
+let cmp_bits shape elt =
+ let vtype = type_for_elt shape elt
+ and rtype = type_for_elt shape (unsigned_of_elt elt) 0
+ and bits_only = bits_of_elt elt in
+ Arity2 (rtype, vtype 1, vtype 2), bits_only
+
+let reg_shift shape elt =
+ let vtype = type_for_elt shape elt
+ and op2type = type_for_elt shape (signed_of_elt elt) 2 in
+ Arity2 (vtype 0, vtype 1, op2type), elt
+
+(* Genericised constant-shift type-generating function. *)
+
+let const_shift mkimm ?arity ?result shape elt =
+ let op2type = (shapemap shape 2) elt in
+ let op2width = elt_width op2type in
+ let op2 = mkimm op2width
+ and op1 = type_for_elt shape elt 1
+ and r_elt =
+ match result with
+ None -> elt
+ | Some restriction -> restriction elt in
+ let rtype = type_for_elt shape r_elt 0 in
+ match arity with
+ None -> Arity2 (rtype, op1, op2), elt
+ | Some mkarity -> mkarity rtype op1 op2, elt
+
+(* Use for immediate right-shifts. *)
+
+let shift_right shape elt =
+ const_shift (fun imm -> T_immediate (1, imm)) shape elt
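+
+(* For example, shift_right (Binary_imm Dreg) S16 produces
+   Arity2 (T_int16x4, T_int16x4, T_immediate (1, 16)): immediate right
+   shifts accept counts from 1 up to the element width. *)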
+
+let shift_right_acc shape elt =
+ const_shift (fun imm -> T_immediate (1, imm))
+ ~arity:(fun dst op1 op2 -> Arity3 (dst, dst, op1, op2)) shape elt
+
+(* Use for immediate right-shifts when the operation doesn't care about
+ signedness. *)
+
+let shift_right_sign_invar =
+ make_sign_invariant shift_right
+
+(* Immediate right-shift; result is unsigned even when operand is signed. *)
+
+let shift_right_to_uns shape elt =
+ const_shift (fun imm -> T_immediate (1, imm)) ~result:unsigned_of_elt
+ shape elt
+
+(* Immediate left-shift. *)
+
+let shift_left shape elt =
+ const_shift (fun imm -> T_immediate (0, imm - 1)) shape elt
+
+(* Immediate left-shift, unsigned result. *)
+
+let shift_left_to_uns shape elt =
+ const_shift (fun imm -> T_immediate (0, imm - 1)) ~result:unsigned_of_elt
+ shape elt
+
+(* Immediate left-shift, don't care about signs. *)
+
+let shift_left_sign_invar =
+ make_sign_invariant shift_left
+
+(* Shift left/right and insert: only element size matters. *)
+
+let shift_insert shape elt =
+ let arity, elt =
+ const_shift (fun imm -> T_immediate (1, imm))
+ ~arity:(fun dst op1 op2 -> Arity3 (dst, dst, op1, op2)) shape elt in
+ arity, bits_of_elt elt
+
+(* Get/set lane. *)
+
+let get_lane shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity2 (vtype 0, vtype 1, vtype 2),
+ (match elt with P8 -> U8 | P16 -> U16 | x -> x)
+
+let set_lane shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity3 (vtype 0, vtype 1, vtype 2, vtype 3), bits_of_elt elt
+
+let set_lane_notype shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity3 (vtype 0, vtype 1, vtype 2, vtype 3), NoElts
+
+let create_vector shape elt =
+ let vtype = type_for_elt shape U64 1
+ and rtype = type_for_elt shape elt 0 in
+ Arity1 (rtype, vtype), elt
+
+let conv make_arity shape elt =
+ let edest, esrc = match elt with
+ Conv (edest, esrc) | Cast (edest, esrc) -> edest, esrc
+ | _ -> failwith "Non-conversion element in conversion" in
+ let vtype = type_for_elt shape esrc
+ and rtype = type_for_elt shape edest 0 in
+ make_arity rtype vtype, elt
+
+let conv_1 = conv (fun rtype vtype -> Arity1 (rtype, vtype 1))
+let conv_2 = conv (fun rtype vtype -> Arity2 (rtype, vtype 1, vtype 2))
+
+(* Operation has an unsigned result even if operands are signed. *)
+
+let dst_unsign make_arity shape elt =
+ let vtype = type_for_elt shape elt
+ and rtype = type_for_elt shape (unsigned_of_elt elt) 0 in
+ make_arity rtype vtype, elt
+
+let dst_unsign_1 = dst_unsign (fun rtype vtype -> Arity1 (rtype, vtype 1))
+
+let make_bits_only func shape elt =
+ let arity, elt' = func shape elt in
+ arity, bits_of_elt elt'
+
+(* Extend operation. *)
+
+let extend shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity3 (vtype 0, vtype 1, vtype 2, vtype 3), bits_of_elt elt
+
+(* Table look-up operations. Operand 2 is signed/unsigned for signed/unsigned
+ integer ops respectively, or unsigned for polynomial ops. *)
+
+let table mkarity shape elt =
+ let vtype = type_for_elt shape elt in
+ let op2 = type_for_elt shape (poly_unsigned_variant elt) 2 in
+ mkarity vtype op2, bits_of_elt elt
+
+let table_2 = table (fun vtype op2 -> Arity2 (vtype 0, vtype 1, op2))
+let table_io = table (fun vtype op2 -> Arity3 (vtype 0, vtype 0, vtype 1, op2))
+
+(* Operations where only bits matter. *)
+
+let bits_1 = make_bits_only elts_same_1
+let bits_2 = make_bits_only elts_same_2
+let bits_3 = make_bits_only elts_same_3
+
+(* Store insns. *)
+let store_1 shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity2 (T_void, vtype 0, vtype 1), bits_of_elt elt
+
+let store_3 shape elt =
+ let vtype = type_for_elt shape elt in
+ Arity3 (T_void, vtype 0, vtype 1, vtype 2), bits_of_elt elt
+
+let make_notype func shape elt =
+ let arity, _ = func shape elt in
+ arity, NoElts
+
+let notype_1 = make_notype elts_same_1
+let notype_2 = make_notype elts_same_2
+let notype_3 = make_notype elts_same_3
+
+(* Bit-select operations (first operand is unsigned int). *)
+
+let bit_select shape elt =
+ let vtype = type_for_elt shape elt
+ and itype = type_for_elt shape (unsigned_of_elt elt) in
+ Arity3 (vtype 0, itype 1, vtype 2, vtype 3), NoElts
+
+(* Common lists of supported element types. *)
+
+let su_8_32 = [S8; S16; S32; U8; U16; U32]
+let su_8_64 = S64 :: U64 :: su_8_32
+let su_16_64 = [S16; S32; S64; U16; U32; U64]
+let pf_su_8_32 = P8 :: P16 :: F32 :: su_8_32
+let pf_su_8_64 = P8 :: P16 :: F32 :: su_8_64
+
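+(* Each row below is a tuple of
+   (opcode, features, shape, intrinsic base name, type-generating function,
+   supported element types). *)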
+let ops =
+ [
+ (* Addition. *)
+ Vadd, [], All (3, Dreg), "vadd", sign_invar_2, F32 :: su_8_64;
+ Vadd, [], All (3, Qreg), "vaddQ", sign_invar_2, F32 :: su_8_64;
+ Vadd, [], Long, "vaddl", elts_same_2, su_8_32;
+ Vadd, [], Wide, "vaddw", elts_same_2, su_8_32;
+ Vadd, [Halving], All (3, Dreg), "vhadd", elts_same_2, su_8_32;
+ Vadd, [Halving], All (3, Qreg), "vhaddQ", elts_same_2, su_8_32;
+ Vadd, [Instruction_name ["vrhadd"]; Rounding; Halving],
+ All (3, Dreg), "vRhadd", elts_same_2, su_8_32;
+ Vadd, [Instruction_name ["vrhadd"]; Rounding; Halving],
+ All (3, Qreg), "vRhaddQ", elts_same_2, su_8_32;
+ Vadd, [Saturating], All (3, Dreg), "vqadd", elts_same_2, su_8_64;
+ Vadd, [Saturating], All (3, Qreg), "vqaddQ", elts_same_2, su_8_64;
+ Vadd, [High_half], Narrow, "vaddhn", sign_invar_2, su_16_64;
+ Vadd, [Instruction_name ["vraddhn"]; Rounding; High_half],
+ Narrow, "vRaddhn", sign_invar_2, su_16_64;
+
+ (* Multiplication. *)
+ Vmul, [], All (3, Dreg), "vmul", sign_invar_2, P8 :: F32 :: su_8_32;
+ Vmul, [], All (3, Qreg), "vmulQ", sign_invar_2, P8 :: F32 :: su_8_32;
+ Vmul, [Saturating; Doubling; High_half], All (3, Dreg), "vqdmulh",
+ elts_same_2, [S16; S32];
+ Vmul, [Saturating; Doubling; High_half], All (3, Qreg), "vqdmulhQ",
+ elts_same_2, [S16; S32];
+ Vmul,
+ [Saturating; Rounding; Doubling; High_half;
+ Instruction_name ["vqrdmulh"]],
+ All (3, Dreg), "vqRdmulh",
+ elts_same_2, [S16; S32];
+ Vmul,
+ [Saturating; Rounding; Doubling; High_half;
+ Instruction_name ["vqrdmulh"]],
+ All (3, Qreg), "vqRdmulhQ",
+ elts_same_2, [S16; S32];
+ Vmul, [], Long, "vmull", elts_same_2, P8 :: su_8_32;
+ Vmul, [Saturating; Doubling], Long, "vqdmull", elts_same_2, [S16; S32];
+
+ (* Multiply-accumulate. *)
+ Vmla, [], All (3, Dreg), "vmla", sign_invar_io, F32 :: su_8_32;
+ Vmla, [], All (3, Qreg), "vmlaQ", sign_invar_io, F32 :: su_8_32;
+ Vmla, [], Long, "vmlal", elts_same_io, su_8_32;
+ Vmla, [Saturating; Doubling], Long, "vqdmlal", elts_same_io, [S16; S32];
+
+ (* Multiply-subtract. *)
+ Vmls, [], All (3, Dreg), "vmls", sign_invar_io, F32 :: su_8_32;
+ Vmls, [], All (3, Qreg), "vmlsQ", sign_invar_io, F32 :: su_8_32;
+ Vmls, [], Long, "vmlsl", elts_same_io, su_8_32;
+ Vmls, [Saturating; Doubling], Long, "vqdmlsl", elts_same_io, [S16; S32];
+
+ (* Subtraction. *)
+ Vsub, [], All (3, Dreg), "vsub", sign_invar_2, F32 :: su_8_64;
+ Vsub, [], All (3, Qreg), "vsubQ", sign_invar_2, F32 :: su_8_64;
+ Vsub, [], Long, "vsubl", elts_same_2, su_8_32;
+ Vsub, [], Wide, "vsubw", elts_same_2, su_8_32;
+ Vsub, [Halving], All (3, Dreg), "vhsub", elts_same_2, su_8_32;
+ Vsub, [Halving], All (3, Qreg), "vhsubQ", elts_same_2, su_8_32;
+ Vsub, [Saturating], All (3, Dreg), "vqsub", elts_same_2, su_8_64;
+ Vsub, [Saturating], All (3, Qreg), "vqsubQ", elts_same_2, su_8_64;
+ Vsub, [High_half], Narrow, "vsubhn", sign_invar_2, su_16_64;
+ Vsub, [Instruction_name ["vrsubhn"]; Rounding; High_half],
+ Narrow, "vRsubhn", sign_invar_2, su_16_64;
+
+ (* Comparison, equal. *)
+ Vceq, [], All (3, Dreg), "vceq", cmp_sign_invar, P8 :: F32 :: su_8_32;
+ Vceq, [], All (3, Qreg), "vceqQ", cmp_sign_invar, P8 :: F32 :: su_8_32;
+
+ (* Comparison, greater-than or equal. *)
+ Vcge, [], All (3, Dreg), "vcge", cmp_sign_matters, F32 :: su_8_32;
+ Vcge, [], All (3, Qreg), "vcgeQ", cmp_sign_matters, F32 :: su_8_32;
+
+ (* Comparison, less-than or equal. *)
+ Vcle, [Flipped "vcge"], All (3, Dreg), "vcle", cmp_sign_matters,
+ F32 :: su_8_32;
+ Vcle, [Instruction_name ["vcge"]; Flipped "vcgeQ"],
+ All (3, Qreg), "vcleQ", cmp_sign_matters,
+ F32 :: su_8_32;
+
+ (* Comparison, greater-than. *)
+ Vcgt, [], All (3, Dreg), "vcgt", cmp_sign_matters, F32 :: su_8_32;
+ Vcgt, [], All (3, Qreg), "vcgtQ", cmp_sign_matters, F32 :: su_8_32;
+
+ (* Comparison, less-than. *)
+ Vclt, [Flipped "vcgt"], All (3, Dreg), "vclt", cmp_sign_matters,
+ F32 :: su_8_32;
+ Vclt, [Instruction_name ["vcgt"]; Flipped "vcgtQ"],
+ All (3, Qreg), "vcltQ", cmp_sign_matters,
+ F32 :: su_8_32;
+
+ (* Compare absolute greater-than or equal. *)
+ Vcage, [Instruction_name ["vacge"]],
+ All (3, Dreg), "vcage", cmp_sign_matters, [F32];
+ Vcage, [Instruction_name ["vacge"]],
+ All (3, Qreg), "vcageQ", cmp_sign_matters, [F32];
+
+ (* Compare absolute less-than or equal. *)
+ Vcale, [Instruction_name ["vacge"]; Flipped "vcage"],
+ All (3, Dreg), "vcale", cmp_sign_matters, [F32];
+ Vcale, [Instruction_name ["vacge"]; Flipped "vcageQ"],
+ All (3, Qreg), "vcaleQ", cmp_sign_matters, [F32];
+
+ (* Compare absolute greater-than. *)
+ Vcagt, [Instruction_name ["vacgt"]],
+ All (3, Dreg), "vcagt", cmp_sign_matters, [F32];
+ Vcagt, [Instruction_name ["vacgt"]],
+ All (3, Qreg), "vcagtQ", cmp_sign_matters, [F32];
+
+ (* Compare absolute less-than. *)
+ Vcalt, [Instruction_name ["vacgt"]; Flipped "vcagt"],
+ All (3, Dreg), "vcalt", cmp_sign_matters, [F32];
+ Vcalt, [Instruction_name ["vacgt"]; Flipped "vcagtQ"],
+ All (3, Qreg), "vcaltQ", cmp_sign_matters, [F32];
+
+ (* Test bits. *)
+ Vtst, [], All (3, Dreg), "vtst", cmp_bits, P8 :: su_8_32;
+ Vtst, [], All (3, Qreg), "vtstQ", cmp_bits, P8 :: su_8_32;
+
+ (* Absolute difference. *)
+ Vabd, [], All (3, Dreg), "vabd", elts_same_2, F32 :: su_8_32;
+ Vabd, [], All (3, Qreg), "vabdQ", elts_same_2, F32 :: su_8_32;
+ Vabd, [], Long, "vabdl", elts_same_2, su_8_32;
+
+ (* Absolute difference and accumulate. *)
+ Vaba, [], All (3, Dreg), "vaba", elts_same_io, su_8_32;
+ Vaba, [], All (3, Qreg), "vabaQ", elts_same_io, su_8_32;
+ Vaba, [], Long, "vabal", elts_same_io, su_8_32;
+
+ (* Max. *)
+ Vmax, [], All (3, Dreg), "vmax", elts_same_2, F32 :: su_8_32;
+ Vmax, [], All (3, Qreg), "vmaxQ", elts_same_2, F32 :: su_8_32;
+
+ (* Min. *)
+ Vmin, [], All (3, Dreg), "vmin", elts_same_2, F32 :: su_8_32;
+ Vmin, [], All (3, Qreg), "vminQ", elts_same_2, F32 :: su_8_32;
+
+ (* Pairwise add. *)
+ Vpadd, [], All (3, Dreg), "vpadd", sign_invar_2, F32 :: su_8_32;
+ Vpadd, [], Long_noreg Dreg, "vpaddl", elts_same_1, su_8_32;
+ Vpadd, [], Long_noreg Qreg, "vpaddlQ", elts_same_1, su_8_32;
+
+ (* Pairwise add, widen and accumulate. *)
+ Vpada, [], Wide_noreg Dreg, "vpadal", elts_same_2, su_8_32;
+ Vpada, [], Wide_noreg Qreg, "vpadalQ", elts_same_2, su_8_32;
+
+ (* Folding maximum, minimum. *)
+ Vpmax, [], All (3, Dreg), "vpmax", elts_same_2, F32 :: su_8_32;
+ Vpmin, [], All (3, Dreg), "vpmin", elts_same_2, F32 :: su_8_32;
+
+ (* Reciprocal step. *)
+ Vrecps, [], All (3, Dreg), "vrecps", elts_same_2, [F32];
+ Vrecps, [], All (3, Qreg), "vrecpsQ", elts_same_2, [F32];
+ Vrsqrts, [], All (3, Dreg), "vrsqrts", elts_same_2, [F32];
+ Vrsqrts, [], All (3, Qreg), "vrsqrtsQ", elts_same_2, [F32];
+
+ (* Vector shift left. *)
+ Vshl, [], All (3, Dreg), "vshl", reg_shift, su_8_64;
+ Vshl, [], All (3, Qreg), "vshlQ", reg_shift, su_8_64;
+ Vshl, [Instruction_name ["vrshl"]; Rounding],
+ All (3, Dreg), "vRshl", reg_shift, su_8_64;
+ Vshl, [Instruction_name ["vrshl"]; Rounding],
+ All (3, Qreg), "vRshlQ", reg_shift, su_8_64;
+ Vshl, [Saturating], All (3, Dreg), "vqshl", reg_shift, su_8_64;
+ Vshl, [Saturating], All (3, Qreg), "vqshlQ", reg_shift, su_8_64;
+ Vshl, [Instruction_name ["vqrshl"]; Saturating; Rounding],
+ All (3, Dreg), "vqRshl", reg_shift, su_8_64;
+ Vshl, [Instruction_name ["vqrshl"]; Saturating; Rounding],
+ All (3, Qreg), "vqRshlQ", reg_shift, su_8_64;
+
+ (* Vector shift right by constant. *)
+ Vshr_n, [], Binary_imm Dreg, "vshr_n", shift_right, su_8_64;
+ Vshr_n, [], Binary_imm Qreg, "vshrQ_n", shift_right, su_8_64;
+ Vshr_n, [Instruction_name ["vrshr"]; Rounding], Binary_imm Dreg,
+ "vRshr_n", shift_right, su_8_64;
+ Vshr_n, [Instruction_name ["vrshr"]; Rounding], Binary_imm Qreg,
+ "vRshrQ_n", shift_right, su_8_64;
+ Vshr_n, [], Narrow_imm, "vshrn_n", shift_right_sign_invar, su_16_64;
+ Vshr_n, [Instruction_name ["vrshrn"]; Rounding], Narrow_imm, "vRshrn_n",
+ shift_right_sign_invar, su_16_64;
+ Vshr_n, [Saturating], Narrow_imm, "vqshrn_n", shift_right, su_16_64;
+ Vshr_n, [Instruction_name ["vqrshrn"]; Saturating; Rounding], Narrow_imm,
+ "vqRshrn_n", shift_right, su_16_64;
+ Vshr_n, [Saturating; Dst_unsign], Narrow_imm, "vqshrun_n",
+ shift_right_to_uns, [S16; S32; S64];
+ Vshr_n, [Instruction_name ["vqrshrun"]; Saturating; Dst_unsign; Rounding],
+ Narrow_imm, "vqRshrun_n", shift_right_to_uns, [S16; S32; S64];
+
+ (* Vector shift left by constant. *)
+ Vshl_n, [], Binary_imm Dreg, "vshl_n", shift_left_sign_invar, su_8_64;
+ Vshl_n, [], Binary_imm Qreg, "vshlQ_n", shift_left_sign_invar, su_8_64;
+ Vshl_n, [Saturating], Binary_imm Dreg, "vqshl_n", shift_left, su_8_64;
+ Vshl_n, [Saturating], Binary_imm Qreg, "vqshlQ_n", shift_left, su_8_64;
+ Vshl_n, [Saturating; Dst_unsign], Binary_imm Dreg, "vqshlu_n",
+ shift_left_to_uns, [S8; S16; S32; S64];
+ Vshl_n, [Saturating; Dst_unsign], Binary_imm Qreg, "vqshluQ_n",
+ shift_left_to_uns, [S8; S16; S32; S64];
+ Vshl_n, [], Long_imm, "vshll_n", shift_left, su_8_32;
+
+ (* Vector shift right by constant and accumulate. *)
+ Vsra_n, [], Binary_imm Dreg, "vsra_n", shift_right_acc, su_8_64;
+ Vsra_n, [], Binary_imm Qreg, "vsraQ_n", shift_right_acc, su_8_64;
+ Vsra_n, [Instruction_name ["vrsra"]; Rounding], Binary_imm Dreg,
+ "vRsra_n", shift_right_acc, su_8_64;
+ Vsra_n, [Instruction_name ["vrsra"]; Rounding], Binary_imm Qreg,
+ "vRsraQ_n", shift_right_acc, su_8_64;
+
+ (* Vector shift right and insert. *)
+ Vsri, [], Use_operands [| Dreg; Dreg; Immed |], "vsri_n", shift_insert,
+ P8 :: P16 :: su_8_64;
+ Vsri, [], Use_operands [| Qreg; Qreg; Immed |], "vsriQ_n", shift_insert,
+ P8 :: P16 :: su_8_64;
+
+ (* Vector shift left and insert. *)
+ Vsli, [], Use_operands [| Dreg; Dreg; Immed |], "vsli_n", shift_insert,
+ P8 :: P16 :: su_8_64;
+ Vsli, [], Use_operands [| Qreg; Qreg; Immed |], "vsliQ_n", shift_insert,
+ P8 :: P16 :: su_8_64;
+
+ (* Absolute value. *)
+ Vabs, [], All (2, Dreg), "vabs", elts_same_1, [S8; S16; S32; F32];
+ Vabs, [], All (2, Qreg), "vabsQ", elts_same_1, [S8; S16; S32; F32];
+ Vabs, [Saturating], All (2, Dreg), "vqabs", elts_same_1, [S8; S16; S32];
+ Vabs, [Saturating], All (2, Qreg), "vqabsQ", elts_same_1, [S8; S16; S32];
+
+ (* Negate. *)
+ Vneg, [], All (2, Dreg), "vneg", elts_same_1, [S8; S16; S32; F32];
+ Vneg, [], All (2, Qreg), "vnegQ", elts_same_1, [S8; S16; S32; F32];
+ Vneg, [Saturating], All (2, Dreg), "vqneg", elts_same_1, [S8; S16; S32];
+ Vneg, [Saturating], All (2, Qreg), "vqnegQ", elts_same_1, [S8; S16; S32];
+
+ (* Bitwise not. *)
+ Vmvn, [], All (2, Dreg), "vmvn", notype_1, P8 :: su_8_32;
+ Vmvn, [], All (2, Qreg), "vmvnQ", notype_1, P8 :: su_8_32;
+
+ (* Count leading sign bits. *)
+ Vcls, [], All (2, Dreg), "vcls", elts_same_1, [S8; S16; S32];
+ Vcls, [], All (2, Qreg), "vclsQ", elts_same_1, [S8; S16; S32];
+
+ (* Count leading zeros. *)
+ Vclz, [], All (2, Dreg), "vclz", sign_invar_1, su_8_32;
+ Vclz, [], All (2, Qreg), "vclzQ", sign_invar_1, su_8_32;
+
+ (* Count number of set bits. *)
+ Vcnt, [], All (2, Dreg), "vcnt", bits_1, [P8; S8; U8];
+ Vcnt, [], All (2, Qreg), "vcntQ", bits_1, [P8; S8; U8];
+
+ (* Reciprocal estimate. *)
+ Vrecpe, [], All (2, Dreg), "vrecpe", elts_same_1, [U32; F32];
+ Vrecpe, [], All (2, Qreg), "vrecpeQ", elts_same_1, [U32; F32];
+
+ (* Reciprocal square-root estimate. *)
+ Vrsqrte, [], All (2, Dreg), "vrsqrte", elts_same_1, [U32; F32];
+ Vrsqrte, [], All (2, Qreg), "vrsqrteQ", elts_same_1, [U32; F32];
+
+ (* Get lanes from a vector. *)
+ Vget_lane,
+ [InfoWord; Disassembles_as [Use_operands [| Corereg; Element_of_dreg |]];
+ Instruction_name ["vmov"]],
+ Use_operands [| Corereg; Dreg; Immed |],
+ "vget_lane", get_lane, pf_su_8_32;
+ Vget_lane,
+ [InfoWord;
+ Disassembles_as [Use_operands [| Corereg; Corereg; Dreg |]];
+ Instruction_name ["vmov"]; Const_valuator (fun _ -> 0)],
+ Use_operands [| Corereg; Dreg; Immed |],
+ "vget_lane", notype_2, [S64; U64];
+ Vget_lane,
+ [InfoWord; Disassembles_as [Use_operands [| Corereg; Element_of_dreg |]];
+ Instruction_name ["vmov"]],
+ Use_operands [| Corereg; Qreg; Immed |],
+ "vgetQ_lane", get_lane, pf_su_8_32;
+ Vget_lane,
+ [InfoWord;
+ Disassembles_as [Use_operands [| Corereg; Corereg; Dreg |]];
+ Instruction_name ["vmov"]; Const_valuator (fun _ -> 0)],
+ Use_operands [| Corereg; Qreg; Immed |],
+ "vgetQ_lane", notype_2, [S64; U64];
+
+ (* Set lanes in a vector. *)
+ Vset_lane, [Disassembles_as [Use_operands [| Element_of_dreg; Corereg |]];
+ Instruction_name ["vmov"]],
+ Use_operands [| Dreg; Corereg; Dreg; Immed |], "vset_lane",
+ set_lane, pf_su_8_32;
+ Vset_lane, [Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]];
+ Instruction_name ["vmov"]; Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; Corereg; Dreg; Immed |], "vset_lane",
+ set_lane_notype, [S64; U64];
+ Vset_lane, [Disassembles_as [Use_operands [| Element_of_dreg; Corereg |]];
+ Instruction_name ["vmov"]],
+ Use_operands [| Qreg; Corereg; Qreg; Immed |], "vsetQ_lane",
+ set_lane, pf_su_8_32;
+ Vset_lane, [Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]];
+ Instruction_name ["vmov"]; Const_valuator (fun _ -> 0)],
+ Use_operands [| Qreg; Corereg; Qreg; Immed |], "vsetQ_lane",
+ set_lane_notype, [S64; U64];
+
+ (* Create vector from literal bit pattern. *)
+ Vcreate,
+ [No_op], (* Not really, but it can yield various things that are too
+ hard for the test generator at this time. *)
+ Use_operands [| Dreg; Corereg |], "vcreate", create_vector,
+ pf_su_8_64;
+
+ (* Set all lanes to the same value. *)
+ Vdup_n, [],
+ Use_operands [| Dreg; Corereg |], "vdup_n", bits_1,
+ pf_su_8_32;
+ Vdup_n,
+ [Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Dreg; Corereg |], "vdup_n", notype_1,
+ [S64; U64];
+ Vdup_n, [],
+ Use_operands [| Qreg; Corereg |], "vdupQ_n", bits_1,
+ pf_su_8_32;
+ Vdup_n,
+ [Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |];
+ Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Qreg; Corereg |], "vdupQ_n", notype_1,
+ [S64; U64];
+
+ (* These are just aliases for the above. *)
+ Vmov_n,
+ [Builtin_name "vdup_n"],
+ Use_operands [| Dreg; Corereg |],
+ "vmov_n", bits_1, pf_su_8_32;
+ Vmov_n,
+ [Builtin_name "vdup_n";
+ Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Dreg; Corereg |],
+ "vmov_n", notype_1, [S64; U64];
+ Vmov_n,
+ [Builtin_name "vdupQ_n"],
+ Use_operands [| Qreg; Corereg |],
+ "vmovQ_n", bits_1, pf_su_8_32;
+ Vmov_n,
+ [Builtin_name "vdupQ_n";
+ Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |];
+ Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Qreg; Corereg |],
+ "vmovQ_n", notype_1, [S64; U64];
+
+ (* Duplicate, lane version. We can't use Use_operands here because the
+ rightmost register (always Dreg) would be picked up by find_key_operand,
+ whereas we want the leftmost register to be used in this case (otherwise
+ the modes are indistinguishable in neon.md, etc.). *)
+ Vdup_lane,
+ [Disassembles_as [Use_operands [| Dreg; Element_of_dreg |]]],
+ Unary_scalar Dreg, "vdup_lane", bits_2, pf_su_8_32;
+ Vdup_lane,
+ [No_op; Const_valuator (fun _ -> 0)],
+ Unary_scalar Dreg, "vdup_lane", bits_2, [S64; U64];
+ Vdup_lane,
+ [Disassembles_as [Use_operands [| Qreg; Element_of_dreg |]]],
+ Unary_scalar Qreg, "vdupQ_lane", bits_2, pf_su_8_32;
+ Vdup_lane,
+ [No_op; Const_valuator (fun _ -> 0)],
+ Unary_scalar Qreg, "vdupQ_lane", bits_2, [S64; U64];
+
+ (* Combining vectors. *)
+ Vcombine, [No_op],
+ Use_operands [| Qreg; Dreg; Dreg |], "vcombine", notype_2,
+ pf_su_8_64;
+
+ (* Splitting vectors. *)
+ Vget_high, [No_op],
+ Use_operands [| Dreg; Qreg |], "vget_high",
+ notype_1, pf_su_8_64;
+ Vget_low, [Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Dreg |]]],
+ Use_operands [| Dreg; Qreg |], "vget_low",
+ notype_1, pf_su_8_64;
+
+ (* Conversions. *)
+ Vcvt, [InfoWord], All (2, Dreg), "vcvt", conv_1,
+ [Conv (S32, F32); Conv (U32, F32); Conv (F32, S32); Conv (F32, U32)];
+ Vcvt, [InfoWord], All (2, Qreg), "vcvtQ", conv_1,
+ [Conv (S32, F32); Conv (U32, F32); Conv (F32, S32); Conv (F32, U32)];
+ Vcvt_n, [InfoWord], Use_operands [| Dreg; Dreg; Immed |], "vcvt_n", conv_2,
+ [Conv (S32, F32); Conv (U32, F32); Conv (F32, S32); Conv (F32, U32)];
+ Vcvt_n, [InfoWord], Use_operands [| Qreg; Qreg; Immed |], "vcvtQ_n", conv_2,
+ [Conv (S32, F32); Conv (U32, F32); Conv (F32, S32); Conv (F32, U32)];
+
+ (* Move, narrowing. *)
+ Vmovn, [Disassembles_as [Use_operands [| Dreg; Qreg |]]],
+ Narrow, "vmovn", sign_invar_1, su_16_64;
+ Vmovn, [Disassembles_as [Use_operands [| Dreg; Qreg |]]; Saturating],
+ Narrow, "vqmovn", elts_same_1, su_16_64;
+ Vmovn,
+ [Disassembles_as [Use_operands [| Dreg; Qreg |]]; Saturating; Dst_unsign],
+ Narrow, "vqmovun", dst_unsign_1,
+ [S16; S32; S64];
+
+ (* Move, long. *)
+ Vmovl, [Disassembles_as [Use_operands [| Qreg; Dreg |]]],
+ Long, "vmovl", elts_same_1, su_8_32;
+
+ (* Table lookup. *)
+ Vtbl 1,
+ [Instruction_name ["vtbl"];
+ Disassembles_as [Use_operands [| Dreg; VecArray (1, Dreg); Dreg |]]],
+ Use_operands [| Dreg; Dreg; Dreg |], "vtbl1", table_2, [U8; S8; P8];
+ Vtbl 2, [Instruction_name ["vtbl"]],
+ Use_operands [| Dreg; VecArray (2, Dreg); Dreg |], "vtbl2", table_2,
+ [U8; S8; P8];
+ Vtbl 3, [Instruction_name ["vtbl"]],
+ Use_operands [| Dreg; VecArray (3, Dreg); Dreg |], "vtbl3", table_2,
+ [U8; S8; P8];
+ Vtbl 4, [Instruction_name ["vtbl"]],
+ Use_operands [| Dreg; VecArray (4, Dreg); Dreg |], "vtbl4", table_2,
+ [U8; S8; P8];
+
+ (* Extended table lookup. *)
+ Vtbx 1,
+ [Instruction_name ["vtbx"];
+ Disassembles_as [Use_operands [| Dreg; VecArray (1, Dreg); Dreg |]]],
+ Use_operands [| Dreg; Dreg; Dreg |], "vtbx1", table_io, [U8; S8; P8];
+ Vtbx 2, [Instruction_name ["vtbx"]],
+ Use_operands [| Dreg; VecArray (2, Dreg); Dreg |], "vtbx2", table_io,
+ [U8; S8; P8];
+ Vtbx 3, [Instruction_name ["vtbx"]],
+ Use_operands [| Dreg; VecArray (3, Dreg); Dreg |], "vtbx3", table_io,
+ [U8; S8; P8];
+ Vtbx 4, [Instruction_name ["vtbx"]],
+ Use_operands [| Dreg; VecArray (4, Dreg); Dreg |], "vtbx4", table_io,
+ [U8; S8; P8];
+
+ (* Multiply, lane. (note: these were undocumented at the time of
+ writing). *)
+ Vmul_lane, [], By_scalar Dreg, "vmul_lane", sign_invar_2_lane,
+ [S16; S32; U16; U32; F32];
+ Vmul_lane, [], By_scalar Qreg, "vmulQ_lane", sign_invar_2_lane,
+ [S16; S32; U16; U32; F32];
+
+ (* Multiply-accumulate, lane. *)
+ Vmla_lane, [], By_scalar Dreg, "vmla_lane", sign_invar_io_lane,
+ [S16; S32; U16; U32; F32];
+ Vmla_lane, [], By_scalar Qreg, "vmlaQ_lane", sign_invar_io_lane,
+ [S16; S32; U16; U32; F32];
+ Vmla_lane, [], Wide_lane, "vmlal_lane", elts_same_io_lane,
+ [S16; S32; U16; U32];
+ Vmla_lane, [Saturating; Doubling], Wide_lane, "vqdmlal_lane",
+ elts_same_io_lane, [S16; S32];
+
+ (* Multiply-subtract, lane. *)
+ Vmls_lane, [], By_scalar Dreg, "vmls_lane", sign_invar_io_lane,
+ [S16; S32; U16; U32; F32];
+ Vmls_lane, [], By_scalar Qreg, "vmlsQ_lane", sign_invar_io_lane,
+ [S16; S32; U16; U32; F32];
+ Vmls_lane, [], Wide_lane, "vmlsl_lane", elts_same_io_lane,
+ [S16; S32; U16; U32];
+ Vmls_lane, [Saturating; Doubling], Wide_lane, "vqdmlsl_lane",
+ elts_same_io_lane, [S16; S32];
+
+ (* Long multiply, lane. *)
+ Vmull_lane, [],
+ Wide_lane, "vmull_lane", elts_same_2_lane, [S16; S32; U16; U32];
+
+ (* Saturating doubling long multiply, lane. *)
+ Vqdmull_lane, [Saturating; Doubling],
+ Wide_lane, "vqdmull_lane", elts_same_2_lane, [S16; S32];
+
+ (* Saturating doubling long multiply high, lane. *)
+ Vqdmulh_lane, [Saturating; Halving],
+ By_scalar Qreg, "vqdmulhQ_lane", elts_same_2_lane, [S16; S32];
+ Vqdmulh_lane, [Saturating; Halving],
+ By_scalar Dreg, "vqdmulh_lane", elts_same_2_lane, [S16; S32];
+ Vqdmulh_lane, [Saturating; Halving; Rounding;
+ Instruction_name ["vqrdmulh"]],
+ By_scalar Qreg, "vqRdmulhQ_lane", elts_same_2_lane, [S16; S32];
+ Vqdmulh_lane, [Saturating; Halving; Rounding;
+ Instruction_name ["vqrdmulh"]],
+ By_scalar Dreg, "vqRdmulh_lane", elts_same_2_lane, [S16; S32];
+
+ (* Vector multiply by scalar. *)
+ Vmul_n, [InfoWord;
+ Disassembles_as [Use_operands [| Dreg; Dreg; Element_of_dreg |]]],
+ Use_operands [| Dreg; Dreg; Corereg |], "vmul_n",
+ sign_invar_2, [S16; S32; U16; U32; F32];
+ Vmul_n, [InfoWord;
+ Disassembles_as [Use_operands [| Qreg; Qreg; Element_of_dreg |]]],
+ Use_operands [| Qreg; Qreg; Corereg |], "vmulQ_n",
+ sign_invar_2, [S16; S32; U16; U32; F32];
+
+ (* Vector long multiply by scalar. *)
+ Vmull_n, [Instruction_name ["vmull"];
+ Disassembles_as [Use_operands [| Qreg; Dreg; Element_of_dreg |]]],
+ Wide_scalar, "vmull_n",
+ elts_same_2, [S16; S32; U16; U32];
+
+ (* Vector saturating doubling long multiply by scalar. *)
+ Vqdmull_n, [Saturating; Doubling;
+ Disassembles_as [Use_operands [| Qreg; Dreg;
+ Element_of_dreg |]]],
+ Wide_scalar, "vqdmull_n",
+ elts_same_2, [S16; S32];
+
+ (* Vector saturating doubling long multiply high by scalar. *)
+ Vqdmulh_n,
+ [Saturating; Halving; InfoWord;
+ Disassembles_as [Use_operands [| Qreg; Qreg; Element_of_dreg |]]],
+ Use_operands [| Qreg; Qreg; Corereg |],
+ "vqdmulhQ_n", elts_same_2, [S16; S32];
+ Vqdmulh_n,
+ [Saturating; Halving; InfoWord;
+ Disassembles_as [Use_operands [| Dreg; Dreg; Element_of_dreg |]]],
+ Use_operands [| Dreg; Dreg; Corereg |],
+ "vqdmulh_n", elts_same_2, [S16; S32];
+ Vqdmulh_n,
+ [Saturating; Halving; Rounding; InfoWord;
+ Instruction_name ["vqrdmulh"];
+ Disassembles_as [Use_operands [| Qreg; Qreg; Element_of_dreg |]]],
+ Use_operands [| Qreg; Qreg; Corereg |],
+ "vqRdmulhQ_n", elts_same_2, [S16; S32];
+ Vqdmulh_n,
+ [Saturating; Halving; Rounding; InfoWord;
+ Instruction_name ["vqrdmulh"];
+ Disassembles_as [Use_operands [| Dreg; Dreg; Element_of_dreg |]]],
+ Use_operands [| Dreg; Dreg; Corereg |],
+ "vqRdmulh_n", elts_same_2, [S16; S32];
+
+ (* Vector multiply-accumulate by scalar. *)
+ Vmla_n, [InfoWord;
+ Disassembles_as [Use_operands [| Dreg; Dreg; Element_of_dreg |]]],
+ Use_operands [| Dreg; Dreg; Corereg |], "vmla_n",
+ sign_invar_io, [S16; S32; U16; U32; F32];
+ Vmla_n, [InfoWord;
+ Disassembles_as [Use_operands [| Qreg; Qreg; Element_of_dreg |]]],
+ Use_operands [| Qreg; Qreg; Corereg |], "vmlaQ_n",
+ sign_invar_io, [S16; S32; U16; U32; F32];
+ Vmla_n, [], Wide_scalar, "vmlal_n", elts_same_io, [S16; S32; U16; U32];
+ Vmla_n, [Saturating; Doubling], Wide_scalar, "vqdmlal_n", elts_same_io,
+ [S16; S32];
+
+ (* Vector multiply subtract by scalar. *)
+ Vmls_n, [InfoWord;
+ Disassembles_as [Use_operands [| Dreg; Dreg; Element_of_dreg |]]],
+ Use_operands [| Dreg; Dreg; Corereg |], "vmls_n",
+ sign_invar_io, [S16; S32; U16; U32; F32];
+ Vmls_n, [InfoWord;
+ Disassembles_as [Use_operands [| Qreg; Qreg; Element_of_dreg |]]],
+ Use_operands [| Qreg; Qreg; Corereg |], "vmlsQ_n",
+ sign_invar_io, [S16; S32; U16; U32; F32];
+ Vmls_n, [], Wide_scalar, "vmlsl_n", elts_same_io, [S16; S32; U16; U32];
+ Vmls_n, [Saturating; Doubling], Wide_scalar, "vqdmlsl_n", elts_same_io,
+ [S16; S32];
+
+ (* Vector extract. *)
+ Vext, [Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; Dreg; Dreg; Immed |], "vext", extend,
+ pf_su_8_64;
+ Vext, [Const_valuator (fun _ -> 0)],
+ Use_operands [| Qreg; Qreg; Qreg; Immed |], "vextQ", extend,
+ pf_su_8_64;
+
+ (* Reverse elements. *)
+ Vrev64, [], All (2, Dreg), "vrev64", bits_1, P8 :: P16 :: F32 :: su_8_32;
+ Vrev64, [], All (2, Qreg), "vrev64Q", bits_1, P8 :: P16 :: F32 :: su_8_32;
+ Vrev32, [], All (2, Dreg), "vrev32", bits_1, [P8; P16; S8; U8; S16; U16];
+ Vrev32, [], All (2, Qreg), "vrev32Q", bits_1, [P8; P16; S8; U8; S16; U16];
+ Vrev16, [], All (2, Dreg), "vrev16", bits_1, [P8; S8; U8];
+ Vrev16, [], All (2, Qreg), "vrev16Q", bits_1, [P8; S8; U8];
+
+ (* Bit selection. *)
+ Vbsl,
+ [Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Dreg; Dreg; Dreg |]]],
+ Use_operands [| Dreg; Dreg; Dreg; Dreg |], "vbsl", bit_select,
+ pf_su_8_64;
+ Vbsl,
+ [Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Qreg; Qreg; Qreg |]]],
+ Use_operands [| Qreg; Qreg; Qreg; Qreg |], "vbslQ", bit_select,
+ pf_su_8_64;
+
+ (* Transpose elements. **NOTE** ReturnPtr goes some of the way towards
+ generating good code for intrinsics which return structure types --
+ builtins work well by themselves (and understand that the values being
+ stored on e.g. the stack also reside in registers, so can optimise the
+ stores away entirely if the results are used immediately), but
+ intrinsics are much less efficient. Maybe something can be improved
+ re: inlining, or tweaking the ABI used for intrinsics (a special call
+ attribute?).
+ *)
+ Vtrn, [ReturnPtr], Pair_result Dreg, "vtrn", bits_2, pf_su_8_32;
+ Vtrn, [ReturnPtr], Pair_result Qreg, "vtrnQ", bits_2, pf_su_8_32;
+
+ (* Zip elements. *)
+ Vzip, [ReturnPtr], Pair_result Dreg, "vzip", bits_2, pf_su_8_32;
+ Vzip, [ReturnPtr], Pair_result Qreg, "vzipQ", bits_2, pf_su_8_32;
+
+ (* Unzip elements. *)
+ Vuzp, [ReturnPtr], Pair_result Dreg, "vuzp", bits_2, pf_su_8_32;
+ Vuzp, [ReturnPtr], Pair_result Qreg, "vuzpQ", bits_2, pf_su_8_32;
+
+ (* Element/structure loads. VLD1 variants. *)
+ Vldx 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1", bits_1,
+ pf_su_8_64;
+ Vldx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q", bits_1,
+ pf_su_8_64;
+
+ Vldx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
+ "vld1_lane", bits_3, pf_su_8_32;
+ Vldx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
+ "vld1_lane", bits_3, [S64; U64];
+ Vldx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
+ "vld1Q_lane", bits_3, pf_su_8_32;
+ Vldx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
+ "vld1Q_lane", bits_3, [S64; U64];
+
+ Vldx_dup 1,
+ [Disassembles_as [Use_operands [| VecArray (1, All_elements_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
+ bits_1, pf_su_8_32;
+ Vldx_dup 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
+ bits_1, [S64; U64];
+ Vldx_dup 1,
+ [Disassembles_as [Use_operands [| VecArray (2, All_elements_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
+ bits_1, pf_su_8_32;
+ Vldx_dup 1,
+ [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
+ bits_1, [S64; U64];
+
+ (* VST1 variants. *)
+ Vstx 1, [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Dreg |], "vst1",
+ store_1, pf_su_8_64;
+ Vstx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg |], "vst1Q",
+ store_1, pf_su_8_64;
+
+ Vstx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Dreg; Immed |],
+ "vst1_lane", store_3, pf_su_8_32;
+ Vstx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| PtrTo Corereg; Dreg; Immed |],
+ "vst1_lane", store_3, [U64; S64];
+ Vstx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg; Immed |],
+ "vst1Q_lane", store_3, pf_su_8_32;
+ Vstx_lane 1,
+ [Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg; Immed |],
+ "vst1Q_lane", store_3, [U64; S64];
+
+ (* VLD2 variants. *)
+ Vldx 2, [], Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2", bits_1, pf_su_8_32;
+ Vldx 2, [Instruction_name ["vld1"]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2", bits_1, [S64; U64];
+ Vldx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |];
+ Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Qreg); CstPtrTo Corereg |],
+ "vld2Q", bits_1, pf_su_8_32;
+
+ Vldx_lane 2,
+ [Disassembles_as [Use_operands
+ [| VecArray (2, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg;
+ VecArray (2, Dreg); Immed |],
+ "vld2_lane", bits_3, P8 :: P16 :: F32 :: su_8_32;
+ Vldx_lane 2,
+ [Disassembles_as [Use_operands
+ [| VecArray (2, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Qreg); CstPtrTo Corereg;
+ VecArray (2, Qreg); Immed |],
+ "vld2Q_lane", bits_3, [P16; F32; U16; U32; S16; S32];
+
+ Vldx_dup 2,
+ [Disassembles_as [Use_operands
+ [| VecArray (2, All_elements_of_dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2_dup", bits_1, pf_su_8_32;
+ Vldx_dup 2,
+ [Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (2, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2_dup", bits_1, [S64; U64];
+
+ (* VST2 variants. *)
+ Vstx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
+ store_1, pf_su_8_32;
+ Vstx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
+ store_1, [S64; U64];
+ Vstx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |];
+ Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Qreg) |], "vst2Q",
+ store_1, pf_su_8_32;
+
+ Vstx_lane 2,
+ [Disassembles_as [Use_operands
+ [| VecArray (2, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Dreg); Immed |], "vst2_lane",
+ store_3, P8 :: P16 :: F32 :: su_8_32;
+ Vstx_lane 2,
+ [Disassembles_as [Use_operands
+ [| VecArray (2, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Qreg); Immed |], "vst2Q_lane",
+ store_3, [P16; F32; U16; U32; S16; S32];
+
+ (* VLD3 variants. *)
+ Vldx 3, [], Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3", bits_1, pf_su_8_32;
+ Vldx 3, [Instruction_name ["vld1"]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3", bits_1, [S64; U64];
+ Vldx 3, [Disassembles_as [Use_operands [| VecArray (3, Dreg);
+ CstPtrTo Corereg |];
+ Use_operands [| VecArray (3, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Qreg); CstPtrTo Corereg |],
+ "vld3Q", bits_1, P8 :: P16 :: F32 :: su_8_32;
+
+ Vldx_lane 3,
+ [Disassembles_as [Use_operands
+ [| VecArray (3, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg;
+ VecArray (3, Dreg); Immed |],
+ "vld3_lane", bits_3, P8 :: P16 :: F32 :: su_8_32;
+ Vldx_lane 3,
+ [Disassembles_as [Use_operands
+ [| VecArray (3, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Qreg); CstPtrTo Corereg;
+ VecArray (3, Qreg); Immed |],
+ "vld3Q_lane", bits_3, [P16; F32; U16; U32; S16; S32];
+
+ Vldx_dup 3,
+ [Disassembles_as [Use_operands
+ [| VecArray (3, All_elements_of_dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3_dup", bits_1, pf_su_8_32;
+ Vldx_dup 3,
+ [Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (3, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3_dup", bits_1, [S64; U64];
+
+ (* VST3 variants. *)
+ Vstx 3, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
+ store_1, pf_su_8_32;
+ Vstx 3, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
+ store_1, [S64; U64];
+ Vstx 3, [Disassembles_as [Use_operands [| VecArray (3, Dreg);
+ PtrTo Corereg |];
+ Use_operands [| VecArray (3, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Qreg) |], "vst3Q",
+ store_1, pf_su_8_32;
+
+ Vstx_lane 3,
+ [Disassembles_as [Use_operands
+ [| VecArray (3, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Dreg); Immed |], "vst3_lane",
+ store_3, P8 :: P16 :: F32 :: su_8_32;
+ Vstx_lane 3,
+ [Disassembles_as [Use_operands
+ [| VecArray (3, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Qreg); Immed |], "vst3Q_lane",
+ store_3, [P16; F32; U16; U32; S16; S32];
+
+ (* VLD4/VST4 variants. *)
+ Vldx 4, [], Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4", bits_1, pf_su_8_32;
+ Vldx 4, [Instruction_name ["vld1"]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4", bits_1, [S64; U64];
+ Vldx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ CstPtrTo Corereg |];
+ Use_operands [| VecArray (4, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Qreg); CstPtrTo Corereg |],
+ "vld4Q", bits_1, P8 :: P16 :: F32 :: su_8_32;
+
+ Vldx_lane 4,
+ [Disassembles_as [Use_operands
+ [| VecArray (4, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg;
+ VecArray (4, Dreg); Immed |],
+ "vld4_lane", bits_3, P8 :: P16 :: F32 :: su_8_32;
+ Vldx_lane 4,
+ [Disassembles_as [Use_operands
+ [| VecArray (4, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Qreg); CstPtrTo Corereg;
+ VecArray (4, Qreg); Immed |],
+ "vld4Q_lane", bits_3, [P16; F32; U16; U32; S16; S32];
+
+ Vldx_dup 4,
+ [Disassembles_as [Use_operands
+ [| VecArray (4, All_elements_of_dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4_dup", bits_1, pf_su_8_32;
+ Vldx_dup 4,
+ [Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (4, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4_dup", bits_1, [S64; U64];
+
+ Vstx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
+ store_1, pf_su_8_32;
+ Vstx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
+ store_1, [S64; U64];
+ Vstx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |];
+ Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Qreg) |], "vst4Q",
+ store_1, pf_su_8_32;
+
+ Vstx_lane 4,
+ [Disassembles_as [Use_operands
+ [| VecArray (4, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Dreg); Immed |], "vst4_lane",
+ store_3, P8 :: P16 :: F32 :: su_8_32;
+ Vstx_lane 4,
+ [Disassembles_as [Use_operands
+ [| VecArray (4, Element_of_dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Qreg); Immed |], "vst4Q_lane",
+ store_3, [P16; F32; U16; U32; S16; S32];
+
+ (* Logical operations. And. *)
+ Vand, [], All (3, Dreg), "vand", notype_2, su_8_64;
+ Vand, [], All (3, Qreg), "vandQ", notype_2, su_8_64;
+
+ (* Or. *)
+ Vorr, [], All (3, Dreg), "vorr", notype_2, su_8_64;
+ Vorr, [], All (3, Qreg), "vorrQ", notype_2, su_8_64;
+
+ (* Eor. *)
+ Veor, [], All (3, Dreg), "veor", notype_2, su_8_64;
+ Veor, [], All (3, Qreg), "veorQ", notype_2, su_8_64;
+
+ (* Bic (And-not). *)
+ Vbic, [], All (3, Dreg), "vbic", notype_2, su_8_64;
+ Vbic, [], All (3, Qreg), "vbicQ", notype_2, su_8_64;
+
+ (* Or-not. *)
+ Vorn, [], All (3, Dreg), "vorn", notype_2, su_8_64;
+ Vorn, [], All (3, Qreg), "vornQ", notype_2, su_8_64;
+ ]
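+
+(* Reading an entry (an illustrative note, not part of the original table):
+ the row
+ Vabs, [], All (2, Dreg), "vabs", elts_same_1, [S8; S16; S32; F32]
+ describes a two-operand D-register operation instantiated once per
+ element type -- roughly, the vabs_s8, vabs_s16, vabs_s32 and vabs_f32
+ intrinsics. *)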
+
+let reinterp =
+ let elems = P8 :: P16 :: F32 :: su_8_64 in
+ List.fold_right
+ (fun convto acc ->
+ let types = List.fold_right
+ (fun convfrom acc ->
+ if convfrom <> convto then
+ Cast (convto, convfrom) :: acc
+ else
+ acc)
+ elems
+ []
+ in
+ let dconv = Vreinterp, [No_op], Use_operands [| Dreg; Dreg |],
+ "vreinterpret", conv_1, types
+ and qconv = Vreinterp, [No_op], Use_operands [| Qreg; Qreg |],
+ "vreinterpretQ", conv_1, types in
+ dconv :: qconv :: acc)
+ elems
+ []
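+
+(* Illustrative note (not in the original): for each destination element
+ type the fold above yields one D-register and one Q-register entry, e.g.
+ for P8 the D-register entry is
+ Vreinterp, [No_op], Use_operands [| Dreg; Dreg |],
+ "vreinterpret", conv_1, [Cast (P8, P16); Cast (P8, F32); ...]
+ i.e. casts to p8 from every other element type in the list. *)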
+
+(* Output routines. *)
+
+let rec string_of_elt = function
+ S8 -> "s8" | S16 -> "s16" | S32 -> "s32" | S64 -> "s64"
+ | U8 -> "u8" | U16 -> "u16" | U32 -> "u32" | U64 -> "u64"
+ | I8 -> "i8" | I16 -> "i16" | I32 -> "i32" | I64 -> "i64"
+ | B8 -> "8" | B16 -> "16" | B32 -> "32" | B64 -> "64"
+ | F32 -> "f32" | P8 -> "p8" | P16 -> "p16"
+ | Conv (a, b) | Cast (a, b) -> string_of_elt a ^ "_" ^ string_of_elt b
+ | NoElts -> failwith "No elts"
+
+let string_of_elt_dots elt =
+ match elt with
+ Conv (a, b) | Cast (a, b) -> string_of_elt a ^ "." ^ string_of_elt b
+ | _ -> string_of_elt elt
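+
+(* Example (illustrative only): string_of_elt (Conv (S32, F32)) gives
+ "s32_f32", whereas string_of_elt_dots (Conv (S32, F32)) gives
+ "s32.f32". *)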
+
+let string_of_vectype vt =
+ let rec name affix = function
+ T_int8x8 -> affix "int8x8"
+ | T_int8x16 -> affix "int8x16"
+ | T_int16x4 -> affix "int16x4"
+ | T_int16x8 -> affix "int16x8"
+ | T_int32x2 -> affix "int32x2"
+ | T_int32x4 -> affix "int32x4"
+ | T_int64x1 -> affix "int64x1"
+ | T_int64x2 -> affix "int64x2"
+ | T_uint8x8 -> affix "uint8x8"
+ | T_uint8x16 -> affix "uint8x16"
+ | T_uint16x4 -> affix "uint16x4"
+ | T_uint16x8 -> affix "uint16x8"
+ | T_uint32x2 -> affix "uint32x2"
+ | T_uint32x4 -> affix "uint32x4"
+ | T_uint64x1 -> affix "uint64x1"
+ | T_uint64x2 -> affix "uint64x2"
+ | T_float32x2 -> affix "float32x2"
+ | T_float32x4 -> affix "float32x4"
+ | T_poly8x8 -> affix "poly8x8"
+ | T_poly8x16 -> affix "poly8x16"
+ | T_poly16x4 -> affix "poly16x4"
+ | T_poly16x8 -> affix "poly16x8"
+ | T_int8 -> affix "int8"
+ | T_int16 -> affix "int16"
+ | T_int32 -> affix "int32"
+ | T_int64 -> affix "int64"
+ | T_uint8 -> affix "uint8"
+ | T_uint16 -> affix "uint16"
+ | T_uint32 -> affix "uint32"
+ | T_uint64 -> affix "uint64"
+ | T_poly8 -> affix "poly8"
+ | T_poly16 -> affix "poly16"
+ | T_float32 -> affix "float32"
+ | T_immediate _ -> "const int"
+ | T_void -> "void"
+ | T_intQI -> "__builtin_neon_qi"
+ | T_intHI -> "__builtin_neon_hi"
+ | T_intSI -> "__builtin_neon_si"
+ | T_intDI -> "__builtin_neon_di"
+ | T_arrayof (num, base) ->
+ let basename = name (fun x -> x) base in
+ affix (Printf.sprintf "%sx%d" basename num)
+ | T_ptrto x ->
+ let basename = name affix x in
+ Printf.sprintf "%s *" basename
+ | T_const x ->
+ let basename = name affix x in
+ Printf.sprintf "const %s" basename
+ in
+ name (fun x -> x ^ "_t") vt
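+
+(* Examples (illustrative only): string_of_vectype T_int8x8 gives
+ "int8x8_t"; T_arrayof (2, T_int8x8) gives "int8x8x2_t"; and
+ T_ptrto (T_const T_int8) gives "const int8_t *". *)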
+
+let string_of_inttype = function
+ B_TImode -> "__builtin_neon_ti"
+ | B_EImode -> "__builtin_neon_ei"
+ | B_OImode -> "__builtin_neon_oi"
+ | B_CImode -> "__builtin_neon_ci"
+ | B_XImode -> "__builtin_neon_xi"
+
+let string_of_mode = function
+ V8QI -> "v8qi" | V4HI -> "v4hi" | V2SI -> "v2si" | V2SF -> "v2sf"
+ | DI -> "di" | V16QI -> "v16qi" | V8HI -> "v8hi" | V4SI -> "v4si"
+ | V4SF -> "v4sf" | V2DI -> "v2di" | QI -> "qi" | HI -> "hi" | SI -> "si"
+ | SF -> "sf"
+
+(* Use uppercase chars for letters which form part of the intrinsic name, but
+ should be omitted from the builtin name (the info is passed in an extra
+ argument, instead). *)
+let intrinsic_name name = String.lowercase name
+
+(* Allow the name of the builtin to be overridden by things (e.g. Flipped)
+ found in the features list. *)
+let builtin_name features name =
+ let name = List.fold_right
+ (fun el name ->
+ match el with
+ Flipped x | Builtin_name x -> x
+ | _ -> name)
+ features name in
+ let islower x = let str = String.make 1 x in (String.lowercase str) = str
+ and buf = Buffer.create (String.length name) in
+ String.iter (fun c -> if islower c then Buffer.add_char buf c) name;
+ Buffer.contents buf
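+
+(* Example (illustrative only): builtin_name [Flipped "vcgtQ"] "vcltQ"
+ first rewrites the name to "vcgtQ", then drops the uppercase 'Q'
+ (only characters unaffected by String.lowercase are kept),
+ giving "vcgt". *)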
+
+(* Transform an arity into a list of strings. *)
+let strings_of_arity a =
+ match a with
+ | Arity0 vt -> [string_of_vectype vt]
+ | Arity1 (vt1, vt2) -> [string_of_vectype vt1; string_of_vectype vt2]
+ | Arity2 (vt1, vt2, vt3) -> [string_of_vectype vt1;
+ string_of_vectype vt2;
+ string_of_vectype vt3]
+ | Arity3 (vt1, vt2, vt3, vt4) -> [string_of_vectype vt1;
+ string_of_vectype vt2;
+ string_of_vectype vt3;
+ string_of_vectype vt4]
+ | Arity4 (vt1, vt2, vt3, vt4, vt5) -> [string_of_vectype vt1;
+ string_of_vectype vt2;
+ string_of_vectype vt3;
+ string_of_vectype vt4;
+ string_of_vectype vt5]
+
+(* Suffixes on the end of builtin names that are to be stripped in order
+ to obtain the name used as an instruction. They are only stripped if
+ preceded immediately by an underscore. *)
+let suffixes_to_strip = [ "n"; "lane"; "dup" ]
+
+(* Get the possible names of an instruction corresponding to a "name" from the
+ ops table. This is done by taking the equivalent builtin name and
+ stripping any suffixes from the suffixes_to_strip list above, unless
+ the features list contains an Instruction_name entry, in which case
+ those names are used, or a Flipped entry, in which case that name is
+ used. If both such entries are present, whichever comes first in the
+ list is chosen. *)
+let get_insn_names features name =
+ let names = try
+ begin
+ match List.find (fun feature -> match feature with
+ Instruction_name _ -> true
+ | Flipped _ -> true
+ | _ -> false) features
+ with
+ Instruction_name names -> names
+ | Flipped name -> [name]
+ | _ -> assert false
+ end
+ with Not_found -> [builtin_name features name]
+ in
+ begin
+ List.map (fun name' ->
+ try
+ let underscore = String.rindex name' '_' in
+ let our_suffix = String.sub name' (underscore + 1)
+ ((String.length name') - underscore - 1)
+ in
+ let rec strip remaining_suffixes =
+ match remaining_suffixes with
+ [] -> name'
+ | s::ss when our_suffix = s -> String.sub name' 0 underscore
+ | _::ss -> strip ss
+ in
+ strip suffixes_to_strip
+ with (Not_found | Invalid_argument _) -> name') names
+ end
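+
+(* Example (illustrative only): get_insn_names [] "vshl_n" computes the
+ builtin name "vshl_n", finds the suffix "n" in suffixes_to_strip, and
+ so returns ["vshl"]. *)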
+
+(* Apply a function to each element of a list and then comma-separate
+ the resulting strings. *)
+let rec commas f elts acc =
+ match elts with
+ [] -> acc
+ | [elt] -> acc ^ (f elt)
+ | elt::elts ->
+ commas f elts (acc ^ (f elt) ^ ", ")
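+
+(* Example (illustrative only): commas string_of_elt [S8; S16; S32] ""
+ evaluates to "s8, s16, s32". *)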
+
+(* Given a list of features and the shape specified in the "ops" table, apply
+ a function to each possible shape that the instruction may have.
+ By default, this is the "shape" entry in "ops". If the features list
+ contains a Disassembles_as entry, the shapes contained in that entry are
+ mapped to corresponding outputs and returned in a list. If there is more
+ than one Disassembles_as entry, only the first is used. *)
+let analyze_all_shapes features shape f =
+ try
+ match List.find (fun feature ->
+ match feature with Disassembles_as _ -> true
+ | _ -> false)
+ features with
+ Disassembles_as shapes -> List.map f shapes
+ | _ -> assert false
+ with Not_found -> [f shape]
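+
+(* Example (illustrative only): if the features list contains
+ Disassembles_as [shape1; shape2], the result is [f shape1; f shape2];
+ otherwise it is simply [f shape]. *)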
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/netbsd-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/netbsd-elf.h
new file mode 100644
index 000000000..8a01b0fcc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/netbsd-elf.h
@@ -0,0 +1,158 @@
+/* Definitions of target machine for GNU compiler, NetBSD/arm ELF version.
+ Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (NetBSD/arm ELF)", stderr);
+
+/* arm.h defaults to ARM6 CPU. */
+
+/* This defaults us to little-endian. */
+#ifndef TARGET_ENDIAN_DEFAULT
+#define TARGET_ENDIAN_DEFAULT 0
+#endif
+
+#undef MULTILIB_DEFAULTS
+
+/* Default to ATPCS with soft-VFP. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_APCS_FRAME \
+ | TARGET_ENDIAN_DEFAULT)
+
+#undef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_ATPCS
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ } \
+ while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC NETBSD_CPP_SPEC
+
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ "-matpcs %{fpic|fpie:-k} %{fPIC|fPIE:-k}"
+
+/* Default to full VFP if -mhard-float is specified. */
+#undef SUBTARGET_ASM_FLOAT_SPEC
+#define SUBTARGET_ASM_FLOAT_SPEC \
+ "%{mhard-float:{!mfpu=*:-mfpu=vfp}} \
+ %{mfloat-abi=hard:{!mfpu=*:-mfpu=vfp}}"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "subtarget_extra_asm_spec", SUBTARGET_EXTRA_ASM_SPEC }, \
+ { "subtarget_asm_float_spec", SUBTARGET_ASM_FLOAT_SPEC }, \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+#define NETBSD_ENTRY_POINT "__start"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "-X %{mbig-endian:-EB} %{mlittle-endian:-EL} \
+ %(netbsd_link_spec)"
+
+/* Make GCC agree with <machine/ansi.h>. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* We don't have any limit on the length as our debugger is GDB. */
+#undef DBX_CONTIN_LENGTH
+
+/* NetBSD does its profiling differently to the Acorn compiler. We
+ don't need a word following the mcount call; and to skip it
+ requires either an assembly stub or use of -fomit-frame-pointer when
+ compiling the profiling functions. Since we break Acorn CC
+ compatibility below, a little more won't hurt. */
+
+#undef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ asm_fprintf (STREAM, "\tmov\t%Rip, %Rlr\n"); \
+ asm_fprintf (STREAM, "\tbl\t__mcount%s\n", \
+ (TARGET_ARM && NEED_PLT_RELOC) \
+ ? "(PLT)" : ""); \
+}
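+
+/* Illustrative note (not in the original): per profiled function this
+   emits roughly
+	mov	ip, lr
+	bl	__mcount(PLT)
+   where the "(PLT)" suffix appears only when ARM-mode code needs PLT
+   relocations.  */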
+
+/* VERY BIG NOTE: Change of structure alignment for NetBSD/arm.
+ There are consequences you should be aware of...
+
+ Normally GCC/arm uses a structure alignment of 32 for compatibility
+ with armcc. This means that structures are padded to a word
+ boundary. However this causes problems with bugged NetBSD kernel
+ code (possibly userland code as well - I have not checked every
+ binary). The nature of this bugged code is to rely on sizeof()
+ returning the correct size of various structures rounded to the
+ nearest byte (SCSI and ether code are two examples, the vm system
+ is another). This code breaks when the structure alignment is 32
+ as sizeof() will report a word-rounded size. By changing the
+ structure alignment to 8, GCC will conform to what is expected by
+ NetBSD.
+
+ This has several side effects that should be considered.
+ 1. Structures will only be aligned to the size of the largest member.
+ i.e. structures containing only bytes will be byte aligned.
+ structures containing shorts will be half word aligned.
+ structures containing ints will be word aligned.
+
+ This means structures should be padded to a word boundary if
+ alignment of 32 is required for byte structures etc.
+
+ 2. A potential performance penalty may exist if strings are no longer
+ word aligned. GCC will not be able to use word load/stores to copy
+ short strings.
+
+ This modification is not encouraged but with the present state of the
+ NetBSD source tree it is currently the only solution that meets the
+ requirements. */
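+
+/* Illustrative example (not in the original): with the default 32-bit
+   structure alignment, sizeof (struct { char c; }) is 4; with the 8-bit
+   setting below it is 1, which is what the affected NetBSD code
+   expects.  */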
+
+#undef DEFAULT_STRUCTURE_SIZE_BOUNDARY
+#define DEFAULT_STRUCTURE_SIZE_BOUNDARY 8
+
+/* Clear the instruction cache from `BEG' to `END'. This makes a
+ call to the ARM_SYNC_ICACHE architecture-specific syscall. */
+#define CLEAR_INSN_CACHE(BEG, END) \
+do \
+ { \
+ extern int sysarch(int number, void *args); \
+ struct \
+ { \
+ unsigned int addr; \
+ int len; \
+ } s; \
+ s.addr = (unsigned int)(BEG); \
+ s.len = (END) - (BEG); \
+ (void) sysarch (0, &s); \
+ } \
+while (0)
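+
+/* Illustrative use (not part of this header): libgcc's __clear_cache
+   expands this macro after code has been written to memory, e.g.
+
+     CLEAR_INSN_CACHE (tramp, tramp + TRAMPOLINE_SIZE);
+
+   which here becomes a sysarch() call flushing that address range.  */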
+
+#undef FPUTYPE_DEFAULT
+#define FPUTYPE_DEFAULT FPUTYPE_VFP
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/netbsd.h b/gcc-4.2.1-5666.3/gcc/config/arm/netbsd.h
new file mode 100644
index 000000000..4d147442c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/netbsd.h
@@ -0,0 +1,153 @@
+/* NetBSD/arm a.out version.
+ Copyright (C) 1993, 1994, 1997, 1998, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Mark Brinicombe (amb@physig.ph.kcl.ac.uk)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/NetBSD)", stderr);
+
+/* Unsigned chars produce much better code than signed. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Since we always use GAS as our assembler we support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/*#undef ASM_DECLARE_FUNCTION_NAME*/
+
+/* ARM6 family default cpu. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+
+/* Some defines for CPP.
+ arm32 is the NetBSD port name, so we always define arm32 and __arm32__. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ NETBSD_OS_CPP_BUILTINS_AOUT(); \
+ builtin_define_std ("arm32"); \
+ builtin_define_std ("unix"); \
+ builtin_define_std ("riscbsd"); \
+ } while (0)
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_cpp_spec", NETBSD_CPP_SPEC }, \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_AOUT },
+
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_float) %(cpp_endian) %(netbsd_cpp_spec) \
+"
+
+/* Because TARGET_DEFAULT sets MASK_SOFT_FLOAT. */
+#undef CPP_FLOAT_DEFAULT_SPEC
+#define CPP_FLOAT_DEFAULT_SPEC "-D__SOFTFP__"
+
+/* Pass -X to the linker so that it will strip symbols starting with 'L'. */
+#undef LINK_SPEC
+#define LINK_SPEC "-X %(netbsd_link_spec)"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#define HANDLE_SYSV_PRAGMA 1
+
+/* We don't have any limit on the length as our debugger is GDB. */
+#undef DBX_CONTIN_LENGTH
+
+/* NetBSD does its profiling differently to the Acorn compiler. We
+ don't need a word following the mcount call; and to skip it
+ requires either an assembly stub or use of -fomit-frame-pointer when
+ compiling the profiling functions. Since we break Acorn CC
+ compatibility below, a little more won't hurt. */
+
+#undef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+}
+
+/* On the ARM, `@' introduces a comment, so we must use something else
+ for .type directives. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+/* NetBSD uses the old PCC style aggregate returning conventions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* Although not normally relevant (since by default, all aggregates
+ are returned in memory) compiling some parts of libc requires
+ non-APCS style struct returns. */
+#undef RETURN_IN_MEMORY
+
+/* VERY BIG NOTE: Change of structure alignment for RiscBSD.
+ There are consequences you should be aware of...
+
+ Normally GCC/arm uses a structure alignment of 32 for compatibility
+ with armcc. This means that structures are padded to a word
+ boundary. However this causes problems with bugged NetBSD kernel
+ code (possibly userland code as well - I have not checked every
+ binary). The nature of this bugged code is to rely on sizeof()
+ returning the correct size of various structures rounded to the
+ nearest byte (SCSI and ether code are two examples, the vm system
+ is another). This code breaks when the structure alignment is 32
+ as sizeof() will report a word-rounded size. By changing the
+ structure alignment to 8, GCC will conform to what is expected by
+ NetBSD.
+
+ This has several side effects that should be considered.
+ 1. Structures will only be aligned to the size of the largest member.
+ i.e. structures containing only bytes will be byte aligned.
+ structures containing shorts will be half word aligned.
+ structures containing ints will be word aligned.
+
+ This means structures should be padded to a word boundary if
+ alignment of 32 is required for byte structures etc.
+
+ 2. A potential performance penalty may exist if strings are no longer
+ word aligned. GCC will not be able to use word load/stores to copy
+ short strings.
+
+ This modification is not encouraged but with the present state of the
+ NetBSD source tree it is currently the only solution that meets the
+ requirements. */
+#undef DEFAULT_STRUCTURE_SIZE_BOUNDARY
+#define DEFAULT_STRUCTURE_SIZE_BOUNDARY 8
+
+/* Clear the instruction cache from `BEG' to `END'. This makes a
+ call to the ARM32_SYNC_ICACHE architecture-specific syscall. */
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ extern int sysarch(int number, void *args); \
+ struct { \
+ unsigned int addr; \
+ int len; \
+ } s; \
+ s.addr = (unsigned int)(BEG); \
+ s.len = (END) - (BEG); \
+ (void)sysarch(0, &s); \
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/pe.c b/gcc-4.2.1-5666.3/gcc/config/arm/pe.c
new file mode 100644
index 000000000..f2f67d5a9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/pe.c
@@ -0,0 +1,269 @@
+/* Routines for GCC for ARM/pe.
+ Copyright (C) 1995, 1996, 2000, 2001, 2002, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "output.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+#include "tm_p.h"
+
+extern int current_function_anonymous_args;
+
+
+/* Return nonzero if DECL is a dllexport'd object. */
+
+tree current_class_type; /* FIXME */
+
+int
+arm_dllexport_p (decl)
+ tree decl;
+{
+ tree exp;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ exp = lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl));
+ if (exp)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if DECL is a dllimport'd object. */
+
+int
+arm_dllimport_p (decl)
+ tree decl;
+{
+ tree imp;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TARGET_NOP_FUN_DLLIMPORT)
+ return 0;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ imp = lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl));
+ if (imp)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if SYMBOL is marked as being dllexport'd. */
+
+int
+arm_dllexport_name_p (symbol)
+ const char * symbol;
+{
+ return symbol[0] == ARM_PE_FLAG_CHAR && symbol[1] == 'e' && symbol[2] == '.';
+}
+
+/* Return nonzero if SYMBOL is marked as being dllimport'd. */
+
+int
+arm_dllimport_name_p (symbol)
+ const char * symbol;
+{
+ return symbol[0] == ARM_PE_FLAG_CHAR && symbol[1] == 'i' && symbol[2] == '.';
+}
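+
+/* Note (added for clarity): with ARM_PE_FLAG_CHAR defined as '@' in pe.h,
+   exported symbols are encoded as "@e.name" and imported ones as
+   "@i.__imp_name" by the marking routines below; the two predicates
+   above simply test for those prefixes.  */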
+
+/* Mark a DECL as being dllexport'd.
+ Note that we override the previous setting (e.g.: dllimport). */
+
+void
+arm_mark_dllexport (decl)
+ tree decl;
+{
+ const char * oldname;
+ char * newname;
+ rtx rtlname;
+ tree idp;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == MEM)
+ rtlname = XEXP (rtlname, 0);
+ gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
+ oldname = XSTR (rtlname, 0);
+
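+ /* If the symbol was previously marked dllimport, skip the 9-character
+ "@i.__imp_" prefix so it can be re-encoded for export (comment added
+ for clarity; see arm_mark_dllimport below). */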
+ if (arm_dllimport_name_p (oldname))
+ oldname += 9;
+ else if (arm_dllexport_name_p (oldname))
+ return; /* already done */
+
+ newname = alloca (strlen (oldname) + 4);
+ sprintf (newname, "%ce.%s", ARM_PE_FLAG_CHAR, oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the strings' addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
+}
+
+/* Mark a DECL as being dllimport'd. */
+
+void
+arm_mark_dllimport (decl)
+ tree decl;
+{
+ const char * oldname;
+ char * newname;
+ tree idp;
+ rtx rtlname, newrtl;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+
+ if (GET_CODE (rtlname) == MEM)
+ rtlname = XEXP (rtlname, 0);
+ gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
+ oldname = XSTR (rtlname, 0);
+
+ gcc_assert (!arm_dllexport_name_p (oldname));
+ if (arm_dllimport_name_p (oldname))
+ return; /* already done */
+
+ /* ??? One can well ask why we're making these checks here,
+ and that would be a good question. */
+
+ /* Imported variables can't be initialized. */
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_VIRTUAL_P (decl)
+ && DECL_INITIAL (decl))
+ {
+ error ("initialized variable %q+D is marked dllimport", decl);
+ return;
+ }
+ /* Nor can they be static. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl)
+ && 0 /*???*/)
+ {
+ error ("static variable %q+D is marked dllimport", decl);
+ return;
+ }
+
+ /* `extern' needn't be specified with dllimport.
+ Specify `extern' now and hope for the best. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+
+ newname = alloca (strlen (oldname) + 11);
+ sprintf (newname, "%ci.__imp_%s", ARM_PE_FLAG_CHAR, oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the strings' addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ newrtl = gen_rtx_MEM (Pmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ IDENTIFIER_POINTER (idp)));
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+}
+
+void
+arm_pe_encode_section_info (decl, rtl, first)
+ tree decl;
+ rtx rtl;
+ int first ATTRIBUTE_UNUSED;
+{
+ /* This bit is copied from arm_encode_section_info. */
+ if (optimize > 0 && TREE_CONSTANT (decl))
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+
+ /* Mark the decl so we can tell from the rtl whether the object is
+ dllexport'd or dllimport'd. */
+ if (arm_dllexport_p (decl))
+ arm_mark_dllexport (decl);
+ else if (arm_dllimport_p (decl))
+ arm_mark_dllimport (decl);
+ /* It might be that DECL has already been marked as dllimport, but a
+ subsequent definition nullified that. The attribute is gone but
+ DECL_RTL still has @i.__imp_foo. We need to remove that. */
+ else if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != NULL_RTX
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
+ && arm_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
+ {
+ const char *oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
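+ /* Skip the 9-character "@i.__imp_" prefix added by arm_mark_dllimport
+ (comment added for clarity). */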
+ tree idp = get_identifier (oldname + 9);
+ rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
+
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
+ ??? We leave these alone for now. */
+ }
+}
+
+void
+arm_pe_unique_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ int len;
+ const char * name;
+ char * string;
+ const char * prefix;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = arm_strip_name_encoding (name);
+
+ /* The object is put in, for example, section .text$foo.
+ The linker will then ultimately place it in .text
+ (everything from the $ on is stripped). */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ prefix = ".text$";
+ else if (decl_readonly_section (decl, reloc))
+ prefix = ".rdata$";
+ else
+ prefix = ".data$";
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+ sprintf (string, "%s%s", prefix, name);
+
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/pe.h b/gcc-4.2.1-5666.3/gcc/config/arm/pe.h
new file mode 100644
index 000000000..f96cd66a9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/pe.h
@@ -0,0 +1,149 @@
+/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
+ Copyright (C) 1995, 1996, 1999, 2000, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Enable PE specific code. */
+#define ARM_PE 1
+
+#define ARM_PE_FLAG_CHAR '@'
+
+/* Ensure that @x. will be stripped from the function name. */
+#undef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS \
+ case ARM_PE_FLAG_CHAR: return 3;
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/pe)", stderr)
+
+/* Get tree.c to declare a target-specific specialization of
+ merge_decl_attributes. */
+#define TARGET_DLLIMPORT_DECL_ATTRIBUTES 1
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__pe__"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_NOP_FUN_DLLIMPORT)
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "marm", "mlittle-endian", "msoft-float", "mno-thumb-interwork" }
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* r11 is fixed. */
+#undef SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#define SUBTARGET_CONDITIONAL_REGISTER_USAGE \
+ fixed_regs [11] = 1; \
+ call_used_regs [11] = 1;
+
+
+/* PE/COFF uses explicit import from shared libraries. */
+#define MULTIPLE_SYMBOL_SPACES 1
+
+#define TARGET_ASM_UNIQUE_SECTION arm_pe_unique_section
+#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* Switch into a generic section. */
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION default_pe_asm_named_section
+
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+ asm_fprintf (STREAM, "%U%s", arm_strip_name_encoding (NAME))
+
+/* Output a function definition label. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ arm_strip_name_encoding (NAME)); \
+ switch_to_section (function_section (DECL)); \
+ } \
+ ARM_DECLARE_FUNCTION_NAME (STREAM, NAME, DECL); \
+ if (TARGET_THUMB) \
+ fprintf (STREAM, "\t.code 16\n"); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ while (0)
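+
+/* For illustration only: with the definitions above, a dllexported Thumb
+   function "foo" (a hypothetical name; the exact label spelling depends
+   on the name encoding and USER_LABEL_PREFIX) is emitted roughly as:
+
+       .section .drectve
+       .ascii " -export:foo"
+       .text
+       .code 16
+   _foo:
+
+   The .drectve record is what instructs the PE linker to export the
+   symbol.  */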
+
+/* Output a common block. */
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ do \
+ { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf ((STREAM), "\t.ascii \" -export:%s\"\n",\
+ arm_strip_name_encoding (NAME)); \
+ } \
+ if (! arm_dllimport_name_p (NAME)) \
+ { \
+ fprintf ((STREAM), "\t.comm\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ asm_fprintf ((STREAM), ", %d\t%@ %d\n", \
+ (int)(ROUNDED), (int)(SIZE)); \
+ } \
+ } \
+ while (0)
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ section *save_section = in_section; \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n",\
+ arm_strip_name_encoding (NAME)); \
+ switch_to_section (save_section); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+ } \
+ while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+
+#define drectve_section() \
+ (fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP), \
+ in_section = NULL)
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/pe.opt b/gcc-4.2.1-5666.3/gcc/config/arm/pe.opt
new file mode 100644
index 000000000..f3d6d8b53
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/pe.opt
@@ -0,0 +1,24 @@
+; PE-specific options for the ARM port
+
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mnop-fun-dllimport
+Target Report Mask(NOP_FUN_DLLIMPORT)
+Ignore dllimport attribute for functions
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/pr-support.c b/gcc-4.2.1-5666.3/gcc/config/arm/pr-support.c
new file mode 100644
index 000000000..97f91519d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/pr-support.c
@@ -0,0 +1,409 @@
+/* ARM EABI compliant unwinding routines
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+#include "unwind.h"
+
+/* We add a prototype for abort here to avoid creating a dependency on
+ target headers. */
+extern void abort (void);
+
+typedef struct _ZSt9type_info type_info; /* This names the C++ type_info type.  */
+
+/* Misc constants. */
+#define R_IP 12
+#define R_SP 13
+#define R_LR 14
+#define R_PC 15
+
+#define uint32_highbit (((_uw) 1) << 31)
+
+void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
+
+/* Unwind descriptors. */
+
+typedef struct
+{
+ _uw16 length;
+ _uw16 offset;
+} EHT16;
+
+typedef struct
+{
+ _uw length;
+ _uw offset;
+} EHT32;
+
+/* Calculate the address encoded by a 31-bit self-relative offset at address
+ P. Copy of routine in unwind-arm.c. */
+
+static inline _uw
+selfrel_offset31 (const _uw *p)
+{
+ _uw offset;
+
+ offset = *p;
+ /* Sign extend to 32 bits. */
+ if (offset & (1 << 30))
+ offset |= 1u << 31;
+
+ return offset + (_uw) p;
+}
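+
+/* For example, if *P were 0x7ffffff4, bit 30 is set, so the value
+   sign-extends to 0xfffffff4 (-12) and the routine returns the address
+   twelve bytes before P.  */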
+
+
+/* Personality routine helper functions. */
+
+#define CODE_FINISH (0xb0)
+
+/* Return the next byte of unwinding information, or CODE_FINISH if there is
+ no data remaining. */
+static inline _uw8
+next_unwind_byte (__gnu_unwind_state * uws)
+{
+ _uw8 b;
+
+ if (uws->bytes_left == 0)
+ {
+ /* Load another word */
+ if (uws->words_left == 0)
+ return CODE_FINISH; /* Nothing left. */
+ uws->words_left--;
+ uws->data = *(uws->next++);
+ uws->bytes_left = 3;
+ }
+ else
+ uws->bytes_left--;
+
+ /* Extract the most significant byte. */
+ b = (uws->data >> 24) & 0xff;
+ uws->data <<= 8;
+ return b;
+}
+
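+/* Summary of the opcode encodings handled below (the ARM EHABI unwinding
+   tables document is the authoritative reference):
+
+     00xxxxxx            vsp += (xxxxxx << 2) + 4
+     01xxxxxx            vsp -= (xxxxxx << 2) + 4
+     1000iiii iiiiiiii   pop r4-r15 under mask (0x8000 = refuse to unwind)
+     1001nnnn            vsp = r[nnnn]         (13 and 15 are reserved)
+     10100nnn            pop r4-r[4+nnn]
+     10101nnn            pop r4-r[4+nnn], r14
+     10110000            finish
+     10110001 0000iiii   pop r0-r3 under mask
+     10110010 uleb128    vsp += 0x204 + (uleb128 << 2)
+
+   The remaining 0xb3-0xbf and 0xc0-0xdf codes pop VFP, FPA and iWMMXt
+   registers in the various formats decoded below.  */
+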
+/* Execute the unwinding instructions described by UWS. */
+_Unwind_Reason_Code
+__gnu_unwind_execute (_Unwind_Context * context, __gnu_unwind_state * uws)
+{
+ _uw op;
+ int set_pc;
+ _uw reg;
+
+ set_pc = 0;
+ for (;;)
+ {
+ op = next_unwind_byte (uws);
+ if (op == CODE_FINISH)
+ {
+ /* If we haven't already set pc then copy it from lr. */
+ if (!set_pc)
+ {
+ _Unwind_VRS_Get (context, _UVRSC_CORE, R_LR, _UVRSD_UINT32,
+ &reg);
+ _Unwind_VRS_Set (context, _UVRSC_CORE, R_PC, _UVRSD_UINT32,
+ &reg);
+ set_pc = 1;
+ }
+ /* Drop out of the loop. */
+ break;
+ }
+ if ((op & 0x80) == 0)
+ {
+ /* vsp = vsp +- (imm6 << 2 + 4). */
+ _uw offset;
+
+ offset = ((op & 0x3f) << 2) + 4;
+ _Unwind_VRS_Get (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32, &reg);
+ if (op & 0x40)
+ reg -= offset;
+ else
+ reg += offset;
+ _Unwind_VRS_Set (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32, &reg);
+ continue;
+ }
+
+ if ((op & 0xf0) == 0x80)
+ {
+ op = (op << 8) | next_unwind_byte (uws);
+ if (op == 0x8000)
+ {
+ /* Refuse to unwind. */
+ return _URC_FAILURE;
+ }
+ /* Pop r4-r15 under mask. */
+ op = (op << 4) & 0xfff0;
+ if (_Unwind_VRS_Pop (context, _UVRSC_CORE, op, _UVRSD_UINT32)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ if (op & (1 << R_PC))
+ set_pc = 1;
+ continue;
+ }
+ if ((op & 0xf0) == 0x90)
+ {
+ op &= 0xf;
+ if (op == 13 || op == 15)
+ /* Reserved. */
+ return _URC_FAILURE;
+ /* vsp = r[nnnn]. */
+ _Unwind_VRS_Get (context, _UVRSC_CORE, op, _UVRSD_UINT32, &reg);
+ _Unwind_VRS_Set (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32, &reg);
+ continue;
+ }
+ if ((op & 0xf0) == 0xa0)
+ {
+ /* Pop r4-r[4+nnn], [lr]. */
+ _uw mask;
+
+ mask = (0xff0 >> (7 - (op & 7))) & 0xff0;
+ if (op & 8)
+ mask |= (1 << R_LR);
+ if (_Unwind_VRS_Pop (context, _UVRSC_CORE, mask, _UVRSD_UINT32)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if ((op & 0xf0) == 0xb0)
+ {
+ /* op == 0xb0 already handled. */
+ if (op == 0xb1)
+ {
+ op = next_unwind_byte (uws);
+ if (op == 0 || ((op & 0xf0) != 0))
+ /* Spare. */
+ return _URC_FAILURE;
+ /* Pop r0-r3 under mask. */
+ if (_Unwind_VRS_Pop (context, _UVRSC_CORE, op, _UVRSD_UINT32)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if (op == 0xb2)
+ {
+ /* vsp = vsp + 0x204 + (uleb128 << 2). */
+ int shift;
+
+ _Unwind_VRS_Get (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32,
+ &reg);
+ op = next_unwind_byte (uws);
+ shift = 2;
+ while (op & 0x80)
+ {
+ reg += ((op & 0x7f) << shift);
+ shift += 7;
+ op = next_unwind_byte (uws);
+ }
+ reg += ((op & 0x7f) << shift) + 0x204;
+ _Unwind_VRS_Set (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32,
+ &reg);
+ continue;
+ }
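+ /* For example, the sequence 0xb2 0x83 0x01 encodes the uleb128
+ value 131 (0x03 | (0x01 << 7)), giving vsp += 0x204 + (131 << 2),
+ an advance of 1040 bytes.  */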
+ if (op == 0xb3)
+ {
+ /* Pop VFP registers with fldmx. */
+ op = next_unwind_byte (uws);
+ op = ((op & 0xf0) << 12) | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_VFP, op, _UVRSD_VFPX)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if ((op & 0xfc) == 0xb4)
+ {
+ /* Pop FPA E[4]-E[4+nn]. */
+ op = 0x40000 | ((op & 3) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_FPA, op, _UVRSD_FPAX)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ /* op & 0xf8 == 0xb8. */
+ /* Pop VFP D[8]-D[8+nnn] with fldmx. */
+ op = 0x80000 | ((op & 7) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_VFP, op, _UVRSD_VFPX)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if ((op & 0xf0) == 0xc0)
+ {
+ if (op == 0xc6)
+ {
+ /* Pop iWMMXt D registers. */
+ op = next_unwind_byte (uws);
+ op = ((op & 0xf0) << 12) | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_WMMXD, op, _UVRSD_UINT64)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if (op == 0xc7)
+ {
+ op = next_unwind_byte (uws);
+ if (op == 0 || (op & 0xf0) != 0)
+ /* Spare. */
+ return _URC_FAILURE;
+ /* Pop iWMMXt wCGR{3,2,1,0} under mask. */
+ if (_Unwind_VRS_Pop (context, _UVRSC_WMMXC, op, _UVRSD_UINT32)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if ((op & 0xf8) == 0xc0)
+ {
+ /* Pop iWMMXt wR[10]-wR[10+nnn]. */
+ op = 0xa0000 | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_WMMXD, op, _UVRSD_UINT64)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ if (op == 0xc8)
+ {
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#ifndef __VFP_FP__
+ /* Pop FPA registers. */
+ op = next_unwind_byte (uws);
+ op = ((op & 0xf0) << 12) | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_FPA, op, _UVRSD_FPAX)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+#else
+ /* Pop VFPv3 registers D[16+ssss]-D[16+ssss+cccc] with vldm. */
+ op = next_unwind_byte (uws);
+ op = (((op & 0xf0) + 16) << 12) | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_VFP, op, _UVRSD_DOUBLE)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+#endif
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ if (op == 0xc9)
+ {
+ /* Pop VFP registers with fldmd. */
+ op = next_unwind_byte (uws);
+ op = ((op & 0xf0) << 12) | ((op & 0xf) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_VFP, op, _UVRSD_DOUBLE)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ /* Spare. */
+ return _URC_FAILURE;
+ }
+ if ((op & 0xf8) == 0xd0)
+ {
+ /* Pop VFP D[8]-D[8+nnn] with fldmd. */
+ op = 0x80000 | ((op & 7) + 1);
+ if (_Unwind_VRS_Pop (context, _UVRSC_VFP, op, _UVRSD_DOUBLE)
+ != _UVRSR_OK)
+ return _URC_FAILURE;
+ continue;
+ }
+ /* Spare. */
+ return _URC_FAILURE;
+ }
+ return _URC_OK;
+}
+
+
+/* Execute the unwinding instructions associated with a frame. UCBP and
+ CONTEXT are the current exception object and virtual CPU state
+ respectively. */
+
+_Unwind_Reason_Code
+__gnu_unwind_frame (_Unwind_Control_Block * ucbp, _Unwind_Context * context)
+{
+ _uw *ptr;
+ __gnu_unwind_state uws;
+
+ ptr = (_uw *) ucbp->pr_cache.ehtp;
+ /* Skip over the personality routine address. */
+ ptr++;
+ /* Setup the unwinder state. */
+ uws.data = (*ptr) << 8;
+ uws.next = ptr + 1;
+ uws.bytes_left = 3;
+ uws.words_left = ((*ptr) >> 24) & 0xff;
+
+ return __gnu_unwind_execute (context, &uws);
+}
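+
+/* A sketch of the table entry decoded above (consistent with the code,
+   not a normative layout description):
+
+     word 0:        personality routine, as a self-relative offset
+     word 1:        N | op | op | op   -- N = count of extra opcode words;
+                    opcodes are consumed most significant byte first
+     words 2..N+1:  remaining unwind opcodes  */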
+
+/* Get the _Unwind_Control_Block from an _Unwind_Context. */
+
+static inline _Unwind_Control_Block *
+unwind_UCB_from_context (_Unwind_Context * context)
+{
+ return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP);
+}
+
+/* Get the start address of the function being unwound. */
+
+_Unwind_Ptr
+_Unwind_GetRegionStart (_Unwind_Context * context)
+{
+ _Unwind_Control_Block *ucbp;
+
+ ucbp = unwind_UCB_from_context (context);
+ return (_Unwind_Ptr) ucbp->pr_cache.fnstart;
+}
+
+/* Find the Language specific exception data. */
+
+void *
+_Unwind_GetLanguageSpecificData (_Unwind_Context * context)
+{
+ _Unwind_Control_Block *ucbp;
+ _uw *ptr;
+
+ /* Get a pointer to the exception table entry. */
+ ucbp = unwind_UCB_from_context (context);
+ ptr = (_uw *) ucbp->pr_cache.ehtp;
+ /* Skip the personality routine address. */
+ ptr++;
+ /* Skip the unwind opcodes. */
+ ptr += (((*ptr) >> 24) & 0xff) + 1;
+
+ return ptr;
+}
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+/* These two should never be used. */
+
+_Unwind_Ptr
+_Unwind_GetDataRelBase (_Unwind_Context *context __attribute__ ((unused)))
+{
+ abort ();
+}
+
+_Unwind_Ptr
+_Unwind_GetTextRelBase (_Unwind_Context *context __attribute__ ((unused)))
+{
+ abort ();
+}
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/predicates.md b/gcc-4.2.1-5666.3/gcc/config/arm/predicates.md
new file mode 100644
index 000000000..941627be9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/predicates.md
@@ -0,0 +1,589 @@
+;; Predicate definitions for ARM and Thumb
+;; Copyright (C) 2004 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_predicate "s_register_operand"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+})
+
+;; Any hard register.
+(define_predicate "arm_hard_register_operand"
+ (match_code "reg")
+{
+ return REGNO (op) < FIRST_PSEUDO_REGISTER;
+})
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; A low register.
+(define_predicate "low_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) <= LAST_LO_REGNUM")))
+
+;; A low register or const_int.
+(define_predicate "low_reg_or_int_operand"
+ (ior (match_code "const_int")
+ (match_operand 0 "low_register_operand")))
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; Any core register, or any pseudo.
+(define_predicate "arm_general_register_operand"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return (GET_CODE (op) == REG
+ && (REGNO (op) <= LAST_ARM_REGNUM
+ || REGNO (op) >= FIRST_PSEUDO_REGISTER));
+})
+
+;; APPLE LOCAL begin ARM add this peephole
+;; Any Thumb low register.
+(define_predicate "thumb_low_register_operand"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return (GET_CODE (op) == REG
+ && REGNO (op) <= LAST_LO_REGNUM);
+})
+;; APPLE LOCAL end ARM add this peephole
+
+(define_predicate "f_register_operand"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
+})
+
+;; Reg, subreg(reg) or const_int.
+(define_predicate "reg_or_int_operand"
+ (ior (match_code "const_int")
+ (match_operand 0 "s_register_operand")))
+
+(define_predicate "arm_immediate_operand"
+ (and (match_code "const_int")
+ (match_test "const_ok_for_arm (INTVAL (op))")))
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_predicate "arm_immediate64_operand"
+ (and (match_code "const_int,const_double")
+ (match_test "const64_ok_for_arm_immediate (op)")))
+
+(define_predicate "arm_add_immediate64_operand"
+ (and (match_code "const_int,const_double")
+ (match_test "const64_ok_for_arm_add (op)")))
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_predicate "arm_neg_immediate_operand"
+ (and (match_code "const_int")
+ (match_test "const_ok_for_arm (-INTVAL (op))")))
+
+(define_predicate "arm_not_immediate_operand"
+ (and (match_code "const_int")
+ (match_test "const_ok_for_arm (~INTVAL (op))")))
+
+;; Something valid on the RHS of an ARM data-processing instruction
+(define_predicate "arm_rhs_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (match_operand 0 "arm_immediate_operand")))
+
+;; APPLE LOCAL begin 5831562 long long constants
+(define_predicate "arm_rhs64_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (match_operand 0 "arm_immediate64_operand")))
+
+(define_predicate "arm_add64_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (match_operand 0 "arm_add_immediate64_operand")))
+;; APPLE LOCAL end 5831562 long long constants
+
+(define_predicate "arm_rhsm_operand"
+ (ior (match_operand 0 "arm_rhs_operand")
+ (match_operand 0 "memory_operand")))
+
+(define_predicate "arm_add_operand"
+ (ior (match_operand 0 "arm_rhs_operand")
+ (match_operand 0 "arm_neg_immediate_operand")))
+
+(define_predicate "arm_addimm_operand"
+ (ior (match_operand 0 "arm_immediate_operand")
+ (match_operand 0 "arm_neg_immediate_operand")))
+
+(define_predicate "arm_not_operand"
+ (ior (match_operand 0 "arm_rhs_operand")
+ (match_operand 0 "arm_not_immediate_operand")))
+
+;; True if the operand is a memory reference which contains an
+;; offsettable address.
+(define_predicate "offsettable_memory_operand"
+ (and (match_code "mem")
+ (match_test
+ "offsettable_address_p (reload_completed | reload_in_progress,
+ mode, XEXP (op, 0))")))
+
+;; True if the operand is a memory operand that does not have an
+;; automodified base register (and thus will not generate output reloads).
+(define_predicate "call_memory_operand"
+ (and (match_code "mem")
+ (and (match_test "GET_RTX_CLASS (GET_CODE (XEXP (op, 0)))
+ != RTX_AUTOINC")
+ (match_operand 0 "memory_operand"))))
+
+(define_predicate "arm_reload_memory_operand"
+ (and (match_code "mem,reg,subreg")
+ (match_test "(!CONSTANT_P (op)
+ && (true_regnum(op) == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)))")))
+
+;; APPLE LOCAL begin 6160917
+;; Allow any mem reference through here. By doing this, unhandled cases
+;; in the SECONDARY_*_RELOAD_CLASS macros produce an assertion failure
+;; in neon_reload_{in,out} instead of being silently ignored.
+;; We don't use memory_operand because it fails for out-of-range
+;; indexed addressing.
+(define_predicate "neon_reload_mem_operand"
+ (match_code "mem"))
+;; APPLE LOCAL end 6160917
+
+;; True for valid operands for the RHS of floating-point insns.
+;; Allows regs or certain consts on FPA, just regs for everything else.
+(define_predicate "arm_float_rhs_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (and (match_code "const_double")
+ (match_test "TARGET_FPA && arm_const_double_rtx (op)"))))
+
+(define_predicate "arm_float_add_operand"
+ (ior (match_operand 0 "arm_float_rhs_operand")
+ (and (match_code "const_double")
+ (match_test "TARGET_FPA && neg_const_double_rtx_ok_for_fpa (op)"))))
+
+(define_predicate "vfp_compare_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (and (match_code "const_double")
+ (match_test "arm_const_double_rtx (op)"))))
+
+(define_predicate "arm_float_compare_operand"
+ (if_then_else (match_test "TARGET_VFP")
+ (match_operand 0 "vfp_compare_operand")
+ (match_operand 0 "arm_float_rhs_operand")))
+
+;; True for valid index operands.
+(define_predicate "index_operand"
+ (ior (match_operand 0 "s_register_operand")
+ (and (match_operand 0 "immediate_operand")
+ (match_test "(GET_CODE (op) != CONST_INT
+ || (INTVAL (op) < 4096 && INTVAL (op) > -4096))"))))
+
+;; True for operators that can be combined with a shift in ARM state.
+(define_special_predicate "shiftable_operator"
+ (and (match_code "plus,minus,ior,xor,and")
+ (match_test "mode == GET_MODE (op)")))
+
+;; APPLE LOCAL begin ARM 4382996 improve assignments of NE
+;; True for binary operators that can set the condition codes as a side effect,
+;; and that don't have early clobber semantics.
+(define_special_predicate "binary_cc_noclobber_operator"
+ (and (match_code "plus,minus,ior,xor,and,ashift,ashiftrt,lshiftrt")
+ (match_test "mode == GET_MODE (op)")))
+;; APPLE LOCAL end ARM 4382996 improve assignments of NE
+
+;; True for logical binary operators.
+(define_special_predicate "logical_binary_operator"
+ (and (match_code "ior,xor,and")
+ (match_test "mode == GET_MODE (op)")))
+
+;; True for shift operators.
+(define_special_predicate "shift_operator"
+ (and (ior (ior (and (match_code "mult")
+ (match_test "power_of_two_operand (XEXP (op, 1), mode)"))
+ (and (match_code "rotate")
+ (match_test "GET_CODE (XEXP (op, 1)) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) < 32")))
+ (match_code "ashift,ashiftrt,lshiftrt,rotatert"))
+ (match_test "mode == GET_MODE (op)")))
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; True for operators that have 16-bit thumb variants.
+(define_special_predicate "thumb_16bit_operator"
+ (match_code "plus,minus,and,ior,xor"))
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+;; True for EQ & NE
+(define_special_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+;; True for comparisons other than LTGT or UNEQ.
+(define_special_predicate "arm_comparison_operator"
+ (match_code "eq,ne,le,lt,ge,gt,geu,gtu,leu,ltu,unordered,ordered,unlt,unle,unge,ungt"))
+
+(define_special_predicate "minmax_operator"
+ (and (match_code "smin,smax,umin,umax")
+ (match_test "mode == GET_MODE (op)")))
+
+(define_special_predicate "cc_register"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) == CC_REGNUM")
+ (ior (match_test "mode == GET_MODE (op)")
+ (match_test "mode == VOIDmode && GET_MODE_CLASS (GET_MODE (op)) == MODE_CC")))))
+
+(define_special_predicate "dominant_cc_register"
+ (match_code "reg")
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (op);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return false;
+ }
+
+ return (cc_register (op, mode)
+ && (mode == CC_DNEmode
+ || mode == CC_DEQmode
+ || mode == CC_DLEmode
+ || mode == CC_DLTmode
+ || mode == CC_DGEmode
+ || mode == CC_DGTmode
+ || mode == CC_DLEUmode
+ || mode == CC_DLTUmode
+ || mode == CC_DGEUmode
+ || mode == CC_DGTUmode));
+})
+
+(define_special_predicate "arm_extendqisi_mem_op"
+ (and (match_operand 0 "memory_operand")
+ (match_test "arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND,
+ 0)")))
+
+(define_predicate "power_of_two_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT value = INTVAL (op);
+
+ return value != 0 && (value & (value - 1)) == 0;
+})
+
+(define_predicate "nonimmediate_di_operand"
+ (match_code "reg,subreg,mem")
+{
+ if (s_register_operand (op, mode))
+ return true;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return GET_CODE (op) == MEM && memory_address_p (DImode, XEXP (op, 0));
+})
+
+(define_predicate "di_operand"
+ (ior (match_code "const_int,const_double")
+ (and (match_code "reg,subreg,mem")
+ (match_operand 0 "nonimmediate_di_operand"))))
+
+(define_predicate "nonimmediate_soft_df_operand"
+ (match_code "reg,subreg,mem")
+{
+ if (s_register_operand (op, mode))
+ return true;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return GET_CODE (op) == MEM && memory_address_p (DFmode, XEXP (op, 0));
+})
+
+(define_predicate "soft_df_operand"
+ (ior (match_code "const_double")
+ (and (match_code "reg,subreg,mem")
+ (match_operand 0 "nonimmediate_soft_df_operand"))))
+
+(define_predicate "const_shift_operand"
+ (and (match_code "const_int")
+ (ior (match_operand 0 "power_of_two_operand")
+ (match_test "((unsigned HOST_WIDE_INT) INTVAL (op)) < 32"))))
+
+
+(define_special_predicate "load_multiple_operation"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return false;
+
+ /* Check to see if this might be a write-back. */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully. */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
+ return false;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
+ return false;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
+ return false;
+ }
+
+ return true;
+})
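+
+;; For illustration (register numbers hypothetical): an ldm with
+;; write-back loading r4-r6 from the address in r0 matches as
+;;   (parallel [(set (reg:SI 0) (plus:SI (reg:SI 0) (const_int 12)))
+;;              (set (reg:SI 4) (mem:SI (reg:SI 0)))
+;;              (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0) (const_int 4))))
+;;              (set (reg:SI 6) (mem:SI (plus:SI (reg:SI 0) (const_int 8))))])
+;; where the write-back increment is (count - 1) * 4 = 12.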
+
+(define_special_predicate "store_multiple_operation"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return false;
+
+ /* Check to see if this might be a write-back. */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully. */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
+ return false;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
+ return false;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
+ return false;
+ }
+
+ return true;
+})
+
+(define_special_predicate "multi_register_push"
+ (match_code "parallel")
+{
+ if ((GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
+ || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
+ return false;
+
+ return true;
+})
+
+;;-------------------------------------------------------------------------
+;;
+;; Thumb predicates
+;;
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_predicate "thumb1_cmp_operand"
+ (ior (and (match_code "reg,subreg")
+ (match_operand 0 "s_register_operand"))
+ (and (match_code "const_int")
+ (match_test "((unsigned HOST_WIDE_INT) INTVAL (op)) < 256"))))
+
+;; APPLE LOCAL v7 support. Merge from mainline
+(define_predicate "thumb1_cmpneg_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) < 0 && INTVAL (op) > -256")))
+
+;; Return TRUE if a result can be stored in OP without clobbering the
+;; condition code register. Prior to reload we only accept a
+;; register. After reload we have to be able to handle memory as
+;; well, since a pseudo may not get a hard reg and reload cannot
+;; handle output-reloads on jump insns.
+
+;; We could possibly handle mem before reload as well, but that might
+;; complicate things with the need to handle increment
+;; side-effects.
+(define_predicate "thumb_cbrch_target_operand"
+ (and (match_code "reg,subreg,mem")
+ (ior (match_operand 0 "s_register_operand")
+ (and (match_test "reload_in_progress || reload_completed")
+ (match_operand 0 "memory_operand")))))
+
+;;-------------------------------------------------------------------------
+;;
+;; MAVERICK predicates
+;;
+
+(define_predicate "cirrus_register_operand"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return (GET_CODE (op) == REG
+ && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
+ || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
+})
+
+(define_predicate "cirrus_fp_register"
+ (match_code "reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
+})
+
+(define_predicate "cirrus_shift_const"
+ (and (match_code "const_int")
+ (match_test "((unsigned HOST_WIDE_INT) INTVAL (op)) < 64")))
+
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+
+;; Neon predicates
+
+(define_predicate "const_multiple_of_8_operand"
+ (match_code "const_int")
+{
+ unsigned HOST_WIDE_INT val = INTVAL (op);
+ return (val & 7) == 0;
+})
+
+(define_predicate "imm_for_neon_mov_operand"
+ (match_code "const_vector")
+{
+ return neon_immediate_valid_for_move (op, mode, NULL, NULL);
+})
+
+(define_predicate "imm_for_neon_logic_operand"
+ (match_code "const_vector")
+{
+ return neon_immediate_valid_for_logic (op, mode, 0, NULL, NULL);
+})
+
+(define_predicate "imm_for_neon_inv_logic_operand"
+ (match_code "const_vector")
+{
+ return neon_immediate_valid_for_logic (op, mode, 1, NULL, NULL);
+})
+
+(define_predicate "neon_logic_op2"
+ (ior (match_operand 0 "imm_for_neon_logic_operand")
+ (match_operand 0 "s_register_operand")))
+
+(define_predicate "neon_inv_logic_op2"
+ (ior (match_operand 0 "imm_for_neon_inv_logic_operand")
+ (match_operand 0 "s_register_operand")))
+
+;; TODO: We could check lane numbers more precisely based on the mode.
+(define_predicate "neon_lane_number"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 7")))
+
+
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; APPLE LOCAL begin ARM pic support
+;; Allow local symbols and stub references
+(define_predicate "arm_branch_target"
+ (match_code "reg,symbol_ref")
+{
+#if TARGET_MACHO
+ return GET_CODE (op) == REG
+ || ! (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+ || machopic_data_defined_p (op)
+ || machopic_lookup_stub_or_non_lazy_ptr (XSTR (op, 0));
+#else
+ return 1;
+#endif
+})
+;; APPLE LOCAL end ARM pic support
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/rtems-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/rtems-elf.h
new file mode 100644
index 000000000..f71e582ed
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/rtems-elf.h
@@ -0,0 +1,46 @@
+/* Definitions for RTEMS based ARM systems using ELF
+ Copyright (C) 2000, 2002, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF RTEMS)", stderr);
+
+#define HAS_INIT_SECTION
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__rtems__"); \
+ builtin_assert ("system=rtems"); \
+ } while (0)
+
+/*
+ * The default in gcc is now soft-float, but gcc fails to pass
+ * this on to the assembler.
+ */
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC "\
+ %{!mhard-float: %{!msoft-float:-mfpu=softfpa}}"
+
+/*
+ * The default includes --start-group and --end-group which conflicts
+ * with how this used to be defined.
+ */
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC "%G %L"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/semi.h b/gcc-4.2.1-5666.3/gcc/config/arm/semi.h
new file mode 100644
index 000000000..0de57d67a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/semi.h
@@ -0,0 +1,76 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ Copyright (C) 1994, 1995, 1996, 1997, 2001, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@arm.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#define STARTFILE_SPEC "crt0.o%s"
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "-lc"
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__semi__"
+#endif
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+#endif
+
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+#endif
+
+#ifndef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_HARD
+#endif
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+#endif
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "subtarget_extra_asm_spec", SUBTARGET_EXTRA_ASM_SPEC },
+#endif
+
+#ifndef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC ""
+#endif
+
+/* The compiler supports PIC code generation, even though the binutils
+ may not. If we are asked to compile position independent code, we
+ always pass -k to the assembler. If it doesn't recognize it, then
+ it will barf, which probably means that it doesn't know how to
+ assemble PIC code. This is what we want, since otherwise tools
+ may incorrectly assume we support PIC compilation even if the
+ binutils can't. */
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{fpic|fpie: -k} %{fPIC|fPIE: -k} \
+%{mbig-endian:-EB} \
+%{mcpu=*:-mcpu=%*} \
+%{march=*:-march=%*} \
+%{mapcs-float:-mfloat} \
+%{msoft-float:-mfloat-abi=soft} %{mhard-float:-mfloat-abi=hard} \
+%{mfloat-abi=*} %{mfpu=*} \
+%{mthumb-interwork:-mthumb-interwork} \
+%(subtarget_extra_asm_spec)"
+#endif
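+
+/* For example (hypothetical command line), "gcc -fPIC -mbig-endian foo.c"
+   resolves the spec above to an assembler invocation containing
+   "-k -EB", while a non-PIC compile omits -k entirely.  */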
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/semiaof.h b/gcc-4.2.1-5666.3/gcc/config/arm/semiaof.h
new file mode 100644
index 000000000..9038f0ddd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/semiaof.h
@@ -0,0 +1,40 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ AOF Syntax assembler.
+ Copyright (C) 1995, 1996, 1997, 2004 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@armltd.co.uk)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define_std ("arm"); \
+ builtin_define_std ("semi"); \
+ } while (0)
+
+#define ASM_SPEC "%{g -g} -arch 4 -apcs 3/32bit"
+
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_HARD
+
+#define TARGET_DEFAULT (0)
+
+/* The Norcroft C library defines size_t as "unsigned int". */
+#define SIZE_TYPE "unsigned int"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-coff.h b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-coff.h
new file mode 100644
index 000000000..0ba32ceaa
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-coff.h
@@ -0,0 +1,28 @@
+/* Definitions for StrongARM systems using COFF
+ Copyright (C) 1999 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_strongarm
+#endif
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (StrongARM/COFF)", stderr);
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-elf.h
new file mode 100644
index 000000000..84c20996a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-elf.h
@@ -0,0 +1,30 @@
+/* Definitions for non-Linux based StrongARM systems using ELF
+ Copyright (C) 1999, 2001 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (StrongARM/ELF non-Linux)", stderr);
+#endif
+
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_strongarm
+#endif
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-pe.h b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-pe.h
new file mode 100644
index 000000000..f1a13c0dd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/strongarm-pe.h
@@ -0,0 +1,23 @@
+/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (StrongARM/PE)", stderr);
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/symbian.h b/gcc-4.2.1-5666.3/gcc/config/arm/symbian.h
new file mode 100644
index 000000000..af1ba9a64
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/symbian.h
@@ -0,0 +1,101 @@
+/* Configuration file for Symbian OS on ARM processors.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Do not expand builtin functions (unless explicitly prefixed with
+ "__builtin"). Symbian OS code relies on properties of the standard
+ library that go beyond those guaranteed by the ANSI/ISO standard.
+ For example, "memcpy" works even with overlapping memory, like
+ "memmove". We cannot simply set flag_no_builtin in arm.c because
+ (a) flag_no_builtin is not declared in language-independent code,
+ and (b) that would prevent users from explicitly overriding the
+ default with -fbuiltin, which may sometimes be useful.
+
+ Make all symbols hidden by default. Symbian OS expects that all
+ exported symbols will be explicitly marked with
+ "__declspec(dllexport)".
+
+ Enumeration types use 4 bytes, even if the enumerals are small,
+ unless explicitly overridden.
+
+ The wchar_t type is a 2-byte type, unless explicitly
+ overridden. */
+#define CC1_SPEC \
+ "%{!fbuiltin:%{!fno-builtin:-fno-builtin}} " \
+ "%{!fvisibility=*:-fvisibility=hidden} " \
+ "%{!fshort-enums:%{!fno-short-enums:-fno-short-enums}} " \
+ "%{!fshort-wchar:%{!fno-short-wchar:-fshort-wchar}} "
+#define CC1PLUS_SPEC CC1_SPEC
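+
+/* For example (hypothetical invocation), a plain "gcc -c foo.c" resolves
+   the spec above to "-fno-builtin -fvisibility=hidden -fno-short-enums
+   -fshort-wchar", while "gcc -fbuiltin -c foo.c" suppresses only the
+   -fno-builtin default.  */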
+
+/* Symbian OS does not use crt*.o, unlike the generic unknown-elf
+ configuration. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+/* Do not link with any libraries by default. On Symbian OS, the user
+ must supply all required libraries on the command line. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* Support the "dllimport" attribute. */
+#define TARGET_DLLIMPORT_DECL_ATTRIBUTES 1
+
+/* Symbian OS assumes ARM V5 or above. Since -march=armv5 is
+ equivalent to making the ARM 10TDMI core the default, we can set
+ SUBTARGET_CPU_DEFAULT and get an equivalent effect. */
+#undef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm10tdmi
+
+/* The assembler should assume VFP FPU format, and armv5t. */
+#undef SUBTARGET_ASM_FLOAT_SPEC
+#define SUBTARGET_ASM_FLOAT_SPEC \
+ "%{!mfpu=*:-mfpu=vfp} %{!mcpu=*:%{!march=*:-march=armv5t}}"
+
+/* SymbianOS provides the BPABI routines in a separate library.
+ Therefore, we do not need to define any of them in libgcc. */
+#undef RENAME_LIBRARY
+#define RENAME_LIBRARY(GCC_NAME, AEABI_NAME) /* empty */
+
+/* Define the __symbian__ macro. */
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ /* Include the default BPABI stuff. */ \
+ TARGET_BPABI_CPP_BUILTINS (); \
+ builtin_define ("__symbian__"); \
+ } \
+ while (false)
+
+/* On SymbianOS, these sections are not writable, so we use "a",
+ rather than "aw", for the section attributes. */
+#undef ARM_EABI_CTORS_SECTION_OP
+#define ARM_EABI_CTORS_SECTION_OP \
+ "\t.section\t.init_array,\"a\",%init_array"
+#undef ARM_EABI_DTORS_SECTION_OP
+#define ARM_EABI_DTORS_SECTION_OP \
+ "\t.section\t.fini_array,\"a\",%fini_array"
+
+/* SymbianOS cannot merge entities with vague linkage at runtime. */
+#define TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P false
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/sync.md b/gcc-4.2.1-5666.3/gcc/config/arm/sync.md
new file mode 100644
index 000000000..999490762
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/sync.md
@@ -0,0 +1,192 @@
+;; APPLE LOCAL file 6258536 atomic builtins
+;; Machine description for ARM synchronization instructions.
+;; Copyright (C) 2009 Free Software Foundation, Inc.
+;; Contributed by Apple, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+
+(define_mode_macro IMODE [QI HI SI DI])
+(define_mode_attr ldrex [(QI "ldrexb") (HI "ldrexh")
+ (SI "ldrex") (DI "ldrexd")])
+(define_mode_attr strex [(QI "strexb") (HI "strexh")
+ (SI "strex") (DI "strexd")])
+
+(define_expand "memory_barrier"
+ [(set (mem:BLK (match_dup 0))
+ (unspec:BLK [(mem:BLK (match_dup 0))] UNSPEC_BARRIER))]
+ "TARGET_32BIT && arm_arch6"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+ if (arm_arch6 && !arm_arch7a)
+ {
+ rtx par;
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc(2));
+ /* v6 needs a scratch register */
+ XVECEXP (par, 0, 0) =
+ gen_rtx_SET (VOIDmode, gen_rtx_MEM (BLKmode, operands[0]),
+ gen_rtx_UNSPEC (BLKmode,
+ gen_rtvec (1, gen_rtx_MEM (BLKmode,
+ operands[0])),
+ UNSPEC_BARRIER));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH(SImode));
+ emit_insn (par);
+ DONE;
+ }
+})
+
+(define_insn "arm_memory_barrier_v7"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_BARRIER))]
+ "TARGET_32BIT && arm_arch7a"
+ "dmb\tish"
+ [(set_attr "length" "4")]
+)
+
+;; This version matches the define_expand above.
+(define_insn "arm_memory_barrier_v6_scratch"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_BARRIER))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_32BIT && arm_arch6 && !arm_arch7a"
+ "mov\t%2, #0\n\tmcr\tp15, 0, %2, c7, c10, 5"
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 6)
+ (const_int 8)))]
+)
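+
+;; The CP15 write above (c7, c10, opcode 5) is the ARMv6 encoding of the
+;; data memory barrier; the sync patterns further below use opcode 4,
+;; the data synchronization barrier (drain write buffer).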
+
+;; This version is used directly by the compare_and_swap splitter below.
+;; That runs after reload is complete, so we cannot use a fresh match_scratch;
+;; reload is no longer around to allocate a scratch register for us.
+(define_insn "arm_memory_barrier_v6_explicit"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_BARRIER))
+ (clobber (match_operand:SI 2 "register_operand" "=&r"))]
+ "TARGET_32BIT && arm_arch6 && !arm_arch7a"
+ "mov\t%2, #0\n\tmcr\tp15, 0, %2, c7, c10, 5"
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 6)
+ (const_int 8)))]
+)
+
+(define_expand "memory_sync"
+ [(set (mem:BLK (match_dup 0))
+ (unspec_volatile:BLK [(mem:BLK (match_dup 0))] UNSPEC_SYNC))]
+ "TARGET_32BIT && arm_arch6"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+ if (arm_arch6 && !arm_arch7a)
+ {
+ rtx par;
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc(2));
+ /* v6 needs a scratch register */
+ XVECEXP (par, 0, 0) =
+ gen_rtx_SET (BLKmode, operands[0],
+ gen_rtx_UNSPEC_VOLATILE (BLKmode,
+ gen_rtvec (1, operands[0]),
+ UNSPEC_SYNC));
+ XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH(SImode));
+ emit_insn (par);
+ DONE;
+ }
+})
+
+(define_insn "arm_memory_sync_v7"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec_volatile:BLK [(mem:BLK (match_operand 1))] UNSPEC_SYNC))]
+ "TARGET_32BIT && arm_arch7a"
+ "dsb"
+ [(set_attr "length" "4")]
+)
+
+;; This version matches the define_expand above.
+(define_insn "arm_memory_sync_v6_scratch"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec_volatile:BLK [(mem:BLK (match_operand 1))] UNSPEC_SYNC))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_32BIT && arm_arch6 && !arm_arch7a"
+ "mov\t%2, #0\n\tmcr\tp15, 0, %2, c7, c10, 4"
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 6)
+ (const_int 8)))]
+)
+
+;; This version is used directly by the compare_and_swap splitter below.
+;; That runs after reload is complete, so we cannot use a fresh match_scratch;
+;; reload is no longer around to allocate a scratch register for us.
+(define_insn "arm_memory_sync_v6_explicit"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec_volatile:BLK [(mem:BLK (match_operand 1))] UNSPEC_SYNC))
+ (clobber (match_operand:SI 2 "register_operand" "=&r"))]
+ "TARGET_32BIT && arm_arch6 && !arm_arch7a"
+ "mov\t%2, #0\n\tmcr\tp15, 0, %2, c7, c10, 4"
+ [(set (attr "length")
+ (if_then_else (eq_attr "is_thumb" "yes")
+ (const_int 6)
+ (const_int 8)))]
+)
+
+(define_insn "load_locked_<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=r")
+ (unspec_volatile:IMODE
+ [(match_operand:IMODE 1 "memory_operand" "Q")] VUNSPEC_LL))]
+ "TARGET_32BIT && arm_arch6"
+ "<ldrex>\t%0, %1"
+ [(set_attr "length" "4")]
+)
+
+(define_insn "store_conditional_<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(const_int 0)] VUNSPEC_SC))
+ (set (match_operand:IMODE 1 "memory_operand" "=Q")
+ (match_operand:IMODE 2 "register_operand" "r"))]
+ "TARGET_32BIT && arm_arch6"
+ "<strex>\t%0, %2, %1"
+ [(set_attr "length" "4")]
+)
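+
+;; A compare-and-swap on ARMv6 and later is conventionally a ldrex/strex
+;; retry loop; a sketch of what the splitter below is expected to emit
+;; (register assignments hypothetical):
+;;   0:  ldrex   r0, [r1]      @ load current value, set exclusive monitor
+;;       cmp     r0, r2        @ compare with the expected old value
+;;       bne     1f            @ mismatch: leave memory untouched
+;;       strex   r4, r3, [r1]  @ try to store the new value
+;;       cmp     r4, #0        @ strex writes 0 on success
+;;       bne     0b            @ reservation lost: retry
+;;   1: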
+
+(define_insn_and_split "sync_compare_and_swap<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=&r")
+ (match_operand:IMODE 1 "memory_operand" "+Q"))
+ (set (match_dup 1)
+ (unspec_volatile:IMODE
+ [(match_dup 1)
+ (match_operand:IMODE 2 "register_operand" "r")
+ (match_operand:IMODE 3 "register_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_32BIT && arm_arch6"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ arm_split_compare_and_swap(operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+}
+ [(set_attr "length" "48")]
+)
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-arm b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm
new file mode 100644
index 000000000..ea032ee8b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm
@@ -0,0 +1,28 @@
+# Rules common to all arm targets
+
+# APPLE LOCAL begin v7 support. Merge from Codesourcery
+MD_INCLUDES= $(srcdir)/config/arm/arm-tune.md \
+ $(srcdir)/config/arm/predicates.md \
+ $(srcdir)/config/arm/arm-generic.md \
+ $(srcdir)/config/arm/arm1020e.md \
+ $(srcdir)/config/arm/arm1026ejs.md \
+ $(srcdir)/config/arm/arm1136jfs.md \
+ $(srcdir)/config/arm/arm926ejs.md \
+ $(srcdir)/config/arm/cirrus.md \
+ $(srcdir)/config/arm/fpa.md \
+ $(srcdir)/config/arm/vec-common.md \
+ $(srcdir)/config/arm/iwmmxt.md \
+ $(srcdir)/config/arm/vfp.md \
+ $(srcdir)/config/arm/neon.md \
+ $(srcdir)/config/arm/thumb2.md \
+ $(srcdir)/config/arm/hwdiv.md
+# APPLE LOCAL end v7 support. Merge from Codesourcery
+
+s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
+ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
+
+$(srcdir)/config/arm/arm-tune.md: $(srcdir)/config/arm/gentune.sh \
+ $(srcdir)/config/arm/arm-cores.def
+ $(SHELL) $(srcdir)/config/arm/gentune.sh \
+ $(srcdir)/config/arm/arm-cores.def > \
+ $(srcdir)/config/arm/arm-tune.md
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-coff b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-coff
new file mode 100644
index 000000000..763add31d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-coff
@@ -0,0 +1,34 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float marm/mthumb mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be fpu soft arm thumb normal interwork
+MULTILIB_MATCHES =
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-elf b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-elf
new file mode 100644
index 000000000..3e9914d6c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-arm-elf
@@ -0,0 +1,92 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func \
+ _call_via_rX _interwork_call_via_rX \
+ _lshrdi3 _ashrdi3 _ashldi3 \
+ _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
+ _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
+ _fixsfsi _fixunssfsi _floatdidf _floatdisf _floatundidf _floatundisf
+
+MULTILIB_OPTIONS = marm/mthumb
+MULTILIB_DIRNAMES = arm thumb
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES =
+
+# APPLE LOCAL begin v7 support. Merge from mainline
+#MULTILIB_OPTIONS += march=armv7
+#MULTILIB_DIRNAMES += thumb2
+#MULTILIB_EXCEPTIONS += march=armv7* marm/*march=armv7*
+#MULTILIB_MATCHES += march?armv7=march?armv7-a
+#MULTILIB_MATCHES += march?armv7=march?armv7-r
+#MULTILIB_MATCHES += march?armv7=march?armv7-m
+#MULTILIB_MATCHES += march?armv7=mcpu?cortex-a8
+#MULTILIB_MATCHES += march?armv7=mcpu?cortex-r4
+#MULTILIB_MATCHES += march?armv7=mcpu?cortex-m3
+# APPLE LOCAL end v7 support. Merge from mainline
+
+# MULTILIB_OPTIONS += mcpu=ep9312
+# MULTILIB_DIRNAMES += ep9312
+# MULTILIB_EXCEPTIONS += *mthumb/*mcpu=ep9312*
+#
+# MULTILIB_OPTIONS += mlittle-endian/mbig-endian
+# MULTILIB_DIRNAMES += le be
+# MULTILIB_MATCHES += mbig-endian=mbe mlittle-endian=mle
+#
+# MULTILIB_OPTIONS += mhard-float/msoft-float
+# MULTILIB_DIRNAMES += fpu soft
+# MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
+#
+# MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
+# MULTILIB_DIRNAMES += normal interwork
+#
+# MULTILIB_OPTIONS += fno-leading-underscore/fleading-underscore
+# MULTILIB_DIRNAMES += elf under
+#
+# MULTILIB_OPTIONS += mcpu=arm7
+# MULTILIB_DIRNAMES += nofmult
+# MULTILIB_EXCEPTIONS += *mthumb*/*mcpu=arm7*
+# # Note: the multilib_exceptions matches both -mthumb and
+# # -mthumb-interwork
+# #
+# # We have to match all the arm cpu variants which do not have the
+# # multiply instruction and treat them as if the user had specified
+# # -mcpu=arm7. Note that in the following the ? is interpreted as
+# # an = for the purposes of matching command line options.
+# # FIXME: There ought to be a better way to do this.
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm7d
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm7di
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm70
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm700
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm700i
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm710
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm710c
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm7100
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm7500
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm7500fe
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm6
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm60
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm600
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm610
+# MULTILIB_MATCHES += mcpu?arm7=mcpu?arm620
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+# If EXTRA_MULTILIB_PARTS is not defined above then define EXTRA_PARTS here
+# EXTRA_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/arm/crti.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/arm/crti.asm
+
+$(T)crtn.o: $(srcdir)/config/arm/crtn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/arm/crtn.asm
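+
+# $(T) is the multilib subdirectory prefix, so each multilib assembles
+# its own copy of the startup files; e.g. for the thumb multilib the
+# first rule expands to roughly (flags illustrative):
+#
+#   thumb/crti.o: config/arm/crti.asm
+#           $(GCC_FOR_TARGET) -mthumb ... -c -o thumb/crti.o \
+#             -x assembler-with-cpp config/arm/crti.asm
+#
+# -x assembler-with-cpp is what allows crti.asm to use C preprocessor
+# conditionals.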
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-bpabi b/gcc-4.2.1-5666.3/gcc/config/arm/t-bpabi
new file mode 100644
index 000000000..b5c6a0b61
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-bpabi
@@ -0,0 +1,16 @@
+# Add the bpabi.S functions.
+LIB1ASMFUNCS += _aeabi_lcmp _aeabi_ulcmp _aeabi_ldivmod _aeabi_uldivmod
+
+# Add the BPABI C functions.
+LIB2FUNCS_EXTRA = $(srcdir)/config/arm/bpabi.c \
+ $(srcdir)/config/arm/unaligned-funcs.c
+
+UNWIND_H = $(srcdir)/config/arm/unwind-arm.h
+LIB2ADDEH = $(srcdir)/config/arm/unwind-arm.c \
+ $(srcdir)/config/arm/libunwind.S \
+ $(srcdir)/config/arm/pr-support.c $(srcdir)/unwind-c.c
+LIB2ADDEHDEP = $(UNWIND_H) $(srcdir)/config/$(LIB1ASMSRC)
+
+# Add the BPABI names.
+SHLIB_MAPFILES += $(srcdir)/config/arm/libgcc-bpabi.ver
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-darwin b/gcc-4.2.1-5666.3/gcc/config/arm/t-darwin
new file mode 100644
index 000000000..2d79020ea
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-darwin
@@ -0,0 +1,75 @@
+# APPLE LOCAL file ARM darwin target
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func \
+ _lshrdi3 _ashrdi3 _ashldi3 \
+ _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
+ _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
+ _fixsfsi _fixunssfsi _floatdidf _floatdisf _floatundidf _floatundisf \
+ _muldf3vfp _adddf3vfp _subdf3vfp _divdf3vfp \
+ _eqdf2vfp _nedf2vfp _ltdf2vfp _gtdf2vfp _ledf2vfp _gedf2vfp _unorddf2vfp \
+ _fixdfsivfp _fixunsdfsivfp _extendsfdf2vfp _truncdfsf2vfp \
+ _floatsidfvfp _floatunssidfvfp \
+ _mulsf3vfp _addsf3vfp _subsf3vfp _divsf3vfp \
+ _eqsf2vfp _nesf2vfp _ltsf2vfp _gtsf2vfp _lesf2vfp _gesf2vfp _unordsf2vfp \
+ _fixsfsivfp _fixunssfsivfp _floatsisfvfp _floatunssisfvfp \
+ _switchu8 _switch8 _switch16 _switch32
+# APPLE LOCAL 6465387 exception handling interworking VFP save
+LIB1ASMFUNCS += _save_vfp_d8_d15_regs _restore_vfp_d8_d15_regs
+
+# APPLE LOCAL begin 5316398 improved float/double -> int64 functions
+LIB2FUNCS_EXCLUDE = _fixdfdi _fixunsdfdi _fixsfdi _fixunssfdi
+LIB2FUNCS_EXTRA = $(srcdir)/config/arm/_fixdfdi.c \
+ $(srcdir)/config/arm/_fixunsdfdi.c \
+ $(srcdir)/config/arm/_fixsfdi.c \
+ $(srcdir)/config/arm/_fixunssfdi.c
+# APPLE LOCAL end 5316398 improved float/double -> int64 functions
+
+# APPLE LOCAL begin 6611402 configurable multilib architectures
+ifndef ARM_MULTILIB_ARCHS
+ARM_MULTILIB_ARCHS:=armv5 armv6 armv7
+endif
+
+MULTILIB_OPTIONS:=$(shell echo $(strip $(ARM_MULTILIB_ARCHS)) | \
+ sed -e s/armv5/march=armv5tej/ \
+ -e s/armv6/march=armv6k/ \
+ -e s/armv7/march=armv7a/ \
+ -e 's| |/|g')
+MULTILIB_DIRNAMES:=$(shell echo $(ARM_MULTILIB_ARCHS) | sed -e s/arm//g)
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES =
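+
+# Worked example: with the default ARM_MULTILIB_ARCHS of
+# "armv5 armv6 armv7", the two shell calls above yield
+#
+#   MULTILIB_OPTIONS  = march=armv5tej/march=armv6k/march=armv7a
+#   MULTILIB_DIRNAMES = v5 v6 v7
+#
+# and the MULTILIB_MATCHES blocks below fold equivalent -march/-mcpu
+# spellings into those three variants.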
+
+ifneq (,$(findstring armv5,$(ARM_MULTILIB_ARCHS)))
+MULTILIB_MATCHES += \
+ march?armv5tej=march?armv5te \
+ march?armv5tej=mcpu?arm9e \
+ march?armv5tej=mcpu?arm946e-s \
+ march?armv5tej=mcpu?arm966e-s \
+ march?armv5tej=mcpu?arm968e-s \
+ march?armv5tej=mcpu?arm10e \
+ march?armv5tej=mcpu?arm1020e \
+ march?armv5tej=mcpu?arm1022e \
+ march?armv5tej=mcpu?xscale \
+ march?armv5tej=mcpu?iwmmxt \
+ march?armv5tej=mcpu?arm926ej-s \
+ march?armv5tej=mcpu?arm1026ej-s
+endif
+
+ifneq (,$(findstring armv6,$(ARM_MULTILIB_ARCHS)))
+MULTILIB_MATCHES += \
+ march?armv6k=march?armv6zk \
+ march?armv6k=mcpu?arm1176jz-s \
+ march?armv6k=mcpu?arm1176jzf-s \
+ march?armv6k=mcpu?mpcorenovfp \
+ march?armv6k=mcpu?mpcore
+endif
+
+ifneq (,$(findstring armv7,$(ARM_MULTILIB_ARCHS)))
+MULTILIB_MATCHES += \
+ march?armv7a=march?armv7-a \
+ march?armv7a=mcpu?cortex-a8
+endif
+
+# APPLE LOCAL end 6611402 configurable multilib architectures
+# APPLE LOCAL 7442004 Always build multilib libgcc functions ARM mode.
+TARGET_LIBGCC2_CFLAGS = -fno-inline -marm
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-linux b/gcc-4.2.1-5666.3/gcc/config/arm/t-linux
new file mode 100644
index 000000000..b2697e66e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-linux
@@ -0,0 +1,15 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+TARGET_LIBGCC2_CFLAGS = -fomit-frame-pointer -fPIC
+LIBGCC2_DEBUG_CFLAGS = -g0
+
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx
+
+# MULTILIB_OPTIONS = mhard-float/msoft-float
+# MULTILIB_DIRNAMES = hard-float soft-float
+
+# EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+# LIBGCC = stmp-multilib
+# INSTALL_LIBGCC = install-multilib
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-linux-eabi b/gcc-4.2.1-5666.3/gcc/config/arm/t-linux-eabi
new file mode 100644
index 000000000..5e8d94d7a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-linux-eabi
@@ -0,0 +1,14 @@
+# These functions are included in shared libraries.
+TARGET_LIBGCC2_CFLAGS = -fPIC
+
+# We do not build a Thumb multilib for Linux because the definition of
+# CLEAR_INSN_CACHE in linux-gas.h does not work in Thumb mode.
+MULTILIB_OPTIONS =
+MULTILIB_DIRNAMES =
+
+# Use a version of div0 which raises SIGFPE.
+LIB1ASMFUNCS := $(filter-out _dvmd_tls,$(LIB1ASMFUNCS)) _dvmd_lnx
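+# The := forces immediate expansion, so the substitution rewrites the
+# value inherited from the fragments included before this one; e.g.
+# (illustrative values):
+#
+#   before:  LIB1ASMFUNCS = _udivsi3 ... _dvmd_tls
+#   after:   LIB1ASMFUNCS = _udivsi3 ... _dvmd_lnx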
+
+# Multilib the standard Linux files. Don't include crti.o or crtn.o,
+# which are provided by glibc.
+EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-netbsd b/gcc-4.2.1-5666.3/gcc/config/arm/t-netbsd
new file mode 100644
index 000000000..7d0724cc8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-netbsd
@@ -0,0 +1,28 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+TARGET_LIBGCC2_CFLAGS = -fomit-frame-pointer -fpic
+LIBGCC2_DEBUG_CFLAGS = -g0
+LIB2FUNCS_EXTRA = $(srcdir)/config/floatunsidf.c $(srcdir)/config/floatunsisf.c
+
+# Build a shared libgcc library.
+SHLIB_EXT = .so
+SHLIB_NAME = @shlib_base_name@.so
+SHLIB_SONAME = @shlib_base_name@.so.1
+SHLIB_OBJS = @shlib_objs@
+
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-soname,$(SHLIB_SONAME) \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
+ $(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir); \
+ $(INSTALL_DATA) $(SHLIB_NAME) $$(DESTDIR)$$(slibdir)/$(SHLIB_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)/$(SHLIB_NAME); \
+ $(LN_S) $(SHLIB_SONAME) $$(DESTDIR)$$(slibdir)/$(SHLIB_NAME)
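+
+# Illustration of that escaping: when the SHLIB_INSTALL text is copied
+# into libgcc.mk, each "$$" collapses to a single "$",
+#
+#   $$(DESTDIR)$$(slibdir)   ->   $(DESTDIR)$(slibdir)
+#
+# so those variables are only expanded when libgcc.mk itself runs at
+# install time.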
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-pe b/gcc-4.2.1-5666.3/gcc/config/arm/t-pe
new file mode 100644
index 000000000..a77a7327b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-pe
@@ -0,0 +1,33 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) output.h flags.h $(TREE_H) expr.h toplev.h $(TM_P_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float mthumb
+MULTILIB_DIRNAMES = fpu thumb
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+# APPLE LOCAL v7 support
+TARGET_LIBGCC2_CFLAGS =
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-rtems b/gcc-4.2.1-5666.3/gcc/config/arm/t-rtems
new file mode 100644
index 000000000..52d14bab0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-rtems
@@ -0,0 +1,10 @@
+# Custom rtems multilibs
+
+MULTILIB_OPTIONS = marm/mthumb
+MULTILIB_DIRNAMES = arm thumb
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = marm=mno-thumb
+
+MULTILIB_OPTIONS += msoft-float/mhard-float
+MULTILIB_DIRNAMES += soft fpu
+MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-semi b/gcc-4.2.1-5666.3/gcc/config/arm/t-semi
new file mode 100644
index 000000000..f5d8f1a6f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-semi
@@ -0,0 +1,37 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+TARGET_LIBGCC2_CFLAGS = -fomit-frame-pointer
+LIBGCC2_DEBUG_CFLAGS = -g0
+
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > fp-bit.c
+ echo '#define FLOAT' >> fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > dp-bit.c
+ echo '#ifndef __ARMEB__' >> dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+
+MULTILIB_OPTIONS = msoft-float mbig-endian mwords-little-endian
+MULTILIB_DIRNAMES = soft big wlittle
+MULTILIB_EXCEPTIONS = mwords-little-endian msoft-float/mwords-little-endian
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-slibgcc-iphoneos b/gcc-4.2.1-5666.3/gcc/config/arm/t-slibgcc-iphoneos
new file mode 100644
index 000000000..9bb6c2483
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-slibgcc-iphoneos
@@ -0,0 +1,64 @@
+# APPLE LOCAL file ARM 5681645
+# Build a shared libgcc library with the darwin linker.
+SHLIB_SOVERSION = 1
+SHLIB_VERSTRING = -compatibility_version $(SHLIB_SOVERSION) -current_version $(SHLIB_SOVERSION).0
+SHLIB_EXT = .dylib
+SHLIB_INSTALL_NAME = @shlib_base_name@.$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_SONAME = @shlib_base_name@.$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_MAP = @shlib_map_file@
+SHLIB_OBJS = @shlib_objs@
+SHLIB_DIR = @multilib_dir@
+SHLIB_LC = -lc
+
+# Darwin only searches in /usr/lib for shared libraries, not in subdirectories,
+# so the libgcc variants have different names, not different locations.
+# Note that this version is used for the loader, not the linker; the linker
+# uses the stub versions named by $(LIBGCC).
+# APPLE LOCAL begin no-libtool
+# APPLE LOCAL begin ARM dead strip libgcc_s
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -dynamiclib -nodefaultlibs \
+ -Wl,-dead_strip \
+ -install_name $(slibdir)/$(SHLIB_INSTALL_NAME) \
+ -single_module -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp \
+ -Wl,-exported_symbols_list,$(SHLIB_MAP) \
+ $(SHLIB_VERSTRING) \
+ @multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC)
+# APPLE LOCAL end ARM dead strip libgcc_s
+# APPLE LOCAL end no-libtool
+
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir); \
+ $(INSTALL_DATA) $(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)/$(SHLIB_SONAME)
+
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MKMAP_OPTS = -v leading_underscore=1
+SHLIB_MAPFILES += $(srcdir)/libgcc-std.ver $(srcdir)/config/arm/libgcc-iphoneos.ver
+
+# Must use a different directive for hidden visibility in assembly sources.
+ASM_HIDDEN_OP = .private_extern
+
+libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT): stmp-multilib
+ # When building multilibbed target libraries, all the required
+ # libraries are expected to exist in the multilib directory.
+ MLIBS=`$(GCC_FOR_TARGET) --print-multi-lib \
+ | sed -e 's/;.*$$//' -e '/^\.$$/d'` ; \
+ if [ -n "$$MLIBS" ] ; then \
+ for mlib in '' $$MLIBS ; do \
+ cp ./$${mlib}/libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT).tmp \
+ ./libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T_$${mlib} || exit 1 ; \
+ done ; \
+ $(LIPO_FOR_TARGET) -output libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT) \
+ -create libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T* ; \
+ rm libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T* ; \
+ else \
+ cp ./libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT).tmp \
+ ./libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT) ; \
+ fi
+
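+# The MLIBS computation relies on the --print-multi-lib output format,
+# one "dir;@flags" pair per line; the sed keeps just the directory names
+# and drops the default "." entry.  Illustrative output:
+#
+#   .;
+#   v5;@march=armv5tej
+#   v6;@march=armv6k
+#   v7;@march=armv7a
+#
+# leaving MLIBS = "v5 v6 v7"; the loop (which also covers the default ''
+# entry) then lipo-merges the per-multilib dylibs into one fat libgcc_s.
+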
+# From the point of view of the Makefiles, libgcc is built by the 'cp'
+# and 'lipo' commands above.
+LIBGCC=libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT) stmp-multilib
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-elf b/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-elf
new file mode 100644
index 000000000..c8193548a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-elf
@@ -0,0 +1,44 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float
+MULTILIB_DIRNAMES = le be fpu soft
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/arm/crti.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/arm/crti.asm
+
+$(T)crtn.o: $(srcdir)/config/arm/crtn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/arm/crtn.asm
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-pe b/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-pe
new file mode 100644
index 000000000..2e4732fb2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-strongarm-pe
@@ -0,0 +1,38 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) output.h flags.h $(TREE_H) expr.h toplev.h $(TM_P_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float/msoft-float
+MULTILIB_DIRNAMES = fpu soft
+MULTILIB_MATCHES =
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-symbian b/gcc-4.2.1-5666.3/gcc/config/arm/t-symbian
new file mode 100644
index 000000000..db8913818
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-symbian
@@ -0,0 +1,32 @@
+LIB1ASMFUNCS = _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# These functions have __aeabi equivalents and will never be called by GCC.
+# Putting them in LIB1ASMFUNCS keeps the standard libgcc2.c code from
+# being used, and since lib1funcs.asm provides no definitions for them
+# either, they end up undefined.
+LIB1ASMFUNCS += \
+ _ashldi3 _ashrdi3 _divdi3 _floatdidf _udivmoddi4 _umoddi3 \
+ _udivdi3 _lshrdi3 _moddi3 _muldi3 _negdi2 _cmpdi2 \
+ _fixdfdi _fixsfdi _fixunsdfdi _fixunssfdi _floatdisf \
+ _negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
+ _truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
+ _fixsfsi _fixunssfsi
+
+# Include the gcc personality routine
+UNWIND_H = $(srcdir)/config/arm/unwind-arm.h
+LIB2ADDEH = $(srcdir)/unwind-c.c $(srcdir)/config/arm/pr-support.c
+LIB2ADDEHDEP = $(UNWIND_H)
+
+# Create a multilib for processors with VFP floating-point, and a
+# multilib for those without -- using the soft-float ABI in both
+# cases. Symbian OS objects should be compiled with interworking
+# enabled, so there are no separate thumb-mode libraries.
+MULTILIB_OPTIONS = mfloat-abi=softfp
+MULTILIB_DIRNAMES = softfp
+
+# There is no C library to link against on Symbian OS -- at least when
+# building GCC.
+SHLIB_LC =
+
+# Symbian OS provides its own startup code.
+EXTRA_MULTILIB_PARTS=
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-vxworks b/gcc-4.2.1-5666.3/gcc/config/arm/t-vxworks
new file mode 100644
index 000000000..e620cfdf8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-vxworks
@@ -0,0 +1,10 @@
+# Multilibs for VxWorks.
+
+MULTILIB_OPTIONS = \
+ t4/t4be/t4t/t4tbe/t5/t5be/t5t/t5tbe/txscale/txscalebe
+
+MULTILIB_DIRNAMES = \
+ ARMARCH4gnu ARMARCH4gnube ARMARCH4_Tgnu ARMARCH4_Tgnube \
+ ARMARCH5gnu ARMARCH5gnube ARMARCH5_Tgnu ARMARCH5_Tgnube \
+ XSCALEgnu XSCALEgnube
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-wince-pe b/gcc-4.2.1-5666.3/gcc/config/arm/t-wince-pe
new file mode 100644
index 000000000..fca9853e7
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-wince-pe
@@ -0,0 +1,37 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) output.h flags.h $(TREE_H) expr.h toplev.h $(TM_P_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float
+MULTILIB_DIRNAMES = fpu
+# Note - the Thumb multilib is omitted because Thumb support for the
+# arm-wince-pe target does not appear to be working in binutils
+# yet...
+# MULTILIB_OPTIONS += thumb
+# MULTILIB_DIRNAMES += thumb
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+TARGET_LIBGCC2_CFLAGS =
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-coff b/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-coff
new file mode 100644
index 000000000..e2331a02e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-coff
@@ -0,0 +1,45 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mbig-endian
+MULTILIB_DIRNAMES = be
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+# Note XScale does not support hard FP
+
+MULTILIB_OPTIONS += mthumb-interwork
+MULTILIB_DIRNAMES += interwork
+
+MULTILIB_OPTIONS += mthumb
+MULTILIB_DIRNAMES += thumb
+MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
+
+MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-elf b/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-elf
new file mode 100644
index 000000000..d7a8124ee
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/t-xscale-elf
@@ -0,0 +1,66 @@
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mbig-endian
+MULTILIB_DIRNAMES = be
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+# Note XScale does not support hard FP
+
+MULTILIB_OPTIONS += mthumb-interwork
+MULTILIB_DIRNAMES += interwork
+
+MULTILIB_OPTIONS += mthumb
+MULTILIB_DIRNAMES += thumb
+MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
+
+MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
+
+# The iWMMXt multilibs are suppressed for now because gcc only
+# supports generating them with the IWMMXT or AAPCS ABIs, neither of
+# which is the default. Until GCC can generate code for an iWMMXt
+# which will work with the default ABI, it is not possible to safely
+# generate these multilibs.
+#
+# MULTILIB_OPTIONS += mcpu=iwmmxt
+# MULTILIB_DIRNAMES += iwmmxt
+# MULTILIB_REDUNDANT_DIRS += interwork/thumb/iwmmxt=thumb
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in fp-bit.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -fno-inline
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/arm/crti.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/arm/crti.asm
+
+$(T)crtn.o: $(srcdir)/config/arm/crtn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/arm/crtn.asm
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/thumb2.md b/gcc-4.2.1-5666.3/gcc/config/arm/thumb2.md
new file mode 100644
index 000000000..504b06e58
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/thumb2.md
@@ -0,0 +1,1164 @@
+;; APPLE LOCAL file v7 support. Merge from mainline
+;; ARM Thumb-2 Machine Description
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+;; 02111-1307, USA. */
+
+;; Note: Thumb-2 is the variant of the Thumb architecture that adds
+;; 32-bit encodings of [almost all of] the Arm instruction set.
+;; Some old documents refer to the relatively minor interworking
+;; changes made in armv5t as "thumb2". These are considered part of
+;; the 16-bit Thumb-1 instruction set.
+
+(include "hwdiv.md")
+
+(define_insn "*thumb2_incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;add%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
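+
+;; Illustration (not from the sources): for an EQ comparison and
+;; hypothetical registers, the second alternative above assembles as
+;;
+;;      ite     ne
+;;      movne   r0, r1
+;;      addeq   r0, r1, #1
+;;
+;; counted as a 2-byte IT plus two 4-byte insns, hence length 10; the
+;; single-insn form is 2 + 4 = 6 bytes.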
+
+(define_insn "*thumb2_decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;sub%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
+
+;; Thumb-2 only allows shift by constant on data processing instructions
+(define_insn "*thumb_andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "const_int_operand" "M")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+  "TARGET_THUMB2"
+ "bic%?\\t%0, %1, %2%S4"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "2")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tlt\;movlt\\t%0, %2
+ cmp\\t%1, %2\;it\\tge\;movge\\t%0, %1
+ cmp\\t%1, %2\;ite\\tge\;movge\\t%0, %1\;movlt\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tge\;movge\\t%0, %2
+ cmp\\t%1, %2\;it\\tlt\;movlt\\t%0, %1
+ cmp\\t%1, %2\;ite\\tlt\;movlt\\t%0, %1\;movge\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb32_umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tcc\;movcc\\t%0, %2
+ cmp\\t%1, %2\;it\\tcs\;movcs\\t%0, %1
+ cmp\\t%1, %2\;ite\\tcs\;movcs\\t%0, %1\;movcc\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tcs\;movcs\\t%0, %2
+ cmp\\t%1, %2\;it\\tcc\;movcc\\t%0, %1
+ cmp\\t%1, %2\;ite\\tcc\;movcc\\t%0, %1\;movcs\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "mvn%?\\t%0, %1%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ "TARGET_THUMB2"
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+;; Thumb-2 does not have rsc, so use a clever trick with shifter operands.
+(define_insn "*thumb2_negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "negs\\t%Q0, %Q1\;sbc\\t%R0, %R1, %R1, lsl #1"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
+
+(define_insn "*thumb2_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+   cmp\\t%0, #0\;it\\tlt\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so left as no
+ (set_attr "length" "10,8")]
+)
+
+(define_insn "*thumb2_neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%0, #0\;it\\tgt\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so left as no
+ (set_attr "length" "10,8")]
+)
+
+(define_insn "*thumb2_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, r, r, m")
+ (match_operand:DI 1 "di_operand" "rDa,Db,Dc,mi,r"))]
+ "TARGET_THUMB2
+ && !(TARGET_HARD_FLOAT && (TARGET_MAVERICK || TARGET_VFP))
+ && !TARGET_IWMMXT"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*")]
+)
+
+(define_insn "*thumb2_movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r, m")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r"))]
+ "TARGET_THUMB2 && ! TARGET_IWMMXT
+ && !(TARGET_HARD_FLOAT && TARGET_VFP)
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ movw%?\\t%0, %1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+ [(set_attr "type" "*,*,*,load1,store1")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*")]
+)
+
+;; ??? We can probably do better with thumb2
+(define_insn "pic_load_addr_thumb2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "" "mX")
+ (label_ref (match_operand 2 "" ""))] UNSPEC_PIC_SYM))]
+ "TARGET_THUMB2 && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "ldr%?\\t%0, %1"
+ [(set_attr "type" "load1")
+ (set_attr "pool_range" "4096")
+ (set_attr "neg_pool_range" "0")]
+)
+
+;; Set reg to the address of this instruction plus four. The low two
+;; bits of the PC are always read as zero, so ensure the instruction is
+;; word aligned.
+(define_insn "pic_load_dot_plus_four"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const (plus:SI (pc) (const_int 4)))]
+ UNSPEC_PIC_BASE))
+ (use (match_operand 1 "" ""))]
+ "TARGET_THUMB2"
+ "*
+ assemble_align(BITS_PER_WORD);
+ (*targetm.asm_out.internal_label) (asm_out_file, \"LPIC\",
+ INTVAL (operands[1]));
+   /* We use adr because some buggy versions of gas assemble
+      add r8, pc, #0 to add.w r8, pc, #0, not addw r8, pc, #0.  */
+ asm_fprintf (asm_out_file, \"\\tadr\\t%r, %LLPIC%d + 4\\n\",
+ REGNO(operands[0]), (int)INTVAL (operands[1]));
+ return \"\";
+ "
+ [(set_attr "length" "6")]
+)
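+
+;; For instance, with operand 1 = 5 and destination r3 (both
+;; illustrative), the emitted sequence is roughly:
+;;
+;;      .align  2
+;; LPIC5:
+;;      adr     r3, LPIC5 + 4
+;;
+;; so r3 receives the address of the adr instruction itself plus four.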
+
+;; Thumb-2 always has load/store halfword instructions, so we can avoid a lot
+;; of the messiness associated with the ARM patterns.
+(define_insn "*thumb2_movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,n,r,m"))]
+ "TARGET_THUMB2"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ movw%?\\t%0, %L1\\t%@ movhi
+ str%(h%)\\t%1, %0\\t%@ movhi
+ ldr%(h%)\\t%0, %1\\t%@ movhi"
+ [(set_attr "type" "*,*,store1,load1")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,*,*,4096")
+ (set_attr "neg_pool_range" "*,*,*,250")]
+)
+
+(define_insn "*thumb2_movsf_soft_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_THUMB2
+ && TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,load1,store1")
+ (set_attr "pool_range" "*,4096,*")
+ (set_attr "neg_pool_range" "*,0,*")]
+)
+
+(define_insn "*thumb2_movdf_soft_insn"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=r,r,r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "rDa,Db,Dc,mF,r"))]
+ "TARGET_THUMB2 && TARGET_SOFT_FLOAT
+ && ( register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "1020")
+ (set_attr "neg_pool_range" "0")]
+)
+
+(define_insn "*thumb2_cmpsi_shiftsi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_cmpsi_neg_shiftsi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))))]
+ "TARGET_THUMB2"
+ "cmn%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%D3\;mov%D3\\t%0, %2
+ it\\t%D3\;mvn%D3\\t%0, #%B2
+ it\\t%d3\;mov%d3\\t%0, %1
+ it\\t%d3\;mvn%d3\\t%0, #%B1
+ ite\\t%d3\;mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ ite\\t%d3\;mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ ite\\t%d3\;mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ ite\\t%d3\;mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "6,6,6,6,10,10,10,10")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*thumb2_movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_THUMB2 && TARGET_SOFT_FLOAT"
+ "@
+ it\\t%D3\;mov%D3\\t%0, %2
+ it\\t%d3\;mov%d3\\t%0, %1"
+ [(set_attr "length" "6,6")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*call_reg_thumb2"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB2"
+ "blx%?\\t%0"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*call_value_reg_thumb2"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB2"
+ "blx\\t%1"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*thumb2_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "l*r"))]
+ "TARGET_THUMB2"
+ "bx\\t%0"
+ [(set_attr "conds" "clob")]
+)
+;; Don't define thumb2_load_indirect_jump because we can't guarantee label
+;; addresses will have the thumb bit set correctly.
+
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*thumb2_arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ "TARGET_THUMB2"
+ "%i1%?\\t%0, %2, %4%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+;; ??? What does this splitter do? Copied from the ARM version
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 2 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "")
+ (match_operand:SI 5 "const_int_operand" "")])
+ (match_operand:SI 6 "s_register_operand" "")])
+ (match_operand:SI 7 "arm_rhs_operand" "")]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+ "TARGET_32BIT"
+ [(set (match_dup 8)
+ (match_op_dup 2 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 6)]))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 8) (match_dup 7)]))]
+ "")
+
+(define_insn "*thumb2_arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ "TARGET_32BIT"
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "sub%?\\t%0, %1, %3%S2"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "TARGET_THUMB2"
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;orr%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
+
+(define_insn "*thumb2_compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (operands[3] == const0_rtx)
+ {
+ if (GET_CODE (operands[1]) == LT)
+ return \"lsr\\t%0, %2, #31\";
+
+ if (GET_CODE (operands[1]) == GE)
+ return \"mvn\\t%0, %2\;lsr\\t%0, %0, #31\";
+
+ if (GET_CODE (operands[1]) == EQ)
+ return \"rsbs\\t%0, %2, #1\;it\\tcc\;movcc\\t%0, #0\";
+ }
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;it\\tne\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;it\\tne\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"ite\\t%D1\;mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "arm_comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"it\\t%D4\;mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"it\\t%d4\;mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ switch (which_alternative)
+ {
+ case 0:
+ output_asm_insn (\"it\\t%d4\", operands);
+ break;
+ case 1:
+ output_asm_insn (\"it\\t%D4\", operands);
+ break;
+ case 2:
+ output_asm_insn (\"ite\\t%D4\", operands);
+ break;
+ default:
+ abort();
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10")]
+)
+
+(define_insn "*thumb2_cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ }
+ else if (GET_CODE (operands[5]) == MINUS)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ }
+ else if (which_alternative != 0)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ }
+ else
+ output_asm_insn (\"it\\t%d4\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ }
+ else
+ output_asm_insn (\"it\\t%d4\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,14")]
+)
+
+(define_insn "*thumb2_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"asr\\t%0, %1, #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"ite\\t%D3\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;it\\tcc\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;it\\tcs\;movcs\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;it\\tcs\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+          return \"ands\\t%0, %2, %3, asr #32\;it\\tcc\;movcc\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ switch (which_alternative)
+ {
+ case 0:
+ output_asm_insn (\"it\\t%D5\", operands);
+ break;
+ case 1:
+ output_asm_insn (\"it\\t%d5\", operands);
+ break;
+ case 2:
+ output_asm_insn (\"ite\\t%d5\", operands);
+ break;
+ default:
+ abort();
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+;; Zero and sign extension instructions.
+
+(define_insn "*thumb2_zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "*
+ /* ??? Output both instructions unconditionally, otherwise the conditional
+     execution insn counter gets confused.
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0)) */
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+ "
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*thumb2_zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2"
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%(b%)\\t%Q0, %1\;mov%?\\t%R0, #0"
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "*,load_byte")
+ (set_attr "pool_range" "*,4092")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "*
+ /* ??? Output both instructions unconditionally, otherwise the conditional
+     execution insn counter gets confused.
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0)) */
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"asr%?\\t%R0, %Q0, #31\";
+ "
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "shift" "1")
+ (set_attr "predicable" "yes")]
+)
+
+;; All supported Thumb2 implementations are armv6, so only that case is
+;; provided.
+(define_insn "*thumb2_extendqisi_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ sxtb%?\\t%0, %1
+ ldr%(sb%)\\t%0, %1"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_zero_extendhisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ uxth%?\\t%0, %1
+ ldr%(h%)\\t%0, %1"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_zero_extendqisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ uxtb%(%)\\t%0, %1
+ ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+;; APPLE LOCAL begin 6152801 SImode thumb2 switch table dispatch
+(define_insn "thumb2_casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (use (label_ref (match_dup 2)))])]
+ "TARGET_THUMB2"
+ "* return thumb2_output_casesi(operands);"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "16")]
+)
+
+;; Removed thumb2_casesi_internal_pic
+;; APPLE LOCAL end 6152801 SImode thumb2 switch table dispatch
+
+(define_insn_and_split "thumb2_eh_return"
+ [(unspec_volatile [(match_operand:SI 0 "s_register_operand" "r")]
+ VUNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&r"))]
+ "TARGET_THUMB2"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "
+ {
+ thumb_set_return_address (operands[0], operands[1]);
+ DONE;
+ }"
+)
+
+;; Peepholes and insns for 16-bit flag clobbering instructions.
+;; The conditional forms of these instructions do not clobber CC.
+;; However by the time peepholes are run it is probably too late to do
+;; anything useful with this information.
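+;; For example, "ands r0, r1" is a 16-bit encoding, while the
+;; flag-preserving "and.w r0, r0, r1" requires 32 bits, so accepting a
+;; CC clobber here lets the shorter form be used.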
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operator:SI 3 "thumb_16bit_operator"
+ [(match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "low_register_operand" "")]))]
+ "TARGET_THUMB2 && rtx_equal_p(operands[0], operands[1])
+ && peep2_regno_dead_p(0, CC_REGNUM)"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 3
+ [(match_dup 1)
+ (match_dup 2)]))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_alusi3_short"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (match_operator:SI 3 "thumb_16bit_operator"
+ [(match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "%I3%!\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
+
+;; Similarly for 16-bit shift instructions
+;; There is no 16-bit rotate by immediate instruction.
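+;; For example, "lsls r0, r1, #4" has a 16-bit encoding while the
+;; flag-preserving "lsl.w r0, r1, #4" needs 32 bits; rotates only have
+;; a 16-bit register form, hence the ROTATE/ROTATERT check below.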
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "low_reg_or_int_operand" "")]))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && ((GET_CODE(operands[3]) != ROTATE && GET_CODE(operands[3]) != ROTATERT)
+ || REG_P(operands[2]))"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 3
+ [(match_dup 1)
+ (match_dup 2)]))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_shiftsi3_short"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "low_register_operand" "l")
+ (match_operand:SI 2 "low_reg_or_int_operand" "lM")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed
+ && ((GET_CODE(operands[3]) != ROTATE && GET_CODE(operands[3]) != ROTATERT)
+ || REG_P(operands[2]))"
+ "* return arm_output_shift(operands, 2);"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+ (set_attr "length" "2")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; 16-bit load immediate
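+;; The 16-bit encoding is "movs Rd, #imm8", which only accepts a low
+;; register and an immediate in the range 0-255, and always sets the
+;; flags.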
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && (unsigned HOST_WIDE_INT) INTVAL(operands[1]) < 256"
+ [(parallel
+ [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_movsi_shortim"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "I"))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "mov%!\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
+
+;; 16-bit add/sub immediate
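+;; Two 16-bit forms exist: "adds/subs Rdn, #imm8" when source and
+;; destination are the same register, and "adds/subs Rd, Rn, #imm3"
+;; otherwise; the peephole condition below mirrors those immediate
+;; ranges.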
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (plus:SI (match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && ((rtx_equal_p(operands[0], operands[1])
+ && INTVAL(operands[2]) > -256 && INTVAL(operands[2]) < 256)
+ || (INTVAL(operands[2]) > -8 && INTVAL(operands[2]) < 8))"
+ [(parallel
+ [(set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_addsi_shortim"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (plus:SI (match_operand:SI 1 "low_register_operand" "l")
+ (match_operand:SI 2 "const_int_operand" "IL")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "*
+ HOST_WIDE_INT val;
+
+ val = INTVAL(operands[2]);
+ /* We prefer e.g. subs rn, rn, #1 over adds rn, rn, #0xffffffff. */
+ if (val < 0 && const_ok_for_arm(ARM_SIGN_EXTEND (-val)))
+ return \"sub%!\\t%0, %1, #%n2\";
+ else
+ return \"add%!\\t%0, %1, %2\";
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
+
+;; APPLE LOCAL begin 6261739 incorrect insn lengths
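+;; cbz/cbnz are 16-bit encodings that can only test a low register and
+;; branch a short distance forwards, so the length attribute falls back
+;; to an 8-byte cmp/conditional-branch sequence when either constraint
+;; is not met.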
+(define_insn "*thumb2_cbz"
+ [(set (pc) (if_then_else
+ (eq (match_operand:SI 0 "s_register_operand" "l,?r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (get_attr_length (insn) == 2 && which_alternative == 0)
+ return \"cbz\\t%0, %l1\";
+ else
+ return \"cmp\\t%0, #0\;beq\\t%l1\";
+ "
+ [(set (attr "length")
+ (if_then_else
+ (and (and (ge (minus (match_dup 1) (pc)) (const_int 2))
+ (le (minus (match_dup 1) (pc)) (const_int 128)))
+ (match_operand:SI 0 "low_register_operand" ""))
+ (const_int 2)
+ (const_int 8)))]
+)
+
+(define_insn "*thumb2_cbnz"
+ [(set (pc) (if_then_else
+ (ne (match_operand:SI 0 "s_register_operand" "l,?r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (get_attr_length (insn) == 2 && which_alternative == 0)
+ return \"cbnz\\t%0, %l1\";
+ else
+ return \"cmp\\t%0, #0\;bne\\t%l1\";
+ "
+ [(set (attr "length")
+ (if_then_else
+ (and (and (ge (minus (match_dup 1) (pc)) (const_int 2))
+ (le (minus (match_dup 1) (pc)) (const_int 128)))
+ (match_operand:SI 0 "low_register_operand" ""))
+ (const_int 2)
+ (const_int 8)))]
+)
+;; APPLE LOCAL end 6261739 incorrect insn lengths
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/uclinux-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/uclinux-elf.h
new file mode 100644
index 000000000..9f112cdda
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/uclinux-elf.h
@@ -0,0 +1,74 @@
+/* Definitions for ARM running ucLinux using ELF
+ Copyright (C) 1999, 2001, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <pb@nexus.co.uk>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* We don't want a PLT. */
+#undef NEED_PLT_RELOC
+#define NEED_PLT_RELOC 0
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF ucLinux)", stderr);
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_SINGLE_PIC_BASE)
+
+/* NOTE: The remaining definitions in this file are needed because uclinux
+ does not use config/linux.h. */
+
+/* Do not assume anything about header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* The GNU C++ standard library requires that these macros be defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+ %{!p:%{profile:gcrt1.o%s} \
+ %{!profile:crt1.o%s}}}} \
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
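+/* In spec syntax, "%{pg:X}" expands to X when -pg was given, and "%s"
+   marks the preceding file name to be looked up along the startfile
+   search path. */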
+
+/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{profile:-p}"
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/unaligned-funcs.c b/gcc-4.2.1-5666.3/gcc/config/arm/unaligned-funcs.c
new file mode 100644
index 000000000..66cfd3bbb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/unaligned-funcs.c
@@ -0,0 +1,62 @@
+/* EABI unaligned read/write functions.
+
+ Copyright (C) 2005 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+int __aeabi_uread4 (void *);
+int __aeabi_uwrite4 (int, void *);
+long long __aeabi_uread8 (void *);
+long long __aeabi_uwrite8 (long long, void *);
+
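+/* Reading or writing through a packed struct tells the compiler that
+   the address may be unaligned, so it emits an access sequence that is
+   safe on cores without hardware unaligned-access support. */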
+struct __attribute__((packed)) u4 { int data; };
+struct __attribute__((packed)) u8 { long long data; };
+
+int
+__aeabi_uread4 (void *ptr)
+{
+ return ((struct u4 *) ptr)->data;
+}
+
+int
+__aeabi_uwrite4 (int data, void *ptr)
+{
+ ((struct u4 *) ptr)->data = data;
+ return data;
+}
+
+long long
+__aeabi_uread8 (void *ptr)
+{
+ return ((struct u8 *) ptr)->data;
+}
+
+long long
+__aeabi_uwrite8 (long long data, void *ptr)
+{
+ ((struct u8 *) ptr)->data = data;
+ return data;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/unknown-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/unknown-elf.h
new file mode 100644
index 000000000..c9ad99034
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/unknown-elf.h
@@ -0,0 +1,97 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* elfos.h should have already been included. Now just override
+ any conflicting definitions and add any extras. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF)", stderr);
+#endif
+
+/* Default to using software floating point. */
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (0)
+#endif
+
+/* Now we define the strings used to build the spec file. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+/* The __USES_INITFINI__ define is tested in newlib/libc/sys/arm/crt0.S
+ to see if it needs to invoke _init() and _fini(). */
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__USES_INITFINI__"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Return a nonzero value if DECL has a section attribute. */
+#define IN_NAMED_SECTION(DECL) \
+ ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
+ && DECL_SECTION_NAME (DECL) != NULL_TREE)
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (IN_NAMED_SECTION (DECL)) \
+ switch_to_section (get_named_section (DECL, NULL, 0)); \
+ else \
+ switch_to_section (bss_section); \
+ \
+ ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ \
+ last_assemble_variable_decl = DECL; \
+ ASM_DECLARE_OBJECT_NAME (FILE, NAME, DECL); \
+ ASM_OUTPUT_SKIP (FILE, SIZE ? (int)(SIZE) : 1); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if ((DECL) != NULL && IN_NAMED_SECTION (DECL)) \
+ switch_to_section (get_named_section (DECL, NULL, 0)); \
+ else \
+ switch_to_section (bss_section); \
+ \
+ ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ fprintf (FILE, "\t.space\t%d\n", SIZE ? (int)(SIZE) : 1); \
+ } \
+ while (0)
+
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm7tdmi
+#endif
+
+/* The libgcc udivmod functions may throw exceptions. If newlib is
+ configured to support long longs in I/O, then printf will depend on
+ udivmoddi4, which will depend on the exception unwind routines,
+ which will depend on abort, which is defined in libc. */
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC "--start-group %G %L --end-group"
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.c b/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.c
new file mode 100644
index 000000000..a8fd1123c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.c
@@ -0,0 +1,1238 @@
+/* ARM EABI compliant unwinding routines.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+#include "unwind.h"
+
+/* We add a prototype for abort here to avoid creating a dependency on
+ target headers. */
+extern void abort (void);
+
+/* Definitions for C++ runtime support routines. We make these weak
+ declarations to avoid pulling in libsupc++ unnecessarily. */
+typedef unsigned char bool;
+
+typedef struct _ZSt9type_info type_info; /* This names the C++ type_info type. */
+
+void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
+bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
+bool __attribute__((weak)) __cxa_type_match(_Unwind_Control_Block *ucbp,
+ const type_info *rttip,
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ bool is_reference,
+ void **matched_object);
+
+_Unwind_Ptr __attribute__((weak))
+__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);
+
+/* Misc constants. */
+#define R_IP 12
+#define R_SP 13
+#define R_LR 14
+#define R_PC 15
+
+#define EXIDX_CANTUNWIND 1
+#define uint32_highbit (((_uw) 1) << 31)
+
+#define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
+#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
+#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
+#define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)
+
+struct core_regs
+{
+ _uw r[16];
+};
+
+/* We use normal integer types here to avoid the compiler generating
+ coprocessor instructions. */
+struct vfp_regs
+{
+ _uw64 d[16];
+ _uw pad;
+};
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+struct vfpv3_regs
+{
+ /* Always populated via VSTM, so no need for the "pad" field from
+ vfp_regs (which is used to store the format word for FSTMX). */
+ _uw64 d[16];
+};
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+struct fpa_reg
+{
+ _uw w[3];
+};
+
+struct fpa_regs
+{
+ struct fpa_reg f[8];
+};
+
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+struct wmmxd_regs
+{
+ _uw64 wd[16];
+};
+
+struct wmmxc_regs
+{
+ _uw wc[4];
+};
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+/* Unwind descriptors. */
+
+typedef struct
+{
+ _uw16 length;
+ _uw16 offset;
+} EHT16;
+
+typedef struct
+{
+ _uw length;
+ _uw offset;
+} EHT32;
+
+/* The ABI specifies that the unwind routines may only use core registers,
+ except when actually manipulating coprocessor state. This allows
+ us to write one implementation that works on all platforms by
+ demand-saving coprocessor registers.
+
+ During unwinding we hold the coprocessor state in the actual hardware
+ registers and allocate demand-save areas for use during phase1
+ unwinding. */
+
+typedef struct
+{
+ /* The first fields must be the same as a phase2_vrs. */
+ _uw demand_save_flags;
+ struct core_regs core;
+ _uw prev_sp; /* Only valid during forced unwinding. */
+ struct vfp_regs vfp;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ struct vfpv3_regs vfp_regs_16_to_31;
+ struct fpa_regs fpa;
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ struct wmmxd_regs wmmxd;
+ struct wmmxc_regs wmmxc;
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
+} phase1_vrs;
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define DEMAND_SAVE_VFP 1 /* VFP state has been saved if not set */
+#define DEMAND_SAVE_VFP_D 2 /* VFP state is for FLDMD/FSTMD if set */
+#define DEMAND_SAVE_VFP_V3 4 /* VFPv3 state for regs 16 .. 31 has
+ been saved if not set */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#define DEMAND_SAVE_WMMXD 8 /* iWMMXt data registers have been
+ saved if not set. */
+#define DEMAND_SAVE_WMMXC 16 /* iWMMXt control registers have been
+ saved if not set. */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* This must match the structure created by the assembly wrappers. */
+typedef struct
+{
+ _uw demand_save_flags;
+ struct core_regs core;
+} phase2_vrs;
+
+
+/* An exception index table entry. */
+
+typedef struct __EIT_entry
+{
+ _uw fnoffset;
+ _uw content;
+} __EIT_entry;
+
+/* Assembly helper functions. */
+
+/* Restore core register state. Never returns. */
+void __attribute__((noreturn)) restore_core_regs (struct core_regs *);
+
+
+/* Coprocessor register state manipulation functions. */
+
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Routines for FLDMX/FSTMX format... */
+void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
+void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+void __gnu_Unwind_Save_WMMXD (struct wmmxd_regs * p);
+void __gnu_Unwind_Restore_WMMXD (struct wmmxd_regs * p);
+void __gnu_Unwind_Save_WMMXC (struct wmmxc_regs * p);
+void __gnu_Unwind_Restore_WMMXC (struct wmmxc_regs * p);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* ...and those for FLDMD/FSTMD format... */
+void __gnu_Unwind_Save_VFP_D (struct vfp_regs * p);
+void __gnu_Unwind_Restore_VFP_D (struct vfp_regs * p);
+
+/* ...and those for VLDM/VSTM format, saving/restoring only registers
+ 16 through 31. */
+void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs * p);
+void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs * p);
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* Restore coprocessor state after phase1 unwinding. */
+static void
+restore_non_core_regs (phase1_vrs * vrs)
+{
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0)
+ {
+ if (vrs->demand_save_flags & DEMAND_SAVE_VFP_D)
+ __gnu_Unwind_Restore_VFP_D (&vrs->vfp);
+ else
+ __gnu_Unwind_Restore_VFP (&vrs->vfp);
+ }
+
+ if ((vrs->demand_save_flags & DEMAND_SAVE_VFP_V3) == 0)
+ __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+ if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXD) == 0)
+ __gnu_Unwind_Restore_WMMXD (&vrs->wmmxd);
+ if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXC) == 0)
+ __gnu_Unwind_Restore_WMMXC (&vrs->wmmxc);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+}
+
+/* A better way to do this would probably be to compare the absolute address
+ with a segment relative relocation of the same symbol. */
+
+extern int __text_start;
+extern int __data_start;
+
+/* The exception index table location. */
+extern __EIT_entry __exidx_start;
+extern __EIT_entry __exidx_end;
+
+/* ABI defined personality routines. */
+extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak));
+extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
+extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
+
+/* ABI defined routine to store a virtual register to memory. */
+
+_Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ _uw regno,
+ _Unwind_VRS_DataRepresentation representation,
+ void *valuep)
+{
+ phase1_vrs *vrs = (phase1_vrs *) context;
+
+ switch (regclass)
+ {
+ case _UVRSC_CORE:
+ if (representation != _UVRSD_UINT32
+ || regno > 15)
+ return _UVRSR_FAILED;
+ *(_uw *) valuep = vrs->core.r[regno];
+ return _UVRSR_OK;
+
+ case _UVRSC_VFP:
+ case _UVRSC_FPA:
+ case _UVRSC_WMMXD:
+ case _UVRSC_WMMXC:
+ return _UVRSR_NOT_IMPLEMENTED;
+
+ default:
+ return _UVRSR_FAILED;
+ }
+}
+
+
+/* ABI defined function to load a virtual register from memory. */
+
+_Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ _uw regno,
+ _Unwind_VRS_DataRepresentation representation,
+ void *valuep)
+{
+ phase1_vrs *vrs = (phase1_vrs *) context;
+
+ switch (regclass)
+ {
+ case _UVRSC_CORE:
+ if (representation != _UVRSD_UINT32
+ || regno > 15)
+ return _UVRSR_FAILED;
+
+ vrs->core.r[regno] = *(_uw *) valuep;
+ return _UVRSR_OK;
+
+ case _UVRSC_VFP:
+ case _UVRSC_FPA:
+ case _UVRSC_WMMXD:
+ case _UVRSC_WMMXC:
+ return _UVRSR_NOT_IMPLEMENTED;
+
+ default:
+ return _UVRSR_FAILED;
+ }
+}
+
+
+/* ABI defined function to pop registers off the stack. */
+
+_Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ _uw discriminator,
+ _Unwind_VRS_DataRepresentation representation)
+{
+ phase1_vrs *vrs = (phase1_vrs *) context;
+
+ switch (regclass)
+ {
+ case _UVRSC_CORE:
+ {
+ _uw *ptr;
+ _uw mask;
+ int i;
+
+ if (representation != _UVRSD_UINT32)
+ return _UVRSR_FAILED;
+
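+ /* For core registers the discriminator is a bitmask with bit N set
+ for each register rN that is to be popped from the stack. */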
+ mask = discriminator & 0xffff;
+ ptr = (_uw *) vrs->core.r[R_SP];
+ /* Pop the requested registers. */
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & (1 << i))
+ vrs->core.r[i] = *(ptr++);
+ }
+ /* Writeback the stack pointer value if it wasn't restored. */
+ if ((mask & (1 << R_SP)) == 0)
+ vrs->core.r[R_SP] = (_uw) ptr;
+ }
+ return _UVRSR_OK;
+
+ case _UVRSC_VFP:
+ {
+ _uw start = discriminator >> 16;
+ _uw count = discriminator & 0xffff;
+ struct vfp_regs tmp;
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ struct vfpv3_regs tmp_16_to_31;
+ int tmp_count;
+ _uw *sp;
+ _uw *dest;
+ int num_vfpv3_regs = 0;
+
+ /* We use an approximation here, always bounding _UVRSD_DOUBLE
+ register numbers at 32, since we cannot detect whether VFPv3
+ is absent (in which case the upper limit is 16). */
+ if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
+ || start + count > (representation == _UVRSD_VFPX ? 16 : 32)
+ || (representation == _UVRSD_VFPX && start >= 16))
+ return _UVRSR_FAILED;
+
+ /* Check if we're being asked to pop VFPv3-only registers
+ (numbers 16 through 31). */
+ if (start >= 16)
+ num_vfpv3_regs = count;
+ else if (start + count > 16)
+ num_vfpv3_regs = start + count - 16;
+
+ if (num_vfpv3_regs && representation != _UVRSD_DOUBLE)
+ return _UVRSR_FAILED;
+
+ /* Demand-save coprocessor registers for stage1. */
+ if (start < 16 && (vrs->demand_save_flags & DEMAND_SAVE_VFP))
+ {
+ vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;
+
+ if (representation == _UVRSD_DOUBLE)
+ {
+ /* Save in FLDMD/FSTMD format. */
+ vrs->demand_save_flags |= DEMAND_SAVE_VFP_D;
+ __gnu_Unwind_Save_VFP_D (&vrs->vfp);
+ }
+ else
+ {
+ /* Save in FLDMX/FSTMX format. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_D;
+ __gnu_Unwind_Save_VFP (&vrs->vfp);
+ }
+ }
+
+ if (num_vfpv3_regs > 0
+ && (vrs->demand_save_flags & DEMAND_SAVE_VFP_V3))
+ {
+ vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_V3;
+ __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
+ }
+
+ /* Restore the registers from the stack. Do this by saving the
+ current VFP registers to a memory area, moving the in-memory
+ values into that area, and restoring from the whole area.
+ For _UVRSD_VFPX we assume FSTMX standard format 1. */
+ if (representation == _UVRSD_VFPX)
+ __gnu_Unwind_Save_VFP (&tmp);
+ else
+ {
+ /* Save registers 0 .. 15 if required. */
+ if (start < 16)
+ __gnu_Unwind_Save_VFP_D (&tmp);
+
+ /* Save VFPv3 registers 16 .. 31 if required. */
+ if (num_vfpv3_regs)
+ __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31);
+ }
+
+ /* Work out how many registers below register 16 need popping. */
+ tmp_count = num_vfpv3_regs > 0 ? 16 - start : count;
+
+ /* Copy registers below 16, if needed.
+ The stack address is only guaranteed to be word aligned, so
+ we can't use doubleword copies. */
+ sp = (_uw *) vrs->core.r[R_SP];
+ if (tmp_count > 0)
+ {
+ tmp_count *= 2;
+ dest = (_uw *) &tmp.d[start];
+ while (tmp_count--)
+ *(dest++) = *(sp++);
+ }
+
+ /* Copy VFPv3 registers numbered >= 16, if needed. */
+ if (num_vfpv3_regs > 0)
+ {
+ /* num_vfpv3_regs is needed below, so copy it. */
+ int tmp_count_2 = num_vfpv3_regs * 2;
+ int vfpv3_start = start < 16 ? 16 : start;
+
+ dest = (_uw *) &tmp_16_to_31.d[vfpv3_start - 16];
+ while (tmp_count_2--)
+ *(dest++) = *(sp++);
+ }
+
+ /* Skip the format word space if using FLDMX/FSTMX format. */
+ if (representation == _UVRSD_VFPX)
+ sp++;
+
+ /* Set the new stack pointer. */
+ vrs->core.r[R_SP] = (_uw) sp;
+
+ /* Reload the registers. */
+ if (representation == _UVRSD_VFPX)
+ __gnu_Unwind_Restore_VFP (&tmp);
+ else
+ {
+ /* Restore registers 0 .. 15 if required. */
+ if (start < 16)
+ __gnu_Unwind_Restore_VFP_D (&tmp);
+
+ /* Restore VFPv3 registers 16 .. 31 if required. */
+ if (num_vfpv3_regs > 0)
+ __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31);
+ }
+/* APPLE LOCAL end v7 support. Merge from mainline */
+ }
+ return _UVRSR_OK;
+
+ case _UVRSC_FPA:
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ return _UVRSR_NOT_IMPLEMENTED;
+
+ case _UVRSC_WMMXD:
+ {
+ _uw start = discriminator >> 16;
+ _uw count = discriminator & 0xffff;
+ struct wmmxd_regs tmp;
+ _uw *sp;
+ _uw *dest;
+
+ if ((representation != _UVRSD_UINT64) || start + count > 16)
+ return _UVRSR_FAILED;
+
+ if (vrs->demand_save_flags & DEMAND_SAVE_WMMXD)
+ {
+ /* Demand-save registers for stage1. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXD;
+ __gnu_Unwind_Save_WMMXD (&vrs->wmmxd);
+ }
+
+ /* Restore the registers from the stack. Do this by saving the
+ current WMMXD registers to a memory area, moving the in-memory
+ values into that area, and restoring from the whole area. */
+ __gnu_Unwind_Save_WMMXD (&tmp);
+
+ /* The stack address is only guaranteed to be word aligned, so
+ we can't use doubleword copies. */
+ sp = (_uw *) vrs->core.r[R_SP];
+ dest = (_uw *) &tmp.wd[start];
+ count *= 2;
+ while (count--)
+ *(dest++) = *(sp++);
+
+ /* Set the new stack pointer. */
+ vrs->core.r[R_SP] = (_uw) sp;
+
+ /* Reload the registers. */
+ __gnu_Unwind_Restore_WMMXD (&tmp);
+ }
+ return _UVRSR_OK;
+
+ case _UVRSC_WMMXC:
+ {
+ int i;
+ struct wmmxc_regs tmp;
+ _uw *sp;
+
+ if ((representation != _UVRSD_UINT32) || discriminator > 16)
+ return _UVRSR_FAILED;
+
+ if (vrs->demand_save_flags & DEMAND_SAVE_WMMXC)
+ {
+ /* Demand-save registers for stage1. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXC;
+ __gnu_Unwind_Save_WMMXC (&vrs->wmmxc);
+ }
+
+ /* Restore the registers from the stack. Do this by saving the
+ current WMMXC registers to a memory area, moving the in-memory
+ values into that area, and restoring from the whole area. */
+ __gnu_Unwind_Save_WMMXC (&tmp);
+
+ sp = (_uw *) vrs->core.r[R_SP];
+ for (i = 0; i < 4; i++)
+ if (discriminator & (1 << i))
+ tmp.wc[i] = *(sp++);
+
+ /* Set the new stack pointer. */
+ vrs->core.r[R_SP] = (_uw) sp;
+
+ /* Reload the registers. */
+ __gnu_Unwind_Restore_WMMXC (&tmp);
+ }
+ return _UVRSR_OK;
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+ default:
+ return _UVRSR_FAILED;
+ }
+}
+
+
+/* Core unwinding functions. */
+
+/* Calculate the address encoded by a 31-bit self-relative offset at address
+ P. */
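+/* For example, a stored value of 0x7ffffffc has bit 30 set, so it is
+   sign-extended to -4 and decodes to the address P - 4. */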
+static inline _uw
+selfrel_offset31 (const _uw *p)
+{
+ _uw offset;
+
+ offset = *p;
+ /* Sign extend to 32 bits. */
+ if (offset & (1 << 30))
+ offset |= 1u << 31;
+ else
+ offset &= ~(1u << 31);
+
+ return offset + (_uw) p;
+}
+
+
+/* Perform a binary search for RETURN_ADDRESS in TABLE. The table contains
+ NREC entries. */
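+/* Each entry is taken to cover the addresses from its own function
+   start up to the start of the next entry's function, so entry N
+   matches when this_fn <= return_address <= next_fn. */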
+
+static const __EIT_entry *
+search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
+{
+ _uw next_fn;
+ _uw this_fn;
+ int n, left, right;
+
+ if (nrec == 0)
+ return (__EIT_entry *) 0;
+
+ left = 0;
+ right = nrec - 1;
+
+ while (1)
+ {
+ n = (left + right) / 2;
+ this_fn = selfrel_offset31 (&table[n].fnoffset);
+ if (n != nrec - 1)
+ next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1;
+ else
+ next_fn = (_uw)0 - 1;
+
+ if (return_address < this_fn)
+ {
+ if (n == left)
+ return (__EIT_entry *) 0;
+ right = n - 1;
+ }
+ else if (return_address <= next_fn)
+ return &table[n];
+ else
+ left = n + 1;
+ }
+}
+
+/* Find the exception index table entry for the given address.
+ Fill in the relevant fields of the UCB.
+ Returns _URC_FAILURE if an error occurred, _URC_OK on success. */
+
+static _Unwind_Reason_Code
+get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
+{
+ const __EIT_entry * eitp;
+ int nrec;
+
+ /* The return address is the address of the instruction following the
+ call instruction (plus one in thumb mode). If this was the last
+ instruction in the function the address will lie in the following
+ function. Subtract 2 from the address so that it points within the call
+ instruction itself. */
+ return_address -= 2;
+
+ if (__gnu_Unwind_Find_exidx)
+ {
+ eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
+ &nrec);
+ if (!eitp)
+ {
+ UCB_PR_ADDR (ucbp) = 0;
+ return _URC_FAILURE;
+ }
+ }
+ else
+ {
+ eitp = &__exidx_start;
+ nrec = &__exidx_end - &__exidx_start;
+ }
+
+ eitp = search_EIT_table (eitp, nrec, return_address);
+
+ if (!eitp)
+ {
+ UCB_PR_ADDR (ucbp) = 0;
+ return _URC_FAILURE;
+ }
+ ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);
+
+ /* Can this frame be unwound at all? */
+ if (eitp->content == EXIDX_CANTUNWIND)
+ {
+ UCB_PR_ADDR (ucbp) = 0;
+ return _URC_END_OF_STACK;
+ }
+
+ /* Obtain the address of the "real" __EHT_Header word. */
+
+ if (eitp->content & uint32_highbit)
+ {
+ /* It is immediate data. */
+ ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
+ ucbp->pr_cache.additional = 1;
+ }
+ else
+ {
+ /* The low 31 bits of the content field are a self-relative
+ offset to an _Unwind_EHT_Entry structure. */
+ ucbp->pr_cache.ehtp =
+ (_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
+ ucbp->pr_cache.additional = 0;
+ }
+
+ /* Discover the personality routine address. */
+ if (*ucbp->pr_cache.ehtp & (1u << 31))
+ {
+ /* One of the predefined standard routines. */
+ _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
+ if (idx == 0)
+ UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
+ else if (idx == 1)
+ UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
+ else if (idx == 2)
+ UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
+ else
+ { /* Failed */
+ UCB_PR_ADDR (ucbp) = 0;
+ return _URC_FAILURE;
+ }
+ }
+ else
+ {
+ /* Execute region offset to PR */
+ UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
+ }
+ return _URC_OK;
+}
+
+
+/* Perform phase2 unwinding. VRS is the initial virtual register state. */
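+/* Phase 1 has already located the handler frame; this pass walks the
+   stack again, letting each personality routine run its cleanups,
+   until the handler context is installed and restore_core_regs
+   enters it. */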
+
+static void __attribute__((noreturn))
+unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
+{
+ _Unwind_Reason_Code pr_result;
+
+ do
+ {
+ /* Find the entry for this routine. */
+ if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK)
+ abort ();
+
+ UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC];
+
+ /* Call the pr to decide what to do. */
+ pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
+ (_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);
+ }
+ while (pr_result == _URC_CONTINUE_UNWIND);
+
+ if (pr_result != _URC_INSTALL_CONTEXT)
+ abort();
+
+ restore_core_regs (&vrs->core);
+}
+
+/* Perform phase2 forced unwinding. */
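+/* The stop function is consulted at every frame: unwinding continues
+   only while it returns _URC_NO_REASON. */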
+
+static _Unwind_Reason_Code
+unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
+ int resuming)
+{
+ _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
+ void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
+ _Unwind_Reason_Code pr_result = 0;
+ /* We use phase1_vrs here even though we do not demand save, for the
+ prev_sp field. */
+ phase1_vrs saved_vrs, next_vrs;
+
+ /* Save the core registers. */
+ saved_vrs.core = entry_vrs->core;
+ /* We don't need to demand-save the non-core registers, because we
+ unwind in a single pass. */
+ saved_vrs.demand_save_flags = 0;
+
+ /* Unwind until we reach a propagation barrier. */
+ do
+ {
+ _Unwind_State action;
+ _Unwind_Reason_Code entry_code;
+ _Unwind_Reason_Code stop_code;
+
+ /* Find the entry for this routine. */
+ entry_code = get_eit_entry (ucbp, saved_vrs.core.r[R_PC]);
+
+ if (resuming)
+ {
+ action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
+ resuming = 0;
+ }
+ else
+ action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;
+
+ if (entry_code == _URC_OK)
+ {
+ UCB_SAVED_CALLSITE_ADDR (ucbp) = saved_vrs.core.r[R_PC];
+
+ next_vrs = saved_vrs;
+
+ /* Call the pr to decide what to do. */
+ pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
+ (action, ucbp, (void *) &next_vrs);
+
+ saved_vrs.prev_sp = next_vrs.core.r[R_SP];
+ }
+ else
+ {
+ /* Treat any failure as the end of unwinding, to cope more
+ gracefully with missing EH information. Mixed EH and
+ non-EH within one object will usually result in failure,
+ because the .ARM.exidx tables do not indicate the end
+ of the code to which they apply; but mixed EH and non-EH
+ shared objects should return an unwind failure at the
+ entry of a non-EH shared object. */
+ action |= _US_END_OF_STACK;
+
+ saved_vrs.prev_sp = saved_vrs.core.r[R_SP];
+ }
+
+ stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
+ (void *)&saved_vrs, stop_arg);
+ if (stop_code != _URC_NO_REASON)
+ return _URC_FAILURE;
+
+ if (entry_code != _URC_OK)
+ return entry_code;
+
+ saved_vrs = next_vrs;
+ }
+ while (pr_result == _URC_CONTINUE_UNWIND);
+
+ if (pr_result != _URC_INSTALL_CONTEXT)
+ {
+ /* Some sort of failure has occurred in the pr and probably the
+ pr returned _URC_FAILURE. */
+ return _URC_FAILURE;
+ }
+
+ restore_core_regs (&saved_vrs.core);
+}
+
+/* This is a very limited implementation of _Unwind_GetCFA. It returns
+ the stack pointer as it is about to be unwound, and is only valid
+ while calling the stop function during forced unwinding. If the
+ current personality routine result is going to run a cleanup, this
+ will not be the CFA; but when the frame is really unwound, it will
+ be. */
+
+_Unwind_Word
+_Unwind_GetCFA (_Unwind_Context *context)
+{
+ return ((phase1_vrs *) context)->prev_sp;
+}
+
+/* Perform phase1 unwinding. UCBP is the exception being thrown, and
+ entry_VRS is the register state on entry to _Unwind_RaiseException. */
+
+_Unwind_Reason_Code
+__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);
+
+_Unwind_Reason_Code
+__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
+ phase2_vrs * entry_vrs)
+{
+ phase1_vrs saved_vrs;
+ _Unwind_Reason_Code pr_result;
+
+ /* Set the pc to the call site. */
+ entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
+
+ /* Save the core registers. */
+ saved_vrs.core = entry_vrs->core;
+ /* Set demand-save flags. */
+ saved_vrs.demand_save_flags = ~(_uw) 0;
+
+ /* Unwind until we reach a propagation barrier. */
+ do
+ {
+ /* Find the entry for this routine. */
+ if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
+ return _URC_FAILURE;
+
+ /* Call the pr to decide what to do. */
+ pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
+ (_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
+ }
+ while (pr_result == _URC_CONTINUE_UNWIND);
+
+ /* We've unwound as far as we want to go, so restore the original
+ register state. */
+ restore_non_core_regs (&saved_vrs);
+ if (pr_result != _URC_HANDLER_FOUND)
+ {
+ /* Some sort of failure has occurred in the pr and probably the
+ pr returned _URC_FAILURE. */
+ return _URC_FAILURE;
+ }
+
+ unwind_phase2 (ucbp, entry_vrs);
+}
+
+/* Resume unwinding after a cleanup has been run. UCBP is the exception
+ being thrown and ENTRY_VRS is the register state on entry to
+ _Unwind_Resume. */
+_Unwind_Reason_Code
+__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
+ _Unwind_Stop_Fn, void *, phase2_vrs *);
+
+_Unwind_Reason_Code
+__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
+ _Unwind_Stop_Fn stop_fn, void *stop_arg,
+ phase2_vrs *entry_vrs)
+{
+ UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
+ UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;
+
+ /* Set the pc to the call site. */
+ entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
+
+ return unwind_phase2_forced (ucbp, entry_vrs, 0);
+}
+
+_Unwind_Reason_Code
+__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);
+
+_Unwind_Reason_Code
+__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
+{
+ _Unwind_Reason_Code pr_result;
+
+ /* Recover the saved address. */
+ entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);
+
+ if (UCB_FORCED_STOP_FN (ucbp))
+ {
+ unwind_phase2_forced (ucbp, entry_vrs, 1);
+
+ /* We can't return failure at this point. */
+ abort ();
+ }
+
+ /* Call the cached PR. */
+ pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
+ (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);
+
+ switch (pr_result)
+ {
+ case _URC_INSTALL_CONTEXT:
+ /* Upload the registers to enter the landing pad. */
+ restore_core_regs (&entry_vrs->core);
+
+ case _URC_CONTINUE_UNWIND:
+ /* Continue unwinding the next frame. */
+ unwind_phase2 (ucbp, entry_vrs);
+
+ default:
+ abort ();
+ }
+}
+
+_Unwind_Reason_Code
+__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *);
+
+_Unwind_Reason_Code
+__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp,
+ phase2_vrs * entry_vrs)
+{
+ if (!UCB_FORCED_STOP_FN (ucbp))
+ return __gnu_Unwind_RaiseException (ucbp, entry_vrs);
+
+ /* Set the pc to the call site. */
+ entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
+ /* Continue unwinding the next frame. */
+ return unwind_phase2_forced (ucbp, entry_vrs, 0);
+}
+
+/* Clean up an exception object when unwinding is complete. */
+void
+_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
+{
+}
+
+
+/* Get the _Unwind_Control_Block from an _Unwind_Context. */
+
+static inline _Unwind_Control_Block *
+unwind_UCB_from_context (_Unwind_Context * context)
+{
+ return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP);
+}
+
+
+/* Free an exception. */
+
+void
+_Unwind_DeleteException (_Unwind_Exception * exc)
+{
+ if (exc->exception_cleanup)
+ (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
+}
+
+
+/* Common implementation for ARM ABI defined personality routines.
+ ID is the index of the personality routine, other arguments are as defined
+ by __aeabi_unwind_cpp_pr{0,1,2}. */
+
+static _Unwind_Reason_Code
+__gnu_unwind_pr_common (_Unwind_State state,
+ _Unwind_Control_Block *ucbp,
+ _Unwind_Context *context,
+ int id)
+{
+ __gnu_unwind_state uws;
+ _uw *data;
+ _uw offset;
+ _uw len;
+ _uw rtti_count;
+ int phase2_call_unexpected_after_unwind = 0;
+ int in_range = 0;
+ int forced_unwind = state & _US_FORCE_UNWIND;
+
+ state &= _US_ACTION_MASK;
+
+ data = (_uw *) ucbp->pr_cache.ehtp;
+ uws.data = *(data++);
+ uws.next = data;
+ if (id == 0)
+ {
+ uws.data <<= 8;
+ uws.words_left = 0;
+ uws.bytes_left = 3;
+ }
+ else
+ {
+ uws.words_left = (uws.data >> 16) & 0xff;
+ uws.data <<= 16;
+ uws.bytes_left = 2;
+ data += uws.words_left;
+ }
+
+ /* Restore the saved pointer. */
+ if (state == _US_UNWIND_FRAME_RESUME)
+ data = (_uw *) ucbp->cleanup_cache.bitpattern[0];
+
+ if ((ucbp->pr_cache.additional & 1) == 0)
+ {
+ /* Process descriptors. */
+ while (*data)
+ {
+ _uw addr;
+ _uw fnstart;
+
+ if (id == 2)
+ {
+ len = ((EHT32 *) data)->length;
+ offset = ((EHT32 *) data)->offset;
+ data += 2;
+ }
+ else
+ {
+ len = ((EHT16 *) data)->length;
+ offset = ((EHT16 *) data)->offset;
+ data++;
+ }
+
+ fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
+ addr = _Unwind_GetGR (context, R_PC);
+ in_range = (fnstart <= addr && addr < fnstart + (len & ~1));
+
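+ /* Bit 0 of the length and bit 0 of the offset together select the
+ descriptor kind: 0 is a cleanup, 1 a catch handler and 2 an
+ exception specification. */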
+ switch (((offset & 1) << 1) | (len & 1))
+ {
+ case 0:
+ /* Cleanup. */
+ if (state != _US_VIRTUAL_UNWIND_FRAME
+ && in_range)
+ {
+ /* Cleanup in range, and we are running cleanups. */
+ _uw lp;
+
+ /* Landing pad address is 31-bit pc-relative offset. */
+ lp = selfrel_offset31 (data);
+ data++;
+ /* Save the exception data pointer. */
+ ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
+ if (!__cxa_begin_cleanup (ucbp))
+ return _URC_FAILURE;
+ /* Setup the VRS to enter the landing pad. */
+ _Unwind_SetGR (context, R_PC, lp);
+ return _URC_INSTALL_CONTEXT;
+ }
+ /* Cleanup not in range, or we are in stage 1. */
+ data++;
+ break;
+
+ case 1:
+ /* Catch handler. */
+ if (state == _US_VIRTUAL_UNWIND_FRAME)
+ {
+ if (in_range)
+ {
+ /* Check for a barrier. */
+ _uw rtti;
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ bool is_reference = (data[0] & uint32_highbit) != 0;
+ void *matched;
+
+ /* Check for no-throw areas. */
+ if (data[1] == (_uw) -2)
+ return _URC_FAILURE;
+
+ /* The thrown object immediately follows the ECB. */
+ matched = (void *)(ucbp + 1);
+ if (data[1] != (_uw) -1)
+ {
+ /* Match a catch specification. */
+ rtti = _Unwind_decode_target2 ((_uw) &data[1]);
+ if (!__cxa_type_match (ucbp, (type_info *) rtti,
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ is_reference,
+ &matched))
+ matched = (void *)0;
+ }
+
+ if (matched)
+ {
+ ucbp->barrier_cache.sp =
+ _Unwind_GetGR (context, R_SP);
+ ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
+ ucbp->barrier_cache.bitpattern[1] = (_uw) data;
+ return _URC_HANDLER_FOUND;
+ }
+ }
+ /* Handler out of range, or not matched. */
+ }
+ else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
+ && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
+ {
+ /* Matched a previous propagation barrier. */
+ _uw lp;
+
+ /* Setup for entry to the handler. */
+ lp = selfrel_offset31 (data);
+ _Unwind_SetGR (context, R_PC, lp);
+ _Unwind_SetGR (context, 0, (_uw) ucbp);
+ return _URC_INSTALL_CONTEXT;
+ }
+ /* Catch handler not matched. Advance to the next descriptor. */
+ data += 2;
+ break;
+
+ case 2:
+ rtti_count = data[0] & 0x7fffffff;
+ /* Exception specification. */
+ if (state == _US_VIRTUAL_UNWIND_FRAME)
+ {
+ if (in_range && (!forced_unwind || !rtti_count))
+ {
+ /* Match against the exception specification. */
+ _uw i;
+ _uw rtti;
+ void *matched;
+
+ for (i = 0; i < rtti_count; i++)
+ {
+ matched = (void *)(ucbp + 1);
+ rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
+ &matched))
+ break;
+ }
+
+ if (i == rtti_count)
+ {
+ /* Exception does not match the spec. */
+ ucbp->barrier_cache.sp =
+ _Unwind_GetGR (context, R_SP);
+ ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
+ ucbp->barrier_cache.bitpattern[1] = (_uw) data;
+ return _URC_HANDLER_FOUND;
+ }
+ }
+ /* Handler out of range, or exception is permitted. */
+ }
+ else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
+ && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
+ {
+ /* Matched a previous propagation barrier. */
+ _uw lp;
+ /* Record the RTTI list for __cxa_call_unexpected. */
+ ucbp->barrier_cache.bitpattern[1] = rtti_count;
+ ucbp->barrier_cache.bitpattern[2] = 0;
+ ucbp->barrier_cache.bitpattern[3] = 4;
+ ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];
+
+ if (data[0] & uint32_highbit)
+ phase2_call_unexpected_after_unwind = 1;
+ else
+ {
+ data += rtti_count + 1;
+ /* Setup for entry to the handler. */
+ lp = selfrel_offset31 (data);
+ data++;
+ _Unwind_SetGR (context, R_PC, lp);
+ _Unwind_SetGR (context, 0, (_uw) ucbp);
+ return _URC_INSTALL_CONTEXT;
+ }
+ }
+ if (data[0] & uint32_highbit)
+ data++;
+ data += rtti_count + 1;
+ break;
+
+ default:
+ /* Should never happen. */
+ return _URC_FAILURE;
+ }
+ /* Finished processing this descriptor. */
+ }
+ }
+
+ if (__gnu_unwind_execute (context, &uws) != _URC_OK)
+ return _URC_FAILURE;
+
+ if (phase2_call_unexpected_after_unwind)
+ {
+ /* Enter __cxa_unexpected as if called from the call site. */
+ _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
+ _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
+ return _URC_INSTALL_CONTEXT;
+ }
+
+ return _URC_CONTINUE_UNWIND;
+}
+
+
+/* ABI defined personality routine entry points. */
+
+_Unwind_Reason_Code
+__aeabi_unwind_cpp_pr0 (_Unwind_State state,
+ _Unwind_Control_Block *ucbp,
+ _Unwind_Context *context)
+{
+ return __gnu_unwind_pr_common (state, ucbp, context, 0);
+}
+
+_Unwind_Reason_Code
+__aeabi_unwind_cpp_pr1 (_Unwind_State state,
+ _Unwind_Control_Block *ucbp,
+ _Unwind_Context *context)
+{
+ return __gnu_unwind_pr_common (state, ucbp, context, 1);
+}
+
+_Unwind_Reason_Code
+__aeabi_unwind_cpp_pr2 (_Unwind_State state,
+ _Unwind_Control_Block *ucbp,
+ _Unwind_Context *context)
+{
+ return __gnu_unwind_pr_common (state, ucbp, context, 2);
+}
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+/* Removed lines */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.h b/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.h
new file mode 100644
index 000000000..a3040d7ad
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/unwind-arm.h
@@ -0,0 +1,271 @@
+/* Header file for the ARM EABI unwinder
+ Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Language-independent unwinder header public defines. This contains both
+ ABI defined objects, and GNU support routines. */
+
+#ifndef UNWIND_ARM_H
+#define UNWIND_ARM_H
+
+#define __ARM_EABI_UNWINDER__ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ typedef unsigned _Unwind_Word __attribute__((__mode__(__word__)));
+ typedef signed _Unwind_Sword __attribute__((__mode__(__word__)));
+ typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__)));
+ typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__)));
+ typedef _Unwind_Word _uw;
+ typedef unsigned _uw64 __attribute__((mode(__DI__)));
+ typedef unsigned _uw16 __attribute__((mode(__HI__)));
+ typedef unsigned _uw8 __attribute__((mode(__QI__)));
+
+ typedef enum
+ {
+ _URC_OK = 0, /* operation completed successfully */
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9 /* unspecified failure of some kind */
+ }
+ _Unwind_Reason_Code;
+
+ typedef enum
+ {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16
+ }
+ _Unwind_State;
+
+ /* Provided only for compatibility with existing code. */
+ typedef int _Unwind_Action;
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+#define _UA_END_OF_STACK 16
+#define _URC_NO_REASON _URC_OK
+
+ typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+ typedef struct _Unwind_Context _Unwind_Context;
+ typedef _uw _Unwind_EHT_Header;
+
+
+ /* UCB: */
+
+ struct _Unwind_Control_Block
+ {
+ char exception_class[8];
+ void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
+ /* Unwinder cache, private fields for the unwinder's use */
+ struct
+ {
+ _uw reserved1; /* Forced unwind stop fn, 0 if not forced */
+ _uw reserved2; /* Personality routine address */
+ _uw reserved3; /* Saved callsite address */
+ _uw reserved4; /* Forced unwind stop arg */
+ _uw reserved5;
+ }
+ unwinder_cache;
+ /* Propagation barrier cache (valid after phase 1): */
+ struct
+ {
+ _uw sp;
+ _uw bitpattern[5];
+ }
+ barrier_cache;
+ /* Cleanup cache (preserved over cleanup): */
+ struct
+ {
+ _uw bitpattern[4];
+ }
+ cleanup_cache;
+ /* Pr cache (for pr's benefit): */
+ struct
+ {
+ _uw fnstart; /* function start address */
+ _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
+ _uw additional; /* additional data */
+ _uw reserved1;
+ }
+ pr_cache;
+ long long int :0; /* Force alignment to 8-byte boundary */
+ };
+
+ /* Virtual Register Set */
+
+ typedef enum
+ {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_FPA = 2, /* fpa */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+ }
+ _Unwind_VRS_RegClass;
+
+ typedef enum
+ {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_FPAX = 2,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+ }
+ _Unwind_VRS_DataRepresentation;
+
+ typedef enum
+ {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+ }
+ _Unwind_VRS_Result;
+
+ /* Frame unwinding state. */
+ typedef struct
+ {
+ /* The current word (bytes packed msb first). */
+ _uw data;
+ /* Pointer to the next word of data. */
+ _uw *next;
+ /* The number of bytes left in this word. */
+ _uw8 bytes_left;
+ /* The number of words pointed to by next. */
+ _uw8 words_left;
+ }
+ __gnu_unwind_state;
+
+ typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation);
+
+
+ /* Support functions for the PR. */
+#define _Unwind_Exception _Unwind_Control_Block
+ typedef char _Unwind_Exception_Class[8];
+
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+
+ /* These two should never be used. */
+ _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *);
+
+ /* Interface functions: */
+ _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp);
+ void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp);
+ _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp);
+
+ typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)
+ (int, _Unwind_Action, _Unwind_Exception_Class,
+ _Unwind_Control_Block *, struct _Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *,
+ _Unwind_Stop_Fn, void *);
+ _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *);
+ void _Unwind_Complete(_Unwind_Control_Block *ucbp);
+ void _Unwind_DeleteException (_Unwind_Exception *);
+
+ _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *,
+ _Unwind_Context *);
+ _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *,
+ __gnu_unwind_state *);
+
+ /* Decode an R_ARM_TARGET2 relocation. */
+ static inline _Unwind_Word
+ _Unwind_decode_target2 (_Unwind_Word ptr)
+ {
+ _Unwind_Word tmp;
+
+ tmp = *(_Unwind_Word *) ptr;
+ /* Zero values are always NULL. */
+ if (!tmp)
+ return 0;
+
+#if defined(linux) || defined(__NetBSD__)
+ /* Pc-relative indirect. */
+ tmp += ptr;
+ tmp = *(_Unwind_Word *) tmp;
+#elif defined(__symbian__)
+ /* Absolute pointer. Nothing more to do. */
+#else
+ /* Pc-relative pointer. */
+ tmp += ptr;
+#endif
+ return tmp;
+ }
+
+ static inline _Unwind_Word
+ _Unwind_GetGR (_Unwind_Context *context, int regno)
+ {
+ _uw val;
+ _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ return val;
+ }
+
+ /* Return the address of the instruction, not the actual IP value. */
+#define _Unwind_GetIP(context) \
+ (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
+
+#define _Unwind_GetIPInfo(context, ip_before_insn) \
+ (*ip_before_insn = 0, _Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
+
+ static inline void
+ _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val)
+ {
+ _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ }
+
+ /* The dwarf unwinder doesn't understand arm/thumb state. We assume the
+ landing pad uses the same instruction set as the call site. */
+#define _Unwind_SetIP(context, val) \
+ _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
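+ /* Illustrative sketch only, not part of this interface: a personality
+    routine that has located a handler typically installs the landing pad
+    through the helpers above, e.g.
+
+      _Unwind_SetGR (context, 0, (_Unwind_Word) ucbp);
+      _Unwind_SetIP (context, landing_pad);
+      return _URC_INSTALL_CONTEXT;
+
+    where "landing_pad" stands for a hypothetical code address recovered
+    from the exception-handling tables, and _URC_INSTALL_CONTEXT is the
+    reason code declared earlier in this header. */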
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/vec-common.md b/gcc-4.2.1-5666.3/gcc/config/arm/vec-common.md
new file mode 100644
index 000000000..be513eeed
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/vec-common.md
@@ -0,0 +1,108 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; Machine Description for shared bits common to IWMMXT and Neon.
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;; Written by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA.
+
+;; Vector Moves
+
+;; All integer and float modes supported by Neon and IWMMXT.
+(define_mode_macro VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
+
+;; All integer and float modes supported by Neon and IWMMXT, except V2DI.
+(define_mode_macro VALLW [V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
+
+;; All integer modes supported by Neon and IWMMXT.
+(define_mode_macro VINT [V2DI V2SI V4HI V8QI V4SI V8HI V16QI])
+
+;; All integer modes supported by Neon and IWMMXT, except V2DI.
+(define_mode_macro VINTW [V2SI V4HI V8QI V4SI V8HI V16QI])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VALL 0 "nonimmediate_operand" "")
+ (match_operand:VALL 1 "general_operand" ""))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+;; Vector arithmetic. Expanders are blank, then unnamed insns implement
+;; patterns separately for IWMMXT and Neon.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:VALL 0 "s_register_operand" "")
+ (plus:VALL (match_operand:VALL 1 "s_register_operand" "")
+ (match_operand:VALL 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
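+
+;; As an illustration (a sketch, not part of this machine description):
+;; C-level generic vector code such as
+;;
+;;   typedef int v4si __attribute__ ((vector_size (16)));
+;;   v4si add4 (v4si a, v4si b) { return a + b; }
+;;
+;; is what the add<mode>3 expander above services (here with V4SImode).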
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:VALL 0 "s_register_operand" "")
+ (minus:VALL (match_operand:VALL 1 "s_register_operand" "")
+ (match_operand:VALL 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "mul<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (mult:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON || (<MODE>mode == V4HImode && TARGET_REALLY_IWMMXT)"
+{
+})
+
+(define_expand "smin<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (smin:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "umin<mode>3"
+ [(set (match_operand:VINTW 0 "s_register_operand" "")
+ (umin:VINTW (match_operand:VINTW 1 "s_register_operand" "")
+ (match_operand:VINTW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "smax<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (smax:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "umax<mode>3"
+ [(set (match_operand:VINTW 0 "s_register_operand" "")
+ (umax:VINTW (match_operand:VINTW 1 "s_register_operand" "")
+ (match_operand:VINTW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/vfp.md b/gcc-4.2.1-5666.3/gcc/config/arm/vfp.md
new file mode 100644
index 000000000..632cb6fc9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/vfp.md
@@ -0,0 +1,1068 @@
+;; ARM VFP coprocessor Machine Description
+;; APPLE LOCAL v7 support. Merge from mainline
+;; Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+;; Additional register numbers
+(define_constants
+;; APPLE LOCAL v7 support. Merge from mainline
+ [(VFPCC_REGNUM 127)]
+)
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
+
+;; The VFP "type" attributes differ from those used in the FPA model.
+;; ffarith Fast floating point insns, e.g. abs, neg, cpy, cmp.
+;; farith Most arithmetic insns.
+;; fmuls Single precision multiply.
+;; fmuld Double precision multiply.
+;; fmacs Single precision multiply-accumulate.
+;; fmacd Double precision multiply-accumulate.
+;; fdivs Single precision sqrt or division.
+;; fdivd Double precision sqrt or division.
+;; f_flag fmstat operation
+;; f_load[sd] Floating point load from memory.
+;; f_store[sd] Floating point store to memory.
+;; f_2_r Transfer vfp to arm reg.
+;; r_2_f Transfer arm to vfp reg.
+;; f_cvt Convert floating<->integral
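+;;
+;; These classes drive the pipeline models (see vfp11.md in this same
+;; merge): for example, the *addsf3_vfp pattern below is typed "farith",
+;; while the division and sqrt patterns are typed "fdivs"/"fdivd".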
+
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; APPLE LOCAL begin v7 support. Merge from mainline
+;; SImode moves
+;; ??? For now do not allow loading constants into vfp regs. This causes
+;; problems because small constants get converted into adds.
+(define_insn "*arm_movsi_vfp"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r ,m,*t,r,*t,*t, *Uv")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))]
+ "TARGET_ARM && TARGET_VFP && TARGET_HARD_FLOAT
+ && ( s_register_operand (operands[0], SImode)
+ || s_register_operand (operands[1], SImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov%?\\t%0, %1\";
+ case 1:
+ return \"mvn%?\\t%0, #%B1\";
+ case 2:
+ return \"movw%?\\t%0, %1\";
+ case 3:
+ return \"ldr%?\\t%0, %1\";
+ case 4:
+ return \"str%?\\t%1, %0\";
+ case 5:
+ return \"fmsr%?\\t%0, %1\\t%@ int\";
+ case 6:
+ return \"fmrs%?\\t%0, %1\\t%@ int\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\\t%@ int\";
+ case 8: case 9:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_loads,f_stores")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,mov,*,*,*,*,*,*,*")
+ (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*,4084,*,*,*,*,1008,*")]
+)
+
+(define_insn "*thumb2_movsi_vfp"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,m,*t,r,*t,*t, *Uv")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))]
+ "TARGET_THUMB2 && TARGET_VFP && TARGET_HARD_FLOAT
+ && ( s_register_operand (operands[0], SImode)
+ || s_register_operand (operands[1], SImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov%?\\t%0, %1\";
+ case 1:
+ return \"mvn%?\\t%0, #%B1\";
+ case 2:
+ return \"movw%?\\t%0, %1\";
+ case 3:
+ return \"ldr%?\\t%0, %1\";
+ case 4:
+ return \"str%?\\t%1, %0\";
+ case 5:
+ return \"fmsr%?\\t%0, %1\\t%@ int\";
+ case 6:
+ return \"fmrs%?\\t%0, %1\\t%@ int\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\\t%@ int\";
+ case 8: case 9:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_load,f_store")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,mov,*,*,*,*,*,*,*")
+ (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*, 0,*,*,*,*,1008,*")]
+)
+
+
+;; DImode moves
+
+(define_insn "*arm_movdi_vfp"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r,m,w,r,w,w, Uv")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,r,w,w,Uvi,w"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
+ && ( register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"#\";
+ case 1:
+ case 2:
+ return output_move_double (operands);
+ case 3:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\\t%@ int\";
+ case 4:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
+ case 5:
+ return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
+ case 6: case 7:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarith,f_loadd,f_stored")
+ (set_attr "length" "8,8,8,4,4,4,4,4")
+ (set_attr "pool_range" "*,1020,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,1008,*,*,*,*,1008,*")]
+)
+
+(define_insn "*thumb2_movdi_vfp"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r,m,w,r,w,w, Uv")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,r,w,w,Uvi,w"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "*
+ switch (which_alternative)
+ {
+ case 0: case 1: case 2:
+ return (output_move_double (operands));
+ case 3:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\\t%@ int\";
+ case 4:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
+ case 5:
+ return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
+ case 6: case 7:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarith,f_load,f_store")
+ (set_attr "length" "8,8,8,4,4,4,4,4")
+ (set_attr "pool_range" "*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*, 0,*,*,*,*,1008,*")]
+)
+
+
+;; SFmode moves
+;; Disparage the w<->r cases because reloading an invalid address is
+;; preferable to loading the value via integer registers.
+
+(define_insn "*movsf_vfp"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=t,?r,t ,t ,Uv,r ,m,t,r")
+ (match_operand:SF 1 "general_operand" " ?r,t,Dv,UvE,t, mE,r,t,r"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
+ && ( s_register_operand (operands[0], SFmode)
+ || s_register_operand (operands[1], SFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmsr%?\\t%0, %1\";
+ case 1:
+ return \"fmrs%?\\t%0, %1\";
+ case 2:
+ return \"fconsts%?\\t%0, #%G1\";
+ case 3: case 4:
+ return output_move_vfp (operands);
+ case 5:
+ return \"ldr%?\\t%0, %1\\t%@ float\";
+ case 6:
+ return \"str%?\\t%1, %0\\t%@ float\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\";
+ case 8:
+ return \"mov%?\\t%0, %1\\t%@ float\";
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type"
+ "r_2_f,f_2_r,farith,f_loads,f_stores,load1,store1,ffarith,*")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "*,*,*,*,*,*,*,*,mov")
+ (set_attr "pool_range" "*,*,*,1020,*,4096,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,4080,*,*,*")]
+)
+
+(define_insn "*thumb2_movsf_vfp"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=t,?r,t, t ,Uv,r ,m,t,r")
+ (match_operand:SF 1 "general_operand" " ?r,t,Dv,UvE,t, mE,r,t,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP
+ && ( s_register_operand (operands[0], SFmode)
+ || s_register_operand (operands[1], SFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmsr%?\\t%0, %1\";
+ case 1:
+ return \"fmrs%?\\t%0, %1\";
+ case 2:
+ return \"fconsts%?\\t%0, #%G1\";
+ case 3: case 4:
+ return output_move_vfp (operands);
+ case 5:
+ return \"ldr%?\\t%0, %1\\t%@ float\";
+ case 6:
+ return \"str%?\\t%1, %0\\t%@ float\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\";
+ case 8:
+ return \"mov%?\\t%0, %1\\t%@ float\";
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type"
+ "r_2_f,f_2_r,farith,f_load,f_store,load1,store1,ffarith,*")
+ (set_attr "pool_range" "*,*,*,1020,*,4092,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,0,*,*,*")]
+)
+
+
+;; DFmode moves
+
+(define_insn "*movdf_vfp"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
+ (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
+ && ( register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\";
+ case 1:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\";
+ case 2:
+ return \"fconstd%?\\t%P0, #%G1\";
+ case 3: case 4:
+ return output_move_double (operands);
+ case 5: case 6:
+ return output_move_vfp (operands);
+ case 7:
+ return \"fcpyd%?\\t%P0, %P1\";
+ case 8:
+ return \"#\";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ "
+ [(set_attr "type"
+ "r_2_f,f_2_r,farith,f_loadd,f_stored,load2,store2,ffarith,*")
+ (set_attr "length" "4,4,4,8,8,4,4,4,8")
+ (set_attr "pool_range" "*,*,*,1020,*,1020,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,1008,*,*,*")]
+)
+
+(define_insn "*thumb2_movdf_vfp"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
+ (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\";
+ case 1:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\";
+ case 2:
+ return \"fconstd%?\\t%P0, #%G1\";
+ case 3: case 4: case 8:
+ return output_move_double (operands);
+ case 5: case 6:
+ return output_move_vfp (operands);
+ case 7:
+ return \"fcpyd%?\\t%P0, %P1\";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ "
+ [(set_attr "type"
+ "r_2_f,f_2_r,farith,load2,store2,f_load,f_store,ffarith,*")
+ (set_attr "length" "4,4,4,8,8,4,4,4,8")
+ (set_attr "pool_range" "*,*,*,4096,*,1020,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*,1008,*,*,*")]
+)
+
+
+;; Conditional move patterns
+
+(define_insn "*movsfcc_vfp"
+ [(set (match_operand:SF 0 "s_register_operand" "=t,t,t,t,t,t,?r,?r,?r")
+ (if_then_else:SF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,t,t,0,?r,?r,0,t,t")
+ (match_operand:SF 2 "s_register_operand" "t,0,t,?r,0,?r,t,0,t")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fcpys%D3\\t%0, %2
+ fcpys%d3\\t%0, %1
+ fcpys%D3\\t%0, %2\;fcpys%d3\\t%0, %1
+ fmsr%D3\\t%0, %2
+ fmsr%d3\\t%0, %1
+ fmsr%D3\\t%0, %2\;fmsr%d3\\t%0, %1
+ fmrs%D3\\t%0, %2
+ fmrs%d3\\t%0, %1
+ fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,4,4,8,4,4,8")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+
+(define_insn "*thumb2_movsfcc_vfp"
+ [(set (match_operand:SF 0 "s_register_operand" "=t,t,t,t,t,t,?r,?r,?r")
+ (if_then_else:SF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,t,t,0,?r,?r,0,t,t")
+ (match_operand:SF 2 "s_register_operand" "t,0,t,?r,0,?r,t,0,t")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ it\\t%D3\;fcpys%D3\\t%0, %2
+ it\\t%d3\;fcpys%d3\\t%0, %1
+ ite\\t%D3\;fcpys%D3\\t%0, %2\;fcpys%d3\\t%0, %1
+ it\\t%D3\;fmsr%D3\\t%0, %2
+ it\\t%d3\;fmsr%d3\\t%0, %1
+ ite\\t%D3\;fmsr%D3\\t%0, %2\;fmsr%d3\\t%0, %1
+ it\\t%D3\;fmrs%D3\\t%0, %2
+ it\\t%d3\;fmrs%d3\\t%0, %1
+ ite\\t%D3\;fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10,6,6,10,6,6,10")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
+(define_insn "*movdfcc_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w,w,w,w,w,w,?r,?r,?r")
+ (if_then_else:DF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
+ (match_operand:DF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
+ "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fcpyd%D3\\t%P0, %P2
+ fcpyd%d3\\t%P0, %P1
+ fcpyd%D3\\t%P0, %P2\;fcpyd%d3\\t%P0, %P1
+ fmdrr%D3\\t%P0, %Q2, %R2
+ fmdrr%d3\\t%P0, %Q1, %R1
+ fmdrr%D3\\t%P0, %Q2, %R2\;fmdrr%d3\\t%P0, %Q1, %R1
+ fmrrd%D3\\t%Q0, %R0, %P2
+ fmrrd%d3\\t%Q0, %R0, %P1
+ fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,4,4,8,4,4,8")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb2_movdfcc_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w,w,w,w,w,w,?r,?r,?r")
+ (if_then_else:DF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
+ (match_operand:DF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ it\\t%D3\;fcpyd%D3\\t%P0, %P2
+ it\\t%d3\;fcpyd%d3\\t%P0, %P1
+ ite\\t%D3\;fcpyd%D3\\t%P0, %P2\;fcpyd%d3\\t%P0, %P1
+ it\t%D3\;fmdrr%D3\\t%P0, %Q2, %R2
+ it\t%d3\;fmdrr%d3\\t%P0, %Q1, %R1
+ ite\\t%D3\;fmdrr%D3\\t%P0, %Q2, %R2\;fmdrr%d3\\t%P0, %Q1, %R1
+ it\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2
+ it\t%d3\;fmrrd%d3\\t%Q0, %R0, %P1
+ ite\\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10,6,6,10,6,6,10")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+
+
+;; Sign manipulation functions
+
+(define_insn "*abssf2_vfp"
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fabss%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn "*absdf2_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "w")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fabsd%?\\t%P0, %P1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn "*negsf2_vfp"
+ [(set (match_operand:SF 0 "s_register_operand" "=t,?r")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "t,r")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fnegs%?\\t%0, %1
+ eor%?\\t%0, %1, #-2147483648"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn_and_split "*negdf2_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w,?r,?r")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "w,0,r")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fnegd%?\\t%P0, %P1
+ #
+ #"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && reload_completed
+ && arm_general_register_operand (operands[0], DFmode)"
+ [(set (match_dup 0) (match_dup 1))]
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ operands[0] = gen_highpart (SImode, operands[0]);
+ operands[1] = gen_rtx_XOR (SImode, operands[0], GEN_INT (0x80000000));
+ }
+ else
+ {
+ rtx in_hi, in_lo, out_hi, out_lo;
+
+ in_hi = gen_rtx_XOR (SImode, gen_highpart (SImode, operands[1]),
+ GEN_INT (0x80000000));
+ in_lo = gen_lowpart (SImode, operands[1]);
+ out_hi = gen_highpart (SImode, operands[0]);
+ out_lo = gen_lowpart (SImode, operands[0]);
+
+ if (REGNO (in_lo) == REGNO (out_hi))
+ {
+ emit_insn (gen_rtx_SET (SImode, out_lo, in_lo));
+ operands[0] = out_hi;
+ operands[1] = in_hi;
+ }
+ else
+ {
+ emit_insn (gen_rtx_SET (SImode, out_hi, in_hi));
+ operands[0] = out_lo;
+ operands[1] = in_lo;
+ }
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "4,4,8")
+ (set_attr "type" "ffarith")]
+)
+
+
+;; Arithmetic insns
+
+(define_insn "*addsf3_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fadds%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "farith")]
+)
+
+(define_insn "*adddf3_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "w")
+ (match_operand:DF 2 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "faddd%?\\t%P0, %P1, %P2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "farith")]
+)
+
+
+(define_insn "*subsf3_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fsubs%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "farith")]
+)
+
+(define_insn "*subdf3_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (minus:DF (match_operand:DF 1 "s_register_operand" "w")
+ (match_operand:DF 2 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fsubd%?\\t%P0, %P1, %P2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "farith")]
+)
+
+
+;; Division insns
+
+(define_insn "*divsf3_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (div:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fdivs%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "fdivs")]
+)
+
+(define_insn "*divdf3_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "+w")
+ (div:DF (match_operand:DF 1 "s_register_operand" "w")
+ (match_operand:DF 2 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fdivd%?\\t%P0, %P1, %P2"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "fdivd")]
+)
+
+
+;; Multiplication insns
+
+(define_insn "*mulsf3_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fmuls%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuls")]
+)
+
+(define_insn "*muldf3_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "+w")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "w")
+ (match_operand:DF 2 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fmuld%?\\t%P0, %P1, %P2"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuld")]
+)
+
+
+(define_insn "*mulsf3negsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (mult:SF (neg:SF (match_operand:SF 1 "s_register_operand" "t"))
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fnmuls%?\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuls")]
+)
+
+(define_insn "*muldf3negdf_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "+w")
+ (mult:DF (neg:DF (match_operand:DF 1 "s_register_operand" "w"))
+ (match_operand:DF 2 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fnmuld%?\\t%P0, %P1, %P2"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuld")]
+)
+
+
+;; Multiply-accumulate insns
+
+;; 0 = 1 * 2 + 0
+(define_insn "*mulsf3addsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (plus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))
+ (match_operand:SF 1 "s_register_operand" "0")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fmacs%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
+)
+
+(define_insn "*muldf3adddf_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (plus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
+ (match_operand:DF 3 "s_register_operand" "w"))
+ (match_operand:DF 1 "s_register_operand" "0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fmacd%?\\t%P0, %P2, %P3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
+)
+
+;; 0 = 1 * 2 - 0
+(define_insn "*mulsf3subsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))
+ (match_operand:SF 1 "s_register_operand" "0")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fmscs%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
+)
+
+(define_insn "*muldf3subdf_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (minus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
+ (match_operand:DF 3 "s_register_operand" "w"))
+ (match_operand:DF 1 "s_register_operand" "0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fmscd%?\\t%P0, %P2, %P3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
+)
+
+;; 0 = -(1 * 2) + 0
+(define_insn "*mulsf3negsfaddsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (match_operand:SF 1 "s_register_operand" "0")
+ (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fnmacs%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
+)
+
+(define_insn "*fmuldf3negdfadddf_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (minus:DF (match_operand:DF 1 "s_register_operand" "0")
+ (mult:DF (match_operand:DF 2 "s_register_operand" "w")
+ (match_operand:DF 3 "s_register_operand" "w"))))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fnmacd%?\\t%P0, %P2, %P3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
+)
+
+
+;; 0 = -(1 * 2) - 0
+(define_insn "*mulsf3negsfsubsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (mult:SF
+ (neg:SF (match_operand:SF 2 "s_register_operand" "t"))
+ (match_operand:SF 3 "s_register_operand" "t"))
+ (match_operand:SF 1 "s_register_operand" "0")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fnmscs%?\\t%0, %2, %3"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "fmacs")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+)
+
+(define_insn "*muldf3negdfsubdf_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (minus:DF (mult:DF
+ (neg:DF (match_operand:DF 2 "s_register_operand" "w"))
+ (match_operand:DF 3 "s_register_operand" "w"))
+ (match_operand:DF 1 "s_register_operand" "0")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fnmscd%?\\t%P0, %P2, %P3"
+ [(set_attr "predicable" "yes")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
+)
+
+
+;; Conversion routines
+
+(define_insn "*extendsfdf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fcvtds%?\\t%P0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "*truncdfsf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (float_truncate:SF (match_operand:DF 1 "s_register_operand" "w")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fcvtsd%?\\t%0, %P1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "*truncsisf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "ftosizs%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "*truncsidf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "w"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "ftosizd%?\\t%0, %P1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+
+(define_insn "fixuns_truncsfsi2"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (unsigned_fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "ftouizs%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "fixuns_truncdfsi2"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (unsigned_fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "w"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "ftouizd%?\\t%0, %P1"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+
+(define_insn "*floatsisf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (float:SF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fsitos%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "*floatsidf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (float:DF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fsitod%?\\t%P0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+
+(define_insn "floatunssisf2"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (unsigned_float:SF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fuitos%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+(define_insn "floatunssidf2"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (unsigned_float:DF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fuitod%?\\t%P0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+)
+
+
+;; Sqrt insns.
+
+(define_insn "*sqrtsf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "fsqrts%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "fdivs")]
+)
+
+(define_insn "*sqrtdf2_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "w")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fsqrtd%?\\t%P0, %P1"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "fdivd")]
+)
+
+
+;; Patterns to split/copy vfp condition flags.
+
+(define_insn "*movcc_vfp"
+ [(set (reg CC_REGNUM)
+ (reg VFPCC_REGNUM))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "fmstat%?"
+ [(set_attr "conds" "set")
+ (set_attr "type" "f_flag")]
+)
+
+(define_insn_and_split "*cmpsf_split_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "t")
+ (match_operand:SF 1 "vfp_compare_operand" "tG")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "#"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (reg:CCFP VFPCC_REGNUM)
+ (compare:CCFP (match_dup 0)
+ (match_dup 1)))
+ (set (reg:CCFP CC_REGNUM)
+ (reg:CCFP VFPCC_REGNUM))]
+ ""
+)
+
+(define_insn_and_split "*cmpsf_trap_split_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "t")
+ (match_operand:SF 1 "vfp_compare_operand" "tG")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "#"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (reg:CCFPE VFPCC_REGNUM)
+ (compare:CCFPE (match_dup 0)
+ (match_dup 1)))
+ (set (reg:CCFPE CC_REGNUM)
+ (reg:CCFPE VFPCC_REGNUM))]
+ ""
+)
+
+(define_insn_and_split "*cmpdf_split_vfp"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "w")
+ (match_operand:DF 1 "vfp_compare_operand" "wG")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "#"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (reg:CCFP VFPCC_REGNUM)
+ (compare:CCFP (match_dup 0)
+ (match_dup 1)))
+ (set (reg:CCFP CC_REGNUM)
+ (reg:CCFP VFPCC_REGNUM))]
+ ""
+)
+
+(define_insn_and_split "*cmpdf_trap_split_vfp"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "w")
+ (match_operand:DF 1 "vfp_compare_operand" "wG")))]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "#"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set (reg:CCFPE VFPCC_REGNUM)
+ (compare:CCFPE (match_dup 0)
+ (match_dup 1)))
+ (set (reg:CCFPE CC_REGNUM)
+ (reg:CCFPE VFPCC_REGNUM))]
+ ""
+)
+
+
+;; Comparison patterns
+
+(define_insn "*cmpsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (reg:CCFP VFPCC_REGNUM)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "t,t")
+ (match_operand:SF 1 "vfp_compare_operand" "t,G")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "@
+ fcmps%?\\t%0, %1
+ fcmpzs%?\\t%0"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn "*cmpsf_trap_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (reg:CCFPE VFPCC_REGNUM)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "t,t")
+ (match_operand:SF 1 "vfp_compare_operand" "t,G")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ "@
+ fcmpes%?\\t%0, %1
+ fcmpezs%?\\t%0"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn "*cmpdf_vfp"
+ [(set (reg:CCFP VFPCC_REGNUM)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "w,w")
+ (match_operand:DF 1 "vfp_compare_operand" "w,G")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fcmpd%?\\t%P0, %P1
+ fcmpzd%?\\t%P0"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+(define_insn "*cmpdf_trap_vfp"
+ [(set (reg:CCFPE VFPCC_REGNUM)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "w,w")
+ (match_operand:DF 1 "vfp_compare_operand" "w,G")))]
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ fcmped%?\\t%P0, %P1
+ fcmpezd%?\\t%P0"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "ffarith")]
+)
+
+
+;; Store multiple insn used in function prologue.
+
+(define_insn "*push_multi_vfp"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:DF 1 "s_register_operand" "w")]
+ UNSPEC_PUSH_MULT))])]
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "* return vfp_output_fstmd (operands);"
+;; APPLE LOCAL end v7 support. Merge from mainline
+ [(set_attr "type" "f_stored")]
+)
+
+
+;; Unimplemented insns:
+;; fldm*
+;; fstm*
+;; fmdhr et al (VFPv1)
+;; Support for xD (single precision only) variants.
+;; fmrrs, fmsrr
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/vfp11.md b/gcc-4.2.1-5666.3/gcc/config/arm/vfp11.md
new file mode 100644
index 000000000..fcc696d95
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/vfp11.md
@@ -0,0 +1,94 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM VFP11 pipeline description
+;; Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+(define_automaton "vfp11")
+
+;; There are 3 pipelines in the VFP11 unit.
+;;
+;; - An 8-stage FMAC pipeline (7 execute + writeback) with forwarding from
+;;   the fourth stage for simple operations.
+;;
+;; - A 5-stage DS pipeline (4 execute + writeback) for divide/sqrt insns.
+;;   These insns also use the first execute stage of the FMAC pipeline.
+;;
+;; - A 4-stage LS pipeline (execute + 2 memory + writeback) with forwarding
+;;   from the second memory stage for loads.
+
+;; We do not model Write-After-Read hazards.
+;; We do not do write scheduling with the ARM core, so it is only necessary
+;; to model the first stage of each pipeline.
+;; ??? Need to model LS pipeline properly for load/store multiple?
+;; We do not model fmstat properly. This could be done by modeling pipelines
+;; properly and defining an absence set between a dummy fmstat unit and all
+;; other vfp units.
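+;;
+;; Reading the reservations below: the integer is the result latency in
+;; cycles and the string is the unit reservation; e.g. vfp_fdivs occupies
+;; the DS pipeline for 15 cycles and completes 19 cycles after issue.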
+
+(define_cpu_unit "fmac" "vfp11")
+
+(define_cpu_unit "ds" "vfp11")
+
+(define_cpu_unit "vfp_ls" "vfp11")
+
+(define_cpu_unit "fmstat" "vfp11")
+
+(exclusion_set "fmac,ds" "fmstat")
+
+(define_insn_reservation "vfp_ffarith" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "ffarith"))
+ "fmac")
+
+(define_insn_reservation "vfp_farith" 8
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "farith,f_cvt,fmuls,fmacs"))
+ "fmac")
+
+(define_insn_reservation "vfp_fmul" 9
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fmuld,fmacd"))
+ "fmac*2")
+
+(define_insn_reservation "vfp_fdivs" 19
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fdivs"))
+ "ds*15")
+
+(define_insn_reservation "vfp_fdivd" 33
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fdivd"))
+ "fmac+ds*29")
+
+;; Moves to/from arm regs also use the load/store pipeline.
+(define_insn_reservation "vfp_fload" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_loads,f_loadd,r_2_f"))
+ "vfp_ls")
+
+(define_insn_reservation "vfp_fstore" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_stores,f_stored,f_2_r"))
+ "vfp_ls")
+
+(define_insn_reservation "vfp_to_cpsr" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_flag"))
+ "fmstat,vfp_ls*3")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/vxworks.h b/gcc-4.2.1-5666.3/gcc/config/arm/vxworks.h
new file mode 100644
index 000000000..319c1e842
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/vxworks.h
@@ -0,0 +1,95 @@
+/* Definitions of target machine for GCC,
+ for ARM targeting the VxWorks run-time environment.
+ Copyright (C) 1999, 2000, 2003, 2004 Free Software Foundation, Inc.
+
+ Contributed by: Mike Stump <mrs@wrs.com>
+ Brought up to date by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__vxworks"); \
+ if (TARGET_BIG_END) \
+ builtin_define ("ARMEB"); \
+ else \
+ builtin_define ("ARMEL"); \
+ \
+ if (arm_is_xscale) \
+ builtin_define ("CPU=XSCALE"); \
+ else if (arm_arch5) \
+ builtin_define ("CPU=ARMARCH5"); \
+ else if (arm_arch4) \
+ { \
+ if (thumb_code) \
+ builtin_define ("CPU=ARMARCH4_T"); \
+ else \
+ builtin_define ("CPU=ARMARCH4"); \
+ } \
+ } while (0)
+
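+/* Map the VxWorks -t<cpu> selector options onto the matching -march/-mcpu,
+   endianness and Thumb settings; with no -t option, default to
+   -march=armv4. */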
+#undef CC1_SPEC
+#define CC1_SPEC \
+"%{t4: -mlittle-endian -march=armv4 ; \
+ t4be: -mbig-endian -march=armv4 ; \
+ t4t: -mthumb -mthumb-interwork -mlittle-endian -march=armv4t ; \
+ t4tbe: -mthumb -mthumb-interwork -mbig-endian -march=armv4t ; \
+ t5: -mlittle-endian -march=armv5 ; \
+ t5be: -mbig-endian -march=armv5 ; \
+ t5t: -mthumb -mthumb-interwork -mlittle-endian -march=armv5 ; \
+ t5tbe: -mthumb -mthumb-interwork -mbig-endian -march=armv5 ; \
+ txscale: -mlittle-endian -mcpu=xscale ; \
+ txscalebe: -mbig-endian -mcpu=xscale ; \
+ : -march=armv4}"
+
+/* The -Q options from svr4.h aren't understood and must be removed. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{v:-V} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+
+/* VxWorks does all the library stuff itself. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* VxWorks uses object files, not loadable images. Make the linker just
+ combine objects. */
+#undef LINK_SPEC
+#define LINK_SPEC "-r"
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/VxWorks)", stderr);
+
+/* There is no default multilib. */
+#undef MULTILIB_DEFAULTS
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+ do \
+ { \
+ fprintf (STREAM, "%s Generated by GCC %s for ARM/VxWorks\n", \
+ ASM_COMMENT_START, version_string); \
+ } \
+ while (0)
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/wince-pe.h b/gcc-4.2.1-5666.3/gcc/config/arm/wince-pe.h
new file mode 100644
index 000000000..530340f9d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/wince-pe.h
@@ -0,0 +1,27 @@
+/* Definitions of target machine for GNU compiler, for ARM with WINCE-PE obj format.
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Nick Clifton <nickc@redhat.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_NOP_FUN_DLLIMPORT)
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "marm", "mlittle-endian", "msoft-float", "mno-thumb-interwork" }
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/x-darwin b/gcc-4.2.1-5666.3/gcc/config/arm/x-darwin
new file mode 100644
index 000000000..078e47e55
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/x-darwin
@@ -0,0 +1,7 @@
+ # APPLE LOCAL file ARM native compiler support
+host-arm-darwin.o : $(srcdir)/config/arm/host-arm-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) \
+ config/host-darwin.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+XCFLAGS = -mdynamic-no-pic
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/xscale-coff.h b/gcc-4.2.1-5666.3/gcc/config/arm/xscale-coff.h
new file mode 100644
index 000000000..e27594348
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/xscale-coff.h
@@ -0,0 +1,34 @@
+/* Definitions for XScale systems using COFF
+ Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#undef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
+
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale}"
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "mlittle-endian", "mno-thumb-interwork", "marm" }
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (XScale/COFF)", stderr);
diff --git a/gcc-4.2.1-5666.3/gcc/config/arm/xscale-elf.h b/gcc-4.2.1-5666.3/gcc/config/arm/xscale-elf.h
new file mode 100644
index 000000000..be7be087b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/arm/xscale-elf.h
@@ -0,0 +1,59 @@
+/* Definitions for XScale architectures using ELF
+ Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (XScale/ELF non-Linux)", stderr);
+#endif
+
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
+#endif
+
+/* Note - there are three possible -mfpu= arguments that can be passed to
+ the assembler:
+
+ -mfpu=softvfp This is the default. It indicates that doubles are
+ stored in a format compatible with the VFP
+ specification. This is the newer double format, whereby
+ the endian-ness of the doubles matches the endian-ness
+ of the memory architecture.
+
+ -mfpu=fpa This is when -mhard-float is specified.
+ [It is not known whether any XScales have been made with
+ hardware floating point support, but nevertheless this
+ is what happens.]
+
+ -mfpu=softfpa This is when -msoft-float is specified.
+ This is the normal behavior of other arm configurations,
+ which for backwards compatibility purposes default to
+ supporting the old FPA format which was always big
+ endian, regardless of the endian-ness of the memory
+ system. */
+
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale} \
+ %{mhard-float:-mfpu=fpa} \
+ %{!mhard-float: %{msoft-float:-mfpu=softfpa;:-mfpu=softvfp}}"
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "mlittle-endian", "mno-thumb-interwork", "marm", "msoft-float" }
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/asm.h b/gcc-4.2.1-5666.3/gcc/config/asm.h
new file mode 100644
index 000000000..ae934760d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/asm.h
@@ -0,0 +1,89 @@
+/* APPLE LOCAL file CW asm blocks */
+#ifndef CONFIG_ASM_H
+#define CONFIG_ASM_H
+
+#include "cpplib.h"
+
+/* We use a small state machine to inform the lexer when to start
+ returning tokens marking the beginning of each asm line. */
+enum iasm_states {
+ /* Normal code. */
+ iasm_none,
+ /* '{' of asm block seen, decls may appear. */
+ iasm_decls,
+ /* No more decls, in asm block proper, '}' not seen yet. */
+ iasm_asm
+};
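+
+/* An illustrative walk-through (a sketch, not normative): the '{' that
+   opens an asm block takes the state from iasm_none to iasm_decls; the
+   first non-declaration line takes it to iasm_asm; the closing '}'
+   returns it to iasm_none. */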
+
+/* Nonzero means that CodeWarrior-style inline assembler is to be parsed. */
+
+extern int flag_iasm_blocks;
+
+extern enum iasm_states iasm_state;
+extern bool iasm_in_decl;
+extern bool inside_iasm_block;
+extern bool iasm_kill_regs;
+extern bool iasm_in_operands;
+extern tree iasm_do_id (tree);
+/* Maximum number of arguments. */
+#define IASM_MAX_ARG 11
+
+#ifndef TARGET_IASM_EXTRA_INFO
+#define TARGET_IASM_EXTRA_INFO
+#endif
+
+struct iasm_md_Extra_info {
+ /* Number of operands to the ASM_expr. Note, this can be different
+ from the number of operands to the instruction, in cases like:
+
+ mov 0(foo,bar,4), $42
+
+ where foo and bar are C expressions. */
+ int num;
+
+ struct {
+ /* Constraints for operand to the ASM_EXPR. */
+ const char *constraint;
+ tree var;
+ unsigned int argnum;
+ bool must_be_reg;
+ bool was_output;
+ } dat[IASM_MAX_ARG];
+
+ int num_rewrites;
+ struct {
+ int dat_index;
+ char *arg_p;
+ } rewrite[IASM_MAX_ARG];
+
+ bool no_label_map;
+ const char *modifier;
+
+ TARGET_IASM_EXTRA_INFO
+};
+typedef struct iasm_md_Extra_info iasm_md_extra_info;
+
+void iasm_print_operand (char *buf, tree arg, unsigned argnum, tree *uses,
+ bool must_be_reg, bool must_not_be_reg, iasm_md_extra_info *e);
+
+extern void iasm_stmt (tree, tree, int);
+extern tree iasm_build_register_offset (tree, tree);
+extern tree iasm_label (tree, bool);
+extern tree prepend_char_identifier (tree, char);
+extern tree iasm_reg_name (tree);
+extern void iasm_entry (int, tree);
+extern int iasm_typename_or_reserved (tree);
+extern tree iasm_c_build_component_ref (tree, tree);
+extern tree iasm_get_identifier (tree, const char *);
+extern tree iasm_build_bracket (tree, tree);
+extern bool iasm_is_prefix (tree);
+extern void iasm_skip_to_eol (void);
+extern bool iasm_memory_clobber (const char *);
+extern void iasm_force_constraint (const char *c, iasm_md_extra_info *e);
+extern tree iasm_ptr_conv (tree type, tree exp);
+extern void iasm_get_register_var (tree var, const char *modifier, char *buf,
+ unsigned argnum, bool must_be_reg, iasm_md_extra_info *e);
+extern bool iasm_is_pseudo (const char *);
+extern tree iasm_addr (tree);
+extern void iasm_end_block (void);
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/chorus.h b/gcc-4.2.1-5666.3/gcc/config/chorus.h
new file mode 100644
index 000000000..14cb3a023
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/chorus.h
@@ -0,0 +1,41 @@
+/* Definitions of target machine for GNU compiler.
+ Sun Chorus OS big-endian
+ Copyright (c) 2001 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define DWARF2_DEBUGGING_INFO 1
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Sun Chorus OS Embedded)");
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef LINK_SPEC
+#define LINK_SPEC ""
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-64.c b/gcc-4.2.1-5666.3/gcc/config/darwin-64.c
new file mode 100644
index 000000000..33094e307
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-64.c
@@ -0,0 +1,77 @@
+/* Functions shipped in the ppc64 and x86_64 version of libgcc_s.1.dylib
+ in older Mac OS X versions, preserved for backwards compatibility.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#if defined (__ppc64__) || defined (__x86_64__)
+/* Many of these functions have probably never been used by anyone
+ anywhere on these targets, but it's hard to prove this, so they're defined
+ here. None are actually necessary, as demonstrated below by defining
+ each function using the operation it implements. */
+
+typedef long DI;
+typedef unsigned long uDI;
+typedef int SI;
+typedef unsigned int uSI;
+typedef int word_type __attribute__ ((mode (__word__)));
+
+DI __ashldi3 (DI x, word_type c);
+DI __ashrdi3 (DI x, word_type c);
+int __clzsi2 (uSI x);
+word_type __cmpdi2 (DI x, DI y);
+int __ctzsi2 (uSI x);
+DI __divdi3 (DI x, DI y);
+uDI __lshrdi3 (uDI x, word_type c);
+DI __moddi3 (DI x, DI y);
+DI __muldi3 (DI x, DI y);
+DI __negdi2 (DI x);
+int __paritysi2 (uSI x);
+int __popcountsi2 (uSI x);
+word_type __ucmpdi2 (uDI x, uDI y);
+uDI __udivdi3 (uDI x, uDI y);
+uDI __udivmoddi4 (uDI x, uDI y, uDI *r);
+uDI __umoddi3 (uDI x, uDI y);
+
+DI __ashldi3 (DI x, word_type c) { return x << c; }
+DI __ashrdi3 (DI x, word_type c) { return x >> c; }
+int __clzsi2 (uSI x) { return __builtin_clz (x); }
+word_type __cmpdi2 (DI x, DI y) { return x < y ? 0 : x == y ? 1 : 2; }
+int __ctzsi2 (uSI x) { return __builtin_ctz (x); }
+DI __divdi3 (DI x, DI y) { return x / y; }
+uDI __lshrdi3 (uDI x, word_type c) { return x >> c; }
+DI __moddi3 (DI x, DI y) { return x % y; }
+DI __muldi3 (DI x, DI y) { return x * y; }
+DI __negdi2 (DI x) { return -x; }
+int __paritysi2 (uSI x) { return __builtin_parity (x); }
+int __popcountsi2 (uSI x) { return __builtin_popcount (x); }
+word_type __ucmpdi2 (uDI x, uDI y) { return x < y ? 0 : x == y ? 1 : 2; }
+uDI __udivdi3 (uDI x, uDI y) { return x / y; }
+uDI __udivmoddi4 (uDI x, uDI y, uDI *r) { *r = x % y; return x / y; }
+uDI __umoddi3 (uDI x, uDI y) { return x % y; }
+
+#endif /* __ppc64__ || __x86_64__ */
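
Since each routine above is defined in terms of the operation it implements, the contract is easy to spot-check. A tiny host-side test, assuming an LP64 target and that libgcc resolves the symbol at link time:

#include <assert.h>

typedef unsigned long uDI;
extern uDI __udivmoddi4 (uDI x, uDI y, uDI *r);

int
main (void)
{
  uDI r;
  /* 17 = 3*5 + 2, so the quotient is 3 and the remainder is 2.  */
  assert (__udivmoddi4 (17, 5, &r) == 3 && r == 2);
  return 0;
}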
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-c.c b/gcc-4.2.1-5666.3/gcc/config/darwin-c.c
new file mode 100644
index 000000000..2f291239c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-c.c
@@ -0,0 +1,1250 @@
+/* Darwin support needed only by C/C++ frontends.
+ Copyright (C) 2001, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "c-pragma.h"
+#include "c-tree.h"
+#include "c-incpath.h"
+#include "c-common.h"
+#include "toplev.h"
+#include "flags.h"
+#include "tm_p.h"
+#include "cppdefault.h"
+#include "prefix.h"
+/* APPLE LOCAL include options.h */
+#include "options.h"
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+#include "flags.h"
+#include "opts.h"
+#include "varray.h"
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+
+/* Pragmas. */
+
+#define BAD(gmsgid) do { warning (OPT_Wpragmas, gmsgid); return; } while (0)
+#define BAD2(msgid, arg) do { warning (OPT_Wpragmas, msgid, arg); return; } while (0)
+
+static bool using_frameworks = false;
+
+/* Maintain a small stack of alignments. This is similar to pragma
+ pack's stack, but simpler. */
+
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 --ff */
+static void push_field_alignment (int, int, int);
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 --ff */
+static void pop_field_alignment (void);
+static const char *find_subframework_file (const char *, const char *);
+/* APPLE LOCAL begin iframework for 4.3 4094959 */
+/* Remove add_system_framework_path */
+/* APPLE LOCAL end iframework for 4.3 4094959 */
+static const char *find_subframework_header (cpp_reader *pfile, const char *header,
+ cpp_dir **dirp);
+
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+/* There are four alignment modes supported on the Apple Macintosh
+ platform: power, mac68k, natural, and packed. These modes are
+ identified as follows:
+ if maximum_field_alignment != 0
+ mode = packed
+ else if OPTION_ALIGN_NATURAL
+ mode = natural
+ else if OPTION_ALIGN_MAC68K
+ mode = mac68k
+ else
+ mode = power
+ These modes are saved on the alignment stack by saving the values
+ of maximum_field_alignment, OPTION_ALIGN_MAC68K, and
+ OPTION_ALIGN_NATURAL. */
+typedef struct align_stack
+{
+ int alignment;
+ unsigned long mac68k;
+ unsigned long natural;
+ struct align_stack * prev;
+} align_stack;
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
+
+static struct align_stack * field_align_stack = NULL;
+
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 --ff */
+/* APPLE LOCAL begin radar 4679943 */
+/* natural_alignment == 0 means "off"
+ natural_alignment == 1 means "on"
+ natural_alignment == 2 means "unchanged" */
+/* APPLE LOCAL end radar 4679943 */
+
+static void
+push_field_alignment (int bit_alignment,
+ int mac68k_alignment, int natural_alignment)
+{
+ align_stack *entry = XNEW (align_stack);
+
+ entry->alignment = maximum_field_alignment;
+ entry->mac68k = OPTION_ALIGN_MAC68K;
+ entry->natural = OPTION_ALIGN_NATURAL;
+ entry->prev = field_align_stack;
+ field_align_stack = entry;
+
+ maximum_field_alignment = bit_alignment;
+ if (mac68k_alignment)
+ darwin_alignment_flags |= OPTION_MASK_ALIGN_MAC68K;
+ else
+ darwin_alignment_flags &= ~OPTION_MASK_ALIGN_MAC68K;
+
+ /* APPLE LOCAL begin radar 4679943 */
+ if (natural_alignment == 1)
+ darwin_alignment_flags |= OPTION_MASK_ALIGN_NATURAL;
+ else if (natural_alignment == 0)
+ darwin_alignment_flags &= ~OPTION_MASK_ALIGN_NATURAL;
+ /* APPLE LOCAL end radar 4679943 */
+}
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 --ff */
+
+static void
+pop_field_alignment (void)
+{
+ if (field_align_stack)
+ {
+ align_stack *entry = field_align_stack;
+
+ maximum_field_alignment = entry->alignment;
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 --ff */
+ if (entry->mac68k)
+ darwin_alignment_flags |= OPTION_MASK_ALIGN_MAC68K;
+ else
+ darwin_alignment_flags &= ~OPTION_MASK_ALIGN_MAC68K;
+ if (entry->natural)
+ darwin_alignment_flags |= OPTION_MASK_ALIGN_NATURAL;
+ else
+ darwin_alignment_flags &= ~OPTION_MASK_ALIGN_NATURAL;
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 --ff */
+ field_align_stack = entry->prev;
+ free (entry);
+ }
+ else
+ error ("too many #pragma options align=reset");
+}
+
+/* Handlers for Darwin-specific pragmas. */
+
+void
+darwin_pragma_ignore (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+/* APPLE LOCAL begin pragma fenv */
+/* #pragma GCC fenv
+ This is kept in <fenv.h>. The point is to allow trapping
+ math to default to off. According to C99, any program
+ that requires trapping math must include <fenv.h>, so
+ we enable trapping math when that gets included. */
+
+void
+darwin_pragma_fenv (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ flag_trapping_math = 1;
+}
+/* APPLE LOCAL end pragma fenv */
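
For illustration, a translation unit picks up the pragma simply by including <fenv.h> (on Darwin, where that header carries '#pragma GCC fenv'):

/* Including <fenv.h> flips flag_trapping_math on for this file, so
   the compiler must preserve floating-point exception behavior.  */
#include <fenv.h>

double
checked_div (double x, double y)
{
  return x / y;   /* may raise FE_DIVBYZERO; not folded away */
}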
+
+/* #pragma options align={mac68k|power|reset} */
+
+void
+darwin_pragma_options (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ const char *arg;
+ tree t, x;
+
+ if (pragma_lex (&t) != CPP_NAME)
+ BAD ("malformed '#pragma options', ignoring");
+ arg = IDENTIFIER_POINTER (t);
+ if (strcmp (arg, "align"))
+ BAD ("malformed '#pragma options', ignoring");
+ if (pragma_lex (&t) != CPP_EQ)
+ BAD ("malformed '#pragma options', ignoring");
+ if (pragma_lex (&t) != CPP_NAME)
+ BAD ("malformed '#pragma options', ignoring");
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of '#pragma options'");
+
+ arg = IDENTIFIER_POINTER (t);
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+ if (!strcmp (arg, "mac68k"))
+ {
+ if (POINTER_SIZE == 64)
+ warning (OPT_Wpragmas, "mac68k alignment pragma is deprecated for 64-bit Darwin");
+ push_field_alignment (0, 1, 0);
+ }
+ else if (!strcmp (arg, "native")) /* equivalent to power on PowerPC */
+ push_field_alignment (0, 0, 0);
+ else if (!strcmp (arg, "natural"))
+ push_field_alignment (0, 0, 1);
+ else if (!strcmp (arg, "packed"))
+ push_field_alignment (8, 0, 0);
+ else if (!strcmp (arg, "power"))
+ push_field_alignment (0, 0, 0);
+ else if (!strcmp (arg, "reset"))
+ pop_field_alignment ();
+ else
+ BAD ("malformed '#pragma options align={mac68k|power|natural|reset}', ignoring");
+}
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
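
Usage of the pragma parsed above, as it might appear in a header; the layout notes in the comments are illustrative, not normative:

#pragma options align=mac68k
struct m68k_rec { char c; short s; long l; };  /* 2-byte field alignment */
#pragma options align=reset

#pragma options align=packed
struct packed_rec { char c; long l; };         /* no padding between fields */
#pragma options align=reset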
+
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+/* #pragma pack ()
+ #pragma pack (N)
+ #pragma pack (pop[,id])
+ #pragma pack (push[,id],N)
+
+ We have a problem handling the semantics of these directives since,
+ to play well with the Macintosh alignment directives, we want the
+ usual pack(N) form to do a push of the previous alignment state.
+ Do we want pack() to do another push or a pop? */
+
+void
+darwin_pragma_pack (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree x, id = 0;
+ int align = -1;
+ enum cpp_ttype token;
+ enum { set, push, pop } action;
+
+ if (pragma_lex (&x) != CPP_OPEN_PAREN)
+ BAD ("missing '(' after '#pragma pack' - ignored");
+
+ token = pragma_lex (&x);
+ if (token == CPP_CLOSE_PAREN)
+ {
+ action = pop;
+ align = 0;
+ }
+ else if (token == CPP_NUMBER)
+ {
+ if (TREE_CODE (x) != INTEGER_CST)
+ BAD ("invalid constant in %<#pragma pack%> - ignored");
+ align = TREE_INT_CST_LOW (x);
+ action = push;
+ if (pragma_lex (&x) != CPP_CLOSE_PAREN)
+ BAD ("malformed '#pragma pack' - ignored");
+ }
+ else if (token == CPP_NAME)
+ {
+#define GCC_BAD_ACTION do { if (action == push) \
+ BAD ("malformed '#pragma pack(push[, id], <n>)' - ignored"); \
+ else \
+ BAD ("malformed '#pragma pack(pop[, id])' - ignored"); \
+ } while (0)
+
+ const char *op = IDENTIFIER_POINTER (x);
+ if (!strcmp (op, "push"))
+ action = push;
+ else if (!strcmp (op, "pop"))
+ action = pop;
+ else
+ BAD2 ("unknown action '%s' for '#pragma pack' - ignored", op);
+
+ while ((token = pragma_lex (&x)) == CPP_COMMA)
+ {
+ token = pragma_lex (&x);
+ if (token == CPP_NAME && id == 0)
+ {
+ id = x;
+ }
+ else if (token == CPP_NUMBER && action == push && align == -1)
+ {
+ if (TREE_CODE (x) != INTEGER_CST)
+ BAD ("invalid constant in %<#pragma pack%> - ignored");
+ align = TREE_INT_CST_LOW (x);
+ if (align == -1)
+ action = set;
+ }
+ else
+ GCC_BAD_ACTION;
+ }
+
+ if (token != CPP_CLOSE_PAREN)
+ GCC_BAD_ACTION;
+#undef GCC_BAD_ACTION
+ }
+ else
+ BAD ("malformed '#pragma pack' - ignored");
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of '#pragma pack'");
+
+ if (action != pop)
+ {
+ switch (align)
+ {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ align *= BITS_PER_UNIT;
+ break;
+ case -1:
+ if (action == push)
+ {
+ align = maximum_field_alignment;
+ break;
+ }
+ default:
+ BAD2 ("alignment must be a small power of two, not %d", align);
+ }
+ }
+
+ switch (action)
+ {
+ case pop: pop_field_alignment (); break;
+ /* APPLE LOCAL begin radar 4679943 */
+ case push: push_field_alignment (align, 0, 2); break;
+ /* APPLE LOCAL end radar 4679943 */
+ case set: break;
+ }
+}
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
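
The push/pop forms handled above, in source form; note that on this implementation plain pack(N) also pushes, so it can be balanced with pack(pop). The sizes in the comments assume a typical ILP32 ABI and are illustrative:

#pragma pack(push, netfmt, 1)
struct wire { char tag; int value; };   /* sizeof == 5 under pack(1) */
#pragma pack(pop, netfmt)
struct host { char tag; int value; };   /* natural layout again, sizeof == 8 */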
+
+/* #pragma unused ([var {, var}*]) */
+
+void
+darwin_pragma_unused (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree decl, x;
+ int tok;
+
+ /* APPLE LOCAL 5979888 */
+ if ((tok=pragma_lex (&x)) != CPP_OPEN_PAREN)
+ BAD ("missing '(' after '#pragma unused', ignoring");
+
+ /* APPLE LOCAL 5979888 */
+ while (tok != CPP_EOF && tok != CPP_CLOSE_PAREN)
+ {
+ tok = pragma_lex (&decl);
+ if (tok == CPP_NAME && decl)
+ {
+ tree local = lookup_name (decl);
+ if (local && (TREE_CODE (local) == PARM_DECL
+ || TREE_CODE (local) == VAR_DECL))
+ TREE_USED (local) = 1;
+ tok = pragma_lex (&x);
+ if (tok != CPP_COMMA)
+ break;
+ }
+ }
+
+ if (tok != CPP_CLOSE_PAREN)
+ BAD ("missing ')' after '#pragma unused', ignoring");
+
+ if (pragma_lex (&x) != CPP_EOF)
+ BAD ("junk at end of '#pragma unused'");
+}
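
A use of the pragma handled above (hypothetical example); setting TREE_USED on CTX is what suppresses the "unused parameter" diagnostic:

int
on_event (int fd, void *ctx)
{
#pragma unused(ctx)
  /* CTX is deliberately ignored; the pragma marks it used so no
     -Wunused diagnostic is emitted.  */
  return fd >= 0;
}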
+
+/* Parse the ms_struct pragma. */
+void
+darwin_pragma_ms_struct (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ const char *arg;
+ tree t;
+
+ if (pragma_lex (&t) != CPP_NAME)
+ BAD ("malformed '#pragma ms_struct', ignoring");
+ arg = IDENTIFIER_POINTER (t);
+
+ if (!strcmp (arg, "on"))
+ darwin_ms_struct = true;
+ else if (!strcmp (arg, "off") || !strcmp (arg, "reset"))
+ darwin_ms_struct = false;
+ else
+ BAD ("malformed '#pragma ms_struct {on|off|reset}', ignoring");
+
+ if (pragma_lex (&t) != CPP_EOF)
+ BAD ("junk at end of '#pragma ms_struct'");
+}
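
Source-level usage of the flag toggled above; while the flag is on, record layout follows the Microsoft bitfield rules:

#pragma ms_struct on
struct ms_bits { int a : 3; char b : 4; };  /* MS-style bitfield packing */
#pragma ms_struct reset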
+
+/* APPLE LOCAL begin pragma reverse_bitfields */
+/* Handle the reverse_bitfields pragma. */
+
+void
+darwin_pragma_reverse_bitfields (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ const char* arg;
+ tree t;
+
+ if (pragma_lex (&t) != CPP_NAME)
+ BAD ("malformed '#pragma reverse_bitfields', ignoring");
+ arg = IDENTIFIER_POINTER (t);
+
+ if (!strcmp (arg, "on"))
+ darwin_reverse_bitfields = true;
+ else if (!strcmp (arg, "off") || !strcmp (arg, "reset"))
+ darwin_reverse_bitfields = false;
+ else
+ BAD ("malformed '#pragma reverse_bitfields {on|off|reset}', ignoring");
+ if (pragma_lex (&t) != CPP_EOF)
+ BAD ("junk at end of '#pragma reverse_bitfields'");
+}
+/* APPLE LOCAL end pragma reverse_bitfields */
+
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+varray_type va_opt;
+
+static void
+push_opt_level (int level, int size)
+{
+ if (!va_opt)
+ VARRAY_INT_INIT (va_opt, 5, "va_opt");
+ VARRAY_PUSH_INT (va_opt, size << 16 | level);
+}
+
+static void
+pop_opt_level (void)
+{
+ int level;
+ if (!va_opt)
+ VARRAY_INT_INIT (va_opt, 5, "va_opt");
+ if (!VARRAY_ACTIVE_SIZE (va_opt))
+ BAD ("optimization pragma stack underflow");
+ level = VARRAY_TOP_INT (va_opt);
+ VARRAY_POP (va_opt);
+
+ optimize_size = level >> 16;
+ optimize = level & 0xffff;
+}
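
Each stack entry packs both global flags into one int, (optimize_size << 16) | optimize. A hypothetical decode helper (mirroring what pop_opt_level does inline) makes the layout explicit:

static void
decode_opt_entry (int entry, int *level, int *size)
{
  *size  = entry >> 16;     /* high half: saved optimize_size */
  *level = entry & 0xffff;  /* low half: saved optimize level */
}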
+
+/* APPLE LOCAL begin 4760857 optimization pragmas */
+/* Set the global flags as required by #pragma optimization_level or
+ #pragma optimize_size. */
+
+static void darwin_set_flags_from_pragma (void)
+{
+ set_flags_from_O (false);
+
+ /* MERGE FIXME 5416402 flag_loop_optimize2 is gone now */
+#if 0
+ /* Enable new loop optimizer pass if any of its optimizations is called. */
+ if (flag_move_loop_invariants
+ || flag_unswitch_loops
+ || flag_peel_loops
+ || flag_unroll_loops
+ || flag_branch_on_count_reg)
+ flag_loop_optimize2 = 1;
+#endif
+
+ /* This is expected to be defined in each target. Should contain
+ any snippets from OPTIMIZATION_OPTIONS and OVERRIDE_OPTIONS that
+ set per-func flags on the basis of -O level. */
+ reset_optimization_options (optimize, optimize_size);
+
+ if (align_loops <= 0) align_loops = 1;
+ if (align_loops_max_skip > align_loops || !align_loops)
+ align_loops_max_skip = align_loops - 1;
+ if (align_jumps <= 0) align_jumps = 1;
+ if (align_jumps_max_skip > align_jumps || !align_jumps)
+ align_jumps_max_skip = align_jumps - 1;
+ if (align_labels <= 0) align_labels = 1;
+}
+/* APPLE LOCAL end 4760857 optimization pragmas */
+
+void
+darwin_pragma_opt_level (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree t;
+ enum cpp_ttype argtype = pragma_lex (&t);
+
+ if (argtype == CPP_NAME)
+ {
+ const char* arg = IDENTIFIER_POINTER (t);
+ if (strcmp (arg, "reset") != 0)
+ BAD ("malformed '#pragma optimization_level [GCC] {0|1|2|3|reset}', ignoring");
+ pop_opt_level ();
+ }
+ else if (argtype == CPP_NUMBER)
+ {
+ if (TREE_CODE (t) != INTEGER_CST
+ || INT_CST_LT (t, integer_zero_node)
+ || TREE_INT_CST_HIGH (t) != 0)
+ BAD ("malformed '#pragma optimization_level [GCC] {0|1|2|3|reset}', ignoring");
+
+ push_opt_level (optimize, optimize_size);
+ optimize = TREE_INT_CST_LOW (t);
+ if (optimize > 3)
+ optimize = 3;
+ optimize_size = 0;
+ }
+ else
+ BAD ("malformed '#pragma optimization_level [GCC] {0|1|2|3|reset}', ignoring");
+
+ /* APPLE LOCAL begin 4760857 optimization pragmas */
+ darwin_set_flags_from_pragma ();
+ /* APPLE LOCAL end 4760857 optimization pragmas */
+
+ if (pragma_lex (&t) != CPP_EOF)
+ BAD ("junk at end of '#pragma optimization_level'");
+}
+
+void
+darwin_pragma_opt_size (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ const char* arg;
+ tree t;
+
+ if (pragma_lex (&t) != CPP_NAME)
+ BAD ("malformed '#pragma optimize_for_size { on | off | reset}', ignoring");
+ arg = IDENTIFIER_POINTER (t);
+
+ if (!strcmp (arg, "on"))
+ {
+ push_opt_level (optimize, optimize_size);
+ optimize_size = 1;
+ optimize = 2;
+ }
+ else if (!strcmp (arg, "off"))
+ /* Not clear what this should do exactly. CW does not do a pop so
+ we don't either. */
+ optimize_size = 0;
+ else if (!strcmp (arg, "reset"))
+ pop_opt_level ();
+ else
+ BAD ("malformed '#pragma optimize_for_size { on | off | reset }', ignoring");
+
+ /* APPLE LOCAL begin 4760857 optimization pragmas */
+ darwin_set_flags_from_pragma ();
+ /* APPLE LOCAL end 4760857 optimization pragmas */
+
+ if (pragma_lex (&t) != CPP_EOF)
+ BAD ("junk at end of '#pragma optimize_for_size'");
+}
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+
+static struct {
+ size_t len;
+ const char *name;
+ cpp_dir* dir;
+} *frameworks_in_use;
+static int num_frameworks = 0;
+static int max_frameworks = 0;
+
+
+/* Remember which frameworks have been seen, so that we can ensure
+ that all uses of that framework come from the same framework. DIR
+ is the place where the named framework NAME, which is of length
+ LEN, was found. We copy the directory name from NAME, as it will be
+ freed by others. */
+
+static void
+add_framework (const char *name, size_t len, cpp_dir *dir)
+{
+ char *dir_name;
+ int i;
+ for (i = 0; i < num_frameworks; ++i)
+ {
+ if (len == frameworks_in_use[i].len
+ && strncmp (name, frameworks_in_use[i].name, len) == 0)
+ {
+ return;
+ }
+ }
+ if (i >= max_frameworks)
+ {
+ max_frameworks = i*2;
+ max_frameworks += i == 0;
+ frameworks_in_use = xrealloc (frameworks_in_use,
+ max_frameworks*sizeof(*frameworks_in_use));
+ }
+ dir_name = XNEWVEC (char, len + 1);
+ memcpy (dir_name, name, len);
+ dir_name[len] = '\0';
+ frameworks_in_use[num_frameworks].name = dir_name;
+ frameworks_in_use[num_frameworks].len = len;
+ frameworks_in_use[num_frameworks].dir = dir;
+ ++num_frameworks;
+}
+
+/* Recall whether we have seen the named framework NAME before, and
+ where we saw it. NAME is LEN bytes long. The return value is the
+ place where it was seen before. */
+
+static struct cpp_dir*
+find_framework (const char *name, size_t len)
+{
+ int i;
+ for (i = 0; i < num_frameworks; ++i)
+ {
+ if (len == frameworks_in_use[i].len
+ && strncmp (name, frameworks_in_use[i].name, len) == 0)
+ {
+ return frameworks_in_use[i].dir;
+ }
+ }
+ return 0;
+}
+
+/* There are two directories in a framework that contain header files,
+ Headers and PrivateHeaders. We search Headers first as it is more
+ common to upgrade a header from PrivateHeaders to Headers and when
+ that is done, the old one might hang around and be out of date,
+ causing grief. */
+
+struct framework_header {const char * dirName; int dirNameLen; };
+static struct framework_header framework_header_dirs[] = {
+ { "Headers", 7 },
+ { "PrivateHeaders", 14 },
+ { NULL, 0 }
+};
+
+/* Returns a pointer to a malloced string that contains the real pathname
+ to the file, given the base name and the name. */
+
+static char *
+framework_construct_pathname (const char *fname, cpp_dir *dir)
+{
+ char *buf;
+ size_t fname_len, frname_len;
+ cpp_dir *fast_dir;
+ char *frname;
+ struct stat st;
+ int i;
+
+ /* Framework names must have a / in them. */
+ buf = strchr (fname, '/');
+ if (buf)
+ fname_len = buf - fname;
+ else
+ return 0;
+
+ fast_dir = find_framework (fname, fname_len);
+
+ /* Framework includes must all come from one framework. */
+ if (fast_dir && dir != fast_dir)
+ return 0;
+
+ frname = XNEWVEC (char, strlen (fname) + dir->len + 2
+ + strlen(".framework/") + strlen("PrivateHeaders"));
+ strncpy (&frname[0], dir->name, dir->len);
+ frname_len = dir->len;
+ if (frname_len && frname[frname_len-1] != '/')
+ frname[frname_len++] = '/';
+ strncpy (&frname[frname_len], fname, fname_len);
+ frname_len += fname_len;
+ strncpy (&frname[frname_len], ".framework/", strlen (".framework/"));
+ frname_len += strlen (".framework/");
+
+ if (fast_dir == 0)
+ {
+ frname[frname_len-1] = 0;
+ if (stat (frname, &st) == 0)
+ {
+ /* As soon as we find the first instance of the framework,
+ we stop and never use any later instance of that
+ framework. */
+ add_framework (fname, fname_len, dir);
+ }
+ else
+ {
+ /* If we can't find the parent directory, no point looking
+ further. */
+ free (frname);
+ return 0;
+ }
+ frname[frname_len-1] = '/';
+ }
+
+ /* Append framework_header_dirs and header file name */
+ for (i = 0; framework_header_dirs[i].dirName; i++)
+ {
+ strncpy (&frname[frname_len],
+ framework_header_dirs[i].dirName,
+ framework_header_dirs[i].dirNameLen);
+ strcpy (&frname[frname_len + framework_header_dirs[i].dirNameLen],
+ &fname[fname_len]);
+
+ if (stat (frname, &st) == 0)
+ return frname;
+ }
+
+ free (frname);
+ return 0;
+}
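
A worked example of the construction above (paths are illustrative):

/* fname = "Foundation/Foundation.h"
   dir   = "/System/Library/Frameworks"
   tries, in order:
     /System/Library/Frameworks/Foundation.framework/Headers/Foundation.h
     /System/Library/Frameworks/Foundation.framework/PrivateHeaders/Foundation.h
   and returns the first candidate for which stat() succeeds.  */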
+
+/* Search for FNAME in sub-frameworks. pname is the context that we
+ wish to search in. Return the path the file was found at,
+ otherwise return 0. */
+
+static const char*
+find_subframework_file (const char *fname, const char *pname)
+{
+ char *sfrname;
+ const char *dot_framework = ".framework/";
+ char *bufptr;
+ int sfrname_len, i, fname_len;
+ struct cpp_dir *fast_dir;
+ static struct cpp_dir subframe_dir;
+ struct stat st;
+
+ bufptr = strchr (fname, '/');
+
+ /* Subframework files must have / in the name. */
+ if (bufptr == 0)
+ return 0;
+
+ fname_len = bufptr - fname;
+ fast_dir = find_framework (fname, fname_len);
+
+ /* A subframework header filename includes the parent framework name
+ and the header name, in the "CarbonCore/OSUtils.h" form. If it
+ does not include a slash it is not a subframework include. */
+ bufptr = strstr (pname, dot_framework);
+
+ /* If the parent header is not of any framework, then this header
+ cannot be part of any subframework. */
+ if (!bufptr)
+ return 0;
+
+ /* Now translate. For example, with bufptr pointing at the
+ ".framework/" substring of pname:
+ fname = CarbonCore/OSUtils.h
+ pname = /System/Library/Frameworks/Foundation.framework/Headers/Foundation.h
+ into
+ sfrname = /System/Library/Frameworks/Foundation.framework/Frameworks/CarbonCore.framework/Headers/OSUtils.h */
+
+ sfrname = XNEWVEC (char, strlen (pname) + strlen (fname) + 2 +
+ strlen ("Frameworks/") + strlen (".framework/")
+ + strlen ("PrivateHeaders"));
+
+ bufptr += strlen (dot_framework);
+
+ sfrname_len = bufptr - pname;
+
+ strncpy (&sfrname[0], pname, sfrname_len);
+
+ strncpy (&sfrname[sfrname_len], "Frameworks/", strlen ("Frameworks/"));
+ sfrname_len += strlen("Frameworks/");
+
+ strncpy (&sfrname[sfrname_len], fname, fname_len);
+ sfrname_len += fname_len;
+
+ strncpy (&sfrname[sfrname_len], ".framework/", strlen (".framework/"));
+ sfrname_len += strlen (".framework/");
+
+ /* Append framework_header_dirs and header file name */
+ for (i = 0; framework_header_dirs[i].dirName; i++)
+ {
+ strncpy (&sfrname[sfrname_len],
+ framework_header_dirs[i].dirName,
+ framework_header_dirs[i].dirNameLen);
+ strcpy (&sfrname[sfrname_len + framework_header_dirs[i].dirNameLen],
+ &fname[fname_len]);
+
+ if (stat (sfrname, &st) == 0)
+ {
+ if (fast_dir != &subframe_dir)
+ {
+ if (fast_dir)
+ warning (0, "subframework include %s conflicts with framework include",
+ fname);
+ else
+ add_framework (fname, fname_len, &subframe_dir);
+ }
+
+ return sfrname;
+ }
+ }
+ free (sfrname);
+
+ return 0;
+}
+
+/* Add PATH to the system includes. PATH must be malloc-ed and
+ NUL-terminated. System framework paths are C++ aware. */
+
+static void
+add_system_framework_path (char *path)
+{
+ int cxx_aware = 1;
+ cpp_dir *p;
+
+ p = XNEW (cpp_dir);
+ p->next = NULL;
+ p->name = path;
+ p->sysp = 1 + !cxx_aware;
+ p->construct = framework_construct_pathname;
+ using_frameworks = 1;
+
+ add_cpp_dir_path (p, SYSTEM);
+}
+
+/* Add PATH to the bracket includes. PATH must be malloc-ed and
+ NUL-terminated. */
+
+void
+add_framework_path (char *path)
+{
+ cpp_dir *p;
+
+ p = XNEW (cpp_dir);
+ p->next = NULL;
+ p->name = path;
+ p->sysp = 0;
+ p->construct = framework_construct_pathname;
+ using_frameworks = 1;
+
+ add_cpp_dir_path (p, BRACKET);
+}
+
+static const char *framework_defaults [] =
+ {
+ "/System/Library/Frameworks",
+ "/Library/Frameworks",
+ };
+
+/* Register the GNU objective-C runtime include path if STDINC. */
+
+void
+darwin_register_objc_includes (const char *sysroot, const char *iprefix,
+ int stdinc)
+{
+ const char *fname;
+ size_t len;
+ /* We do not do anything if we do not want the standard includes. */
+ if (!stdinc)
+ return;
+
+ fname = GCC_INCLUDE_DIR "-gnu-runtime";
+
+ /* Register the GNU OBJC runtime include path if we are compiling OBJC
+ with GNU-runtime. */
+
+ if (c_dialect_objc () && !flag_next_runtime)
+ {
+ char *str;
+ /* See if our directory starts with the standard prefix.
+ "Translate" it, i.e. replace /usr/local/lib/gcc... with
+ IPREFIX, and search it first. */
+ if (iprefix && (len = cpp_GCC_INCLUDE_DIR_len) != 0 && !sysroot
+ && !strncmp (fname, cpp_GCC_INCLUDE_DIR, len))
+ {
+ str = concat (iprefix, fname + len, NULL);
+ /* FIXME: wrap the headers for C++ awareness. */
+ add_path (str, SYSTEM, /*c++aware=*/false, false);
+ }
+
+ /* Should this directory start with the sysroot? */
+ if (sysroot)
+ str = concat (sysroot, fname, NULL);
+ else
+ str = update_path (fname, "");
+
+ add_path (str, SYSTEM, /*c++aware=*/false, false);
+ }
+}
+
+
+/* Register all the system framework paths if STDINC is true and set up
+ the missing_header callback for subframework searching if any
+ frameworks have been registered. */
+
+void
+darwin_register_frameworks (const char *sysroot,
+ const char *iprefix ATTRIBUTE_UNUSED, int stdinc)
+{
+ if (stdinc)
+ {
+ size_t i;
+
+ /* Set up the default search path for frameworks. */
+ for (i=0; i<sizeof (framework_defaults)/sizeof(const char *); ++i)
+ {
+ char *str;
+ if (sysroot)
+ str = concat (sysroot, xstrdup (framework_defaults [i]), NULL);
+ else
+ str = xstrdup (framework_defaults[i]);
+ /* System Framework headers are cxx aware. */
+ add_system_framework_path (str);
+ }
+ }
+
+ if (using_frameworks)
+ cpp_get_callbacks (parse_in)->missing_header = find_subframework_header;
+}
+
+/* Search for HEADER in a context-dependent way. The return value is
+ the malloced name of a header to try and open, if any, or NULL
+ otherwise. This is called after normal header lookup processing
+ fails to find a header. We search each file in the include stack,
+ starting from the most deeply nested include and finishing with the
+ main input file, and stop as soon as a subframework header is
+ found. */
+
+static const char*
+find_subframework_header (cpp_reader *pfile, const char *header, cpp_dir **dirp)
+{
+ const char *fname = header;
+ struct cpp_buffer *b;
+ const char *n;
+
+ for (b = cpp_get_buffer (pfile);
+ b && cpp_get_file (b) && cpp_get_path (cpp_get_file (b));
+ b = cpp_get_prev (b))
+ {
+ n = find_subframework_file (fname, cpp_get_path (cpp_get_file (b)));
+ if (n)
+ {
+ /* Logically, the place where we found the subframework is
+ the place where we found the Framework that contains the
+ subframework. This is useful for tracking whether or not
+ we are in a system header. */
+ *dirp = cpp_get_dir (cpp_get_file (b));
+ return n;
+ }
+ }
+
+ return 0;
+}
+
+/* Return the value of darwin_macosx_version_min suitable for the
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ macro,
+ so '10.4.2' becomes 1042.
+ Report an error if the version number is not known. */
+static const char *
+/* APPLE LOCAL ARM 5683689 */
+macosx_version_as_macro (void)
+{
+ static char result[] = "1000";
+
+ if (strncmp (darwin_macosx_version_min, "10.", 3) != 0)
+ goto fail;
+ if (! ISDIGIT (darwin_macosx_version_min[3]))
+ goto fail;
+ result[2] = darwin_macosx_version_min[3];
+ if (darwin_macosx_version_min[4] != '\0')
+ {
+ if (darwin_macosx_version_min[4] != '.')
+ goto fail;
+ if (! ISDIGIT (darwin_macosx_version_min[5]))
+ goto fail;
+ if (darwin_macosx_version_min[6] != '\0')
+ goto fail;
+ result[3] = darwin_macosx_version_min[5];
+ }
+ else
+ result[3] = '0';
+
+ return result;
+
+ fail:
+ error ("Unknown value %qs of -mmacosx-version-min",
+ darwin_macosx_version_min);
+ return "1000";
+}
+
+/* APPLE LOCAL begin ARM 5683689 */
+/* Return the value of darwin_iphoneos_version_min suitable for the
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ macro. Unlike the
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ macros, minor version
+ numbers are left-zero-padded. e.g., '1.2.3' becomes 10203.
+ The last/third version number (patch level?) is optional, and
+ defaults to '00' if not specified. In the case of a parse error,
+ report an error and return 10200. */
+static const char *
+iphoneos_version_as_macro (void)
+{
+ static char result[sizeof ("99.99.99") + 1];
+ const char *src_ptr = darwin_iphoneos_version_min;
+ char *result_ptr = &result[0];
+
+ if (! darwin_iphoneos_version_min)
+ goto fail;
+
+ if (! ISDIGIT (*src_ptr))
+ goto fail;
+
+ /* Copy over the major version number. */
+ *result_ptr++ = *src_ptr++;
+
+ if (ISDIGIT (*src_ptr))
+ *result_ptr++ = *src_ptr++;
+
+ if (*src_ptr != '.')
+ goto fail;
+
+ src_ptr++;
+
+ /* Start parsing the minor version number. */
+ if (! ISDIGIT (*src_ptr))
+ goto fail;
+
+ /* Zero-pad a single-digit value, or copy a two-digit value. */
+ *result_ptr++ = ISDIGIT (*(src_ptr + 1)) ? *src_ptr++ : '0';
+ *result_ptr++ = *src_ptr++;
+
+ /* Parse the third version number (patch level?) */
+ if (*src_ptr == '\0')
+ {
+ /* Not present -- default to zeroes. */
+ *result_ptr++ = '0';
+ *result_ptr++ = '0';
+ }
+ else if (*src_ptr == '.')
+ {
+ src_ptr++;
+
+ if (! ISDIGIT (*src_ptr))
+ goto fail;
+
+ /* Zero-pad a single-digit value, or copy a two-digit value. */
+ *result_ptr++ = ISDIGIT (*(src_ptr + 1)) ? *src_ptr++ : '0';
+ *result_ptr++ = *src_ptr++;
+ }
+ else
+ goto fail;
+
+ /* Verify and copy the terminating NUL. */
+ if (*src_ptr != '\0')
+ goto fail;
+
+ *result_ptr++ = '\0';
+ return result;
+
+ fail:
+ error ("Unknown value %qs of -miphoneos-version-min",
+ darwin_iphoneos_version_min);
+ return "10200";
+}
+/* APPLE LOCAL end ARM 5683689 */
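
Spot checks of the padding rules implemented above; the left column shows -miphoneos-version-min values, the right column the macro string the function would produce:

/* "1.2"     -> "10200"   (minor zero-padded, patch defaulted to "00")
   "1.2.3"   -> "10203"
   "2.0"     -> "20000"
   "10.12.9" -> "101209"  (two-digit major and minor copied verbatim)  */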
+
+/* Define additional CPP flags for Darwin. */
+
+#define builtin_define(TXT) cpp_define (pfile, TXT)
+
+void
+darwin_cpp_builtins (cpp_reader *pfile)
+{
+ builtin_define ("__MACH__");
+ builtin_define ("__APPLE__");
+
+ /* APPLE LOCAL Apple version */
+ /* Don't define __APPLE_CC__ here. */
+
+ /* APPLE LOCAL begin ARM 5683689 */
+ if (darwin_macosx_version_min)
+ builtin_define_with_value ("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__",
+ macosx_version_as_macro(), false);
+ else
+ builtin_define_with_value ("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
+ iphoneos_version_as_macro(), false);
+ /* APPLE LOCAL end ARM 5683689 */
+
+ /* APPLE LOCAL begin constant cfstrings */
+ if (darwin_constant_cfstrings)
+ builtin_define ("__CONSTANT_CFSTRINGS__");
+ /* APPLE LOCAL end constant cfstrings */
+ /* APPLE LOCAL begin pascal strings */
+ if (darwin_pascal_strings)
+ {
+ builtin_define ("__PASCAL_STRINGS__");
+ }
+ /* APPLE LOCAL end pascal strings */
+ /* APPLE LOCAL begin ObjC GC */
+ /* APPLE LOCAL radar 5914395 */
+ if (flag_objc_gc || flag_objc_gc_only)
+ {
+ builtin_define ("__strong=__attribute__((objc_gc(strong)))");
+ builtin_define ("__weak=__attribute__((objc_gc(weak)))");
+ builtin_define ("__OBJC_GC__");
+ }
+ else
+ {
+ builtin_define ("__strong=");
+ /* APPLE LOCAL radar 5847976 */
+ builtin_define ("__weak=__attribute__((objc_gc(weak)))");
+ }
+ /* APPLE LOCAL end ObjC GC */
+ /* APPLE LOCAL begin radar 5932809 - copyable byref blocks */
+ if (flag_blocks) {
+ builtin_define ("__block=__attribute__((__blocks__(byref)))");
+ }
+ /* APPLE LOCAL radar 6230656 */
+ /* code removed */
+ /* APPLE LOCAL end radar 5932809 - copyable byref blocks */
+
+ /* APPLE LOCAL begin C* warnings to easy porting to new abi */
+ if (flag_objc_abi == 2)
+ builtin_define ("__OBJC2__");
+ /* APPLE LOCAL end C* warnings to easy porting to new abi */
+ /* APPLE LOCAL begin radar 5072864 */
+ if (flag_objc_zerocost_exceptions)
+ builtin_define ("OBJC_ZEROCOST_EXCEPTIONS");
+ /* APPLE LOCAL radar 4899595 */
+ builtin_define ("OBJC_NEW_PROPERTIES");
+ /* APPLE LOCAL end radar 5072864 */
+/* APPLE LOCAL begin confused diff */
+}
+/* APPLE LOCAL end confused diff */
+/* APPLE LOCAL begin iframework for 4.3 4094959 */
+bool
+darwin_handle_c_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ default:
+ /* Options with a flag are otherwise assumed to be handled. */
+ if (cl_options[code].flag_var)
+ break;
+
+ /* Options that we said we'd handle but that have no flag
+ variable and are not listed here turn into errors. */
+ return false;
+
+ case OPT_iframework:
+ add_system_framework_path (xstrdup (arg));
+ break;
+ }
+ return true;
+}
+/* APPLE LOCAL end iframework for 4.3 4094959 */
+
+/* APPLE LOCAL begin radar 4985544 - radar 5096648 - radar 5195402 */
+/* Check that TYPE is CFStringRef type. */
+bool
+objc_check_cfstringref_type (tree type)
+{
+ tree CFStringRef_decl = lookup_name (get_identifier ("CFStringRef"));
+ if (!CFStringRef_decl || TREE_CODE (CFStringRef_decl) != TYPE_DECL)
+ return false;
+ return type == TREE_TYPE (CFStringRef_decl);
+}
+
+/* This routine checks that FORMAT_NUM'th argument ARGUMENT has the 'CFStringRef' type. */
+bool
+objc_check_format_cfstring (tree argument,
+ unsigned HOST_WIDE_INT format_num,
+ bool *no_add_attrs)
+{
+ unsigned HOST_WIDE_INT i;
+ /* APPLE LOCAL begin 6212507 */
+ if (format_num < 1)
+ {
+ error ("argument number of CFString format cannot be less than one");
+ return false;
+ }
+ /* APPLE LOCAL end 6212507 */
+ for (i = 1; i != format_num; i++)
+ {
+ if (argument == 0)
+ break;
+ argument = TREE_CHAIN (argument);
+ }
+
+ if (!objc_check_cfstringref_type (TREE_VALUE (argument)))
+ {
+ error ("format CFString argument not a 'CFStringRef' type");
+ *no_add_attrs = true;
+ return false;
+ }
+ return true;
+}
+/* APPLE LOCAL end radar 4985544 - radar 5096648 - radar 5195402 */
+
+/* APPLE LOCAL begin radar 2996215 - 6068877 */
+/* wrapper to call libcpp's conversion routine. */
+bool
+cvt_utf8_utf16 (const unsigned char *inbuf, size_t length,
+ unsigned char **uniCharBuf, size_t *numUniChars)
+{
+ return cpp_utf8_utf16 (parse_in, inbuf, length, uniCharBuf, numUniChars);
+}
+/* This routine declares a static char __utf16_string[numUniChars] in the
+ __TEXT,__ustring section and initializes it with uniCharBuf[numUniChars]
+ characters. */
+tree
+create_init_utf16_var (const unsigned char *inbuf, size_t length, size_t *numUniChars)
+{
+ size_t l;
+ tree decl, type, init;
+ tree initlist = NULL_TREE;
+ tree attribute;
+ const char *section_name = "__TEXT,__ustring";
+ int len = strlen (section_name);
+ unsigned char *uniCharBuf;
+ static int num;
+ const char *name_prefix = "__utf16_string_";
+ char *name;
+ int embedNull = 0;
+
+ if (!cvt_utf8_utf16 (inbuf, length, &uniCharBuf, numUniChars))
+ return NULL_TREE;
+
+ /* APPLE LOCAL begin 7589850 */
+ /* ustring with embedded null should go into __const. It should not be forced
+ into "__TEXT,__ustring" section. */
+ for (l = 0; l < length; l++) {
+ if (!inbuf[l]) {
+ embedNull = 1;
+ break;
+ }
+ }
+ /* APPLE LOCAL end 7589850 */
+
+ for (l = 0; l < *numUniChars; l++)
+ initlist = tree_cons (NULL_TREE, build_int_cst (char_type_node, uniCharBuf[l]), initlist);
+ type = build_array_type (char_type_node,
+ build_index_type (build_int_cst (NULL_TREE, *numUniChars)));
+ name = (char *)alloca (strlen (name_prefix) + 10);
+ sprintf (name, "%s%d", name_prefix, ++num);
+ decl = build_decl (VAR_DECL, get_identifier (name), type);
+ TREE_STATIC (decl) = 1;
+ DECL_INITIAL (decl) = error_mark_node; /* A real initializer is coming... */
+ DECL_IGNORED_P (decl) = 1;
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_CONTEXT (decl) = NULL_TREE;
+
+ /* APPLE LOCAL begin 7589850 */
+ if (!embedNull) {
+ attribute = tree_cons (NULL_TREE, build_string (len, section_name), NULL_TREE);
+ attribute = tree_cons (get_identifier ("section"), attribute, NULL_TREE);
+ decl_attributes (&decl, attribute, 0);
+ }
+ /* APPLE LOCAL end 7589850 */
+ attribute = tree_cons (NULL_TREE, build_int_cst (NULL_TREE, 2), NULL_TREE);
+ attribute = tree_cons (get_identifier ("aligned"), attribute, NULL_TREE);
+ decl_attributes (&decl, attribute, 0);
+ init = build_constructor_from_list (type, nreverse (initlist));
+ TREE_CONSTANT (init) = 1;
+ TREE_STATIC (init) = 1;
+ TREE_READONLY (init) = 1;
+ if (c_dialect_cxx ())
+ TREE_TYPE (init) = NULL_TREE;
+ finish_decl (decl, init, NULL_TREE);
+ /* Ensure that the variable actually gets output. */
+ mark_decl_referenced (decl);
+ /* Mark the decl to avoid "defined but not used" warning. */
+ TREE_USED (decl) = 1;
+ return decl;
+}
+/* APPLE LOCAL end radar 2996215 - 6068877 */
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-crt2.c b/gcc-4.2.1-5666.3/gcc/config/darwin-crt2.c
new file mode 100644
index 000000000..69408d389
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-crt2.c
@@ -0,0 +1,158 @@
+/* KeyMgr backwards-compatibility support for Darwin.
+ Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* It is incorrect to include config.h here, because this file is being
+ compiled for the target, and hence definitions concerning only the host
+ do not apply. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+
+/* This file doesn't do anything useful on non-powerpc targets, since they
+ don't have backwards compatibility anyway. */
+
+#ifdef __ppc__
+
+/* Homemade decls substituting for getsect.h and dyld.h, so cross
+ compilation works. */
+struct mach_header;
+extern char *getsectdatafromheader (struct mach_header *, const char *,
+ const char *, unsigned long *);
+extern void _dyld_register_func_for_add_image
+ (void (*) (struct mach_header *, unsigned long));
+extern void _dyld_register_func_for_remove_image
+ (void (*) (struct mach_header *, unsigned long));
+
+extern void __darwin_gcc3_preregister_frame_info (void);
+
+/* These are from "keymgr.h". */
+extern void _init_keymgr (void);
+extern void *_keymgr_get_and_lock_processwide_ptr (unsigned key);
+extern void _keymgr_set_and_unlock_processwide_ptr (unsigned key, void *ptr);
+
+extern void *__keymgr_global[];
+typedef struct _Sinfo_Node {
+ unsigned int size ; /*size of this node*/
+ unsigned short major_version ; /*API major version.*/
+ unsigned short minor_version ; /*API minor version.*/
+ } _Tinfo_Node ;
+
+/* KeyMgr 3.x is the first one supporting GCC3 stuff natively. */
+#define KEYMGR_API_MAJOR_GCC3 3
+/* ... with these keys. */
+#define KEYMGR_GCC3_LIVE_IMAGE_LIST 301 /* loaded images */
+#define KEYMGR_GCC3_DW2_OBJ_LIST 302 /* Dwarf2 object list */
+
+/* Node of KEYMGR_GCC3_LIVE_IMAGE_LIST. Info about each resident image. */
+struct live_images {
+ unsigned long this_size; /* sizeof (live_images) */
+ struct mach_header *mh; /* the image info */
+ unsigned long vm_slide;
+ void (*destructor)(struct live_images *); /* destructor for this */
+ struct live_images *next;
+ unsigned int examined_p;
+ void *fde;
+ void *object_info;
+ unsigned long info[2]; /* Future use. */
+};
+
+
+/* These routines are used only on Darwin versions before 10.2.
+ Later versions have equivalent code in the system.
+ Eventually, they might go away, although it might be a long time... */
+
+static void darwin_unwind_dyld_remove_image_hook
+ (struct mach_header *m, unsigned long s);
+static void darwin_unwind_dyld_remove_image_hook
+ (struct mach_header *m, unsigned long s);
+extern void __darwin_gcc3_preregister_frame_info (void);
+
+static void
+darwin_unwind_dyld_add_image_hook (struct mach_header *mh, unsigned long slide)
+{
+ struct live_images *l = (struct live_images *)calloc (1, sizeof (*l));
+ l->mh = mh;
+ l->vm_slide = slide;
+ l->this_size = sizeof (*l);
+ l->next = (struct live_images *)
+ _keymgr_get_and_lock_processwide_ptr (KEYMGR_GCC3_LIVE_IMAGE_LIST);
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_GCC3_LIVE_IMAGE_LIST, l);
+}
+
+static void
+darwin_unwind_dyld_remove_image_hook (struct mach_header *m, unsigned long s)
+{
+ struct live_images *top, **lip, *destroy = NULL;
+
+ /* Look for it in the list of live images and delete it. */
+
+ top = (struct live_images *)
+ _keymgr_get_and_lock_processwide_ptr (KEYMGR_GCC3_LIVE_IMAGE_LIST);
+ for (lip = &top; *lip != NULL; lip = &(*lip)->next)
+ {
+ if ((*lip)->mh == m && (*lip)->vm_slide == s)
+ {
+ destroy = *lip;
+ *lip = destroy->next; /* unlink DESTROY */
+
+ if (destroy->this_size != sizeof (*destroy)) /* sanity check */
+ abort ();
+
+ break;
+ }
+ }
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_GCC3_LIVE_IMAGE_LIST, top);
+
+ /* Now that we have unlinked this from the image list, toss it. */
+ if (destroy != NULL)
+ {
+ if (destroy->destructor != NULL)
+ (*destroy->destructor) (destroy);
+ free (destroy);
+ }
+}
+
+void
+__darwin_gcc3_preregister_frame_info (void)
+{
+ const _Tinfo_Node *info;
+ _init_keymgr ();
+ info = (_Tinfo_Node *)__keymgr_global[2];
+ if (info != NULL)
+ {
+ if (info->major_version >= KEYMGR_API_MAJOR_GCC3)
+ return;
+ /* Otherwise, use our own add_image_hooks. */
+ }
+
+ _dyld_register_func_for_add_image (darwin_unwind_dyld_add_image_hook);
+ _dyld_register_func_for_remove_image (darwin_unwind_dyld_remove_image_hook);
+}
+
+#endif /* __ppc__ */
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-crt3.c b/gcc-4.2.1-5666.3/gcc/config/darwin-crt3.c
new file mode 100644
index 000000000..ac5e0398f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-crt3.c
@@ -0,0 +1,537 @@
+/* __cxa_atexit backwards-compatibility support for Darwin.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* Don't do anything if we are compiling for a kext multilib. */
+#ifdef __PIC__
+
+/* It is incorrect to include config.h here, because this file is being
+ compiled for the target, and hence definitions concerning only the host
+ do not apply. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+
+#include <dlfcn.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* This file works around two different problems.
+
+ The first problem is that there is no __cxa_atexit on Mac OS versions
+ before 10.4. It fixes this by providing a complete atexit and
+ __cxa_atexit emulation called from the regular atexit.
+
+ The second problem is that on all shipping versions of Mac OS,
+ __cxa_finalize and exit() don't work right: they don't run routines
+ that were registered while other atexit routines are running. This
+ is worked around by wrapping each atexit/__cxa_atexit routine with
+ our own routine which ensures that any __cxa_atexit calls while it
+ is running are honoured.
+
+ There are still problems which this does not solve. Before 10.4,
+ shared objects linked with previous compilers won't have their
+ atexit calls properly interleaved with code compiled with newer
+ compilers. Also, atexit routines registered from shared objects
+ linked with previous compilers won't get the bug fix. */
+
+typedef int (*cxa_atexit_p)(void (*func) (void*), void* arg, const void* dso);
+typedef void (*cxa_finalize_p)(const void *dso);
+typedef int (*atexit_p)(void (*func)(void));
+
+/* These are from "keymgr.h". */
+extern void *_keymgr_get_and_lock_processwide_ptr (unsigned key);
+extern int _keymgr_get_and_lock_processwide_ptr_2 (unsigned, void **);
+extern int _keymgr_set_and_unlock_processwide_ptr (unsigned key, void *ptr);
+
+extern void *__keymgr_global[];
+typedef struct _Sinfo_Node {
+ unsigned int size ; /*size of this node*/
+ unsigned short major_version ; /*API major version.*/
+ unsigned short minor_version ; /*API minor version.*/
+ } _Tinfo_Node ;
+
+#ifdef __ppc__
+#define CHECK_KEYMGR_ERROR(e) \
+ (((_Tinfo_Node *)__keymgr_global[2])->major_version >= 4 ? (e) : 0)
+#else
+#define CHECK_KEYMGR_ERROR(e) (e)
+#endif
+
+/* Our globals are stored under this keymgr index. */
+#define KEYMGR_ATEXIT_LIST 14
+
+/* The different kinds of callback routines. */
+typedef void (*atexit_callback)(void);
+typedef void (*cxa_atexit_callback)(void *);
+
+/* This structure holds a routine to call. There may be extra fields
+ at the end of the structure that this code doesn't know about. */
+struct one_atexit_routine
+{
+ union {
+ atexit_callback ac;
+ cxa_atexit_callback cac;
+ } callback;
+ /* has_arg is 0/2/4 if 'ac' is live, 1/3/5 if 'cac' is live.
+ Higher numbers indicate a later version of the structure that this
+ code doesn't understand and will ignore. */
+ int has_arg;
+ void * arg;
+};
+
+struct atexit_routine_list
+{
+ struct atexit_routine_list * next;
+ struct one_atexit_routine r;
+};
+
+/* The various possibilities for status of atexit(). */
+enum atexit_status {
+ atexit_status_unknown = 0,
+ atexit_status_missing = 1,
+ atexit_status_broken = 2,
+ atexit_status_working = 16
+};
+
+struct keymgr_atexit_list
+{
+ /* Version of this list. This code knows only about version 0.
+ If the version is higher than 0, this code may add new atexit routines
+ but should not attempt to run the list. */
+ short version;
+ /* 1 if an atexit routine is currently being run by this code, 0
+ otherwise. */
+ char running_routines;
+ /* Holds a value from 'enum atexit_status'. */
+ unsigned char atexit_status;
+ /* The list of atexit and cxa_atexit routines registered. If
+ atexit_status_missing it contains all routines registered while
+ linked with this code. If atexit_status_broken it contains all
+ routines registered during cxa_finalize while linked with this
+ code. */
+ struct atexit_routine_list *l;
+ /* &__cxa_atexit; set if atexit_status >= atexit_status_broken. */
+ cxa_atexit_p cxa_atexit_f;
+ /* &__cxa_finalize; set if atexit_status >= atexit_status_broken. */
+ cxa_finalize_p cxa_finalize_f;
+ /* &atexit; set if atexit_status >= atexit_status_working
+ or atexit_status == atexit_status_missing. */
+ atexit_p atexit_f;
+};
+
+/* Return 0 if __cxa_atexit has the bug it has in Mac OS 10.4: it
+ fails to call routines registered while an atexit routine is
+ running. Return 1 if it works properly, and -1 if an error occurred. */
+
+struct atexit_data
+{
+ int result;
+ cxa_atexit_p cxa_atexit;
+};
+
+static void cxa_atexit_check_2 (void *arg)
+{
+ ((struct atexit_data *)arg)->result = 1;
+}
+
+static void cxa_atexit_check_1 (void *arg)
+{
+ struct atexit_data * aed = arg;
+ if (aed->cxa_atexit (cxa_atexit_check_2, arg, arg) != 0)
+ aed->result = -1;
+}
+
+static int
+check_cxa_atexit (cxa_atexit_p cxa_atexit, cxa_finalize_p cxa_finalize)
+{
+ struct atexit_data aed = { 0, cxa_atexit };
+
+ /* We re-use &aed as the 'dso' parameter, since it's a unique address. */
+ if (cxa_atexit (cxa_atexit_check_1, &aed, &aed) != 0)
+ return -1;
+ cxa_finalize (&aed);
+ if (aed.result == 0)
+ {
+ /* Call __cxa_finalize again to make sure that cxa_atexit_check_2
+ is removed from the list before AED goes out of scope. */
+ cxa_finalize (&aed);
+ aed.result = 0;
+ }
+ return aed.result;
+}
+
+#ifdef __ppc__
+/* This comes from Csu. It works only before 10.4. The prototype has
+ been altered a bit to avoid casting. */
+extern int _dyld_func_lookup(const char *dyld_func_name,
+ void *address) __attribute__((visibility("hidden")));
+
+static void our_atexit (void);
+
+/* We're running on 10.3.9. Find the address of the system atexit()
+ function. So easy to say, so hard to do. */
+static atexit_p
+find_atexit_10_3 (void)
+{
+ unsigned int (*dyld_image_count_fn)(void);
+ const char *(*dyld_get_image_name_fn)(unsigned int image_index);
+ const void *(*dyld_get_image_header_fn)(unsigned int image_index);
+ const void *(*NSLookupSymbolInImage_fn)(const void *image,
+ const char *symbolName,
+ unsigned int options);
+ void *(*NSAddressOfSymbol_fn)(const void *symbol);
+ unsigned i, count;
+
+ /* Find some dyld functions. */
+ _dyld_func_lookup("__dyld_image_count", &dyld_image_count_fn);
+ _dyld_func_lookup("__dyld_get_image_name", &dyld_get_image_name_fn);
+ _dyld_func_lookup("__dyld_get_image_header", &dyld_get_image_header_fn);
+ _dyld_func_lookup("__dyld_NSLookupSymbolInImage", &NSLookupSymbolInImage_fn);
+ _dyld_func_lookup("__dyld_NSAddressOfSymbol", &NSAddressOfSymbol_fn);
+
+ /* If any of these don't exist, that's an error. */
+ if (! dyld_image_count_fn || ! dyld_get_image_name_fn
+ || ! dyld_get_image_header_fn || ! NSLookupSymbolInImage_fn
+ || ! NSAddressOfSymbol_fn)
+ return NULL;
+
+ count = dyld_image_count_fn ();
+ for (i = 0; i < count; i++)
+ {
+ const char * path = dyld_get_image_name_fn (i);
+ const void * image;
+ const void * symbol;
+
+ if (strcmp (path, "/usr/lib/libSystem.B.dylib") != 0)
+ continue;
+ image = dyld_get_image_header_fn (i);
+ if (! image)
+ return NULL;
+ /* '4' is NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR. */
+ symbol = NSLookupSymbolInImage_fn (image, "_atexit", 4);
+ if (! symbol)
+ return NULL;
+ return NSAddressOfSymbol_fn (symbol);
+ }
+ return NULL;
+}
+#endif
+
+/* Create (if necessary), find, lock, fill in, and return our globals.
+ Return NULL on error, in which case the globals will not be locked.
+ The caller should call keymgr_set_and_unlock. */
+static struct keymgr_atexit_list *
+get_globals (void)
+{
+ struct keymgr_atexit_list * r;
+
+#ifdef __ppc__
+ /* 10.3.9 doesn't have _keymgr_get_and_lock_processwide_ptr_2 so the
+ PPC side can't use it. On 10.4 this just means the error gets
+ reported a little later when
+ _keymgr_set_and_unlock_processwide_ptr finds that the key was
+ never locked. */
+ r = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
+#else
+ void * rr;
+ if (_keymgr_get_and_lock_processwide_ptr_2 (KEYMGR_ATEXIT_LIST, &rr))
+ return NULL;
+ r = rr;
+#endif
+
+ if (r == NULL)
+ {
+ r = calloc (sizeof (struct keymgr_atexit_list), 1);
+ if (! r)
+ return NULL;
+ }
+
+ if (r->atexit_status == atexit_status_unknown)
+ {
+ void *handle;
+
+ handle = dlopen ("/usr/lib/libSystem.B.dylib", RTLD_NOLOAD);
+ if (!handle)
+ {
+#ifdef __ppc__
+ r->atexit_status = atexit_status_missing;
+ r->atexit_f = find_atexit_10_3 ();
+ if (! r->atexit_f)
+ goto error;
+ if (r->atexit_f (our_atexit))
+ goto error;
+#else
+ goto error;
+#endif
+ }
+ else
+ {
+ int chk_result;
+
+ r->cxa_atexit_f = (cxa_atexit_p)dlsym (handle, "__cxa_atexit");
+ r->cxa_finalize_f = (cxa_finalize_p)dlsym (handle, "__cxa_finalize");
+ if (! r->cxa_atexit_f || ! r->cxa_finalize_f)
+ goto error;
+
+ chk_result = check_cxa_atexit (r->cxa_atexit_f, r->cxa_finalize_f);
+ if (chk_result == -1)
+ goto error;
+ else if (chk_result == 0)
+ r->atexit_status = atexit_status_broken;
+ else
+ {
+ r->atexit_f = (atexit_p)dlsym (handle, "atexit");
+ if (! r->atexit_f)
+ goto error;
+ r->atexit_status = atexit_status_working;
+ }
+ }
+ }
+
+ return r;
+
+ error:
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, r);
+ return NULL;
+}
+
+/* Add TO_ADD to the list in G. G may be NULL but is always the
+ result of calling _keymgr_get_and_lock_processwide_ptr, and so
+ KEYMGR_ATEXIT_LIST is known to be locked; this routine is
+ responsible for unlocking it. */
+
+static int
+add_routine (struct keymgr_atexit_list * g,
+ const struct one_atexit_routine * to_add)
+{
+ struct atexit_routine_list * s
+ = malloc (sizeof (struct atexit_routine_list));
+ int result;
+
+ if (!s)
+ {
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+ return -1;
+ }
+ s->r = *to_add;
+ s->next = g->l;
+ g->l = s;
+ result = _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+ return CHECK_KEYMGR_ERROR (result) == 0 ? 0 : -1;
+}
+
+/* Run the routines in G->L up to STOP, unlocking KEYMGR_ATEXIT_LIST
+ around each call so that a routine may itself register new handlers. */
+static struct keymgr_atexit_list *
+run_routines (struct keymgr_atexit_list *g,
+ struct atexit_routine_list *stop)
+{
+ for (;;)
+ {
+ struct atexit_routine_list * cur = g->l;
+ if (! cur || cur == stop)
+ break;
+ g->l = cur->next;
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+
+ switch (cur->r.has_arg) {
+ case 0: case 2: case 4:
+ cur->r.callback.ac ();
+ break;
+ case 1: case 3: case 5:
+ cur->r.callback.cac (cur->r.arg);
+ break;
+ default:
+ /* Don't understand, so don't call it. */
+ break;
+ }
+ free (cur);
+
+ g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
+ if (! g)
+ break;
+ }
+ return g;
+}
+
+/* Call the routine described by ROUTINE_PARAM and then call any
+ routines added to KEYMGR_ATEXIT_LIST while that routine was
+ running, all with running_routines set. */
+
+static void
+cxa_atexit_wrapper (void* routine_param)
+{
+ struct one_atexit_routine * routine = routine_param;
+ struct keymgr_atexit_list *g;
+ struct atexit_routine_list * base = NULL;
+ char prev_running = 0;
+
+ g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
+ if (g)
+ {
+ prev_running = g->running_routines;
+ g->running_routines = 1;
+ base = g->l;
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+ }
+
+ if (routine->has_arg)
+ routine->callback.cac (routine->arg);
+ else
+ routine->callback.ac ();
+
+ if (g)
+ g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
+ if (g)
+ g = run_routines (g, base);
+ if (g)
+ {
+ g->running_routines = prev_running;
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+ }
+}
+
+#ifdef __ppc__
+/* This code is used while running on 10.3.9, when __cxa_atexit doesn't
+ exist in the system library. 10.3.9 only supported regular PowerPC,
+ so this code isn't necessary on x86 or ppc64. */
+
+/* This routine is called from the system atexit(); it runs everything
+ registered on the KEYMGR_ATEXIT_LIST. */
+
+static void
+our_atexit (void)
+{
+ struct keymgr_atexit_list *g;
+ char prev_running;
+
+ g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
+ if (! g || g->version != 0 || g->atexit_status != atexit_status_missing)
+ return;
+
+ prev_running = g->running_routines;
+ g->running_routines = 1;
+ g = run_routines (g, NULL);
+ if (! g)
+ return;
+ g->running_routines = prev_running;
+ _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+}
+#endif
+
+/* This is our wrapper around atexit and __cxa_atexit. It will return
+ nonzero if an error occurs, and otherwise:
+ - if routines are already running, or __cxa_atexit is missing
+ (10.3.9), add R to KEYMGR_ATEXIT_LIST; or
+ - if the system __cxa_atexit works, register R with it directly (or
+ with the system atexit when R takes no argument); or
+ - otherwise, call the system __cxa_atexit to add cxa_atexit_wrapper
+ with an argument that indicates how cxa_atexit_wrapper should call R. */
+
+static int
+atexit_common (const struct one_atexit_routine *r, const void *dso)
+{
+ struct keymgr_atexit_list *g = get_globals ();
+
+ if (! g)
+ return -1;
+
+ if (g->running_routines || g->atexit_status == atexit_status_missing)
+ return add_routine (g, r);
+
+ if (g->atexit_status >= atexit_status_working)
+ {
+ int result;
+ if (r->has_arg)
+ {
+ cxa_atexit_p cxa_atexit = g->cxa_atexit_f;
+ result = _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST,
+ g);
+ if (CHECK_KEYMGR_ERROR (result))
+ return -1;
+ return cxa_atexit (r->callback.cac, r->arg, dso);
+ }
+ else
+ {
+ atexit_p atexit_f = g->atexit_f;
+ result = _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST,
+ g);
+ if (CHECK_KEYMGR_ERROR (result))
+ return -1;
+ return atexit_f (r->callback.ac);
+ }
+ }
+ else
+ {
+ cxa_atexit_p cxa_atexit = g->cxa_atexit_f;
+ struct one_atexit_routine *alloced;
+ int result;
+
+ result = _keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
+ if (CHECK_KEYMGR_ERROR (result))
+ return -1;
+
+ alloced = malloc (sizeof (struct one_atexit_routine));
+ if (! alloced)
+ return -1;
+ *alloced = *r;
+ return cxa_atexit (cxa_atexit_wrapper, alloced, dso);
+ }
+}
+
+/* These are the actual replacement routines; they just funnel into
+ atexit_common. */
+
+int __cxa_atexit (cxa_atexit_callback func, void* arg,
+ const void* dso) __attribute__((visibility("hidden")));
+
+int
+__cxa_atexit (cxa_atexit_callback func, void* arg, const void* dso)
+{
+ struct one_atexit_routine r;
+ r.callback.cac = func;
+ r.has_arg = 1;
+ r.arg = arg;
+ return atexit_common (&r, dso);
+}
+
+int atexit (atexit_callback func) __attribute__((visibility("hidden")));
+
+/* Use __dso_handle to allow even bundles that call atexit() to be unloaded
+ on 10.4. */
+extern void __dso_handle;
+
+int
+atexit (atexit_callback func)
+{
+ struct one_atexit_routine r;
+ r.callback.ac = func;
+ r.has_arg = 0;
+ return atexit_common (&r, &__dso_handle);
+}
+
+#endif /* __PIC__ */
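+
+/* Editorial sketch, not part of this patch: a hypothetical standalone
+   program exercising the replacements above.  Both atexit calls below
+   resolve to the hidden wrapper and funnel through atexit_common; on
+   systems whose __cxa_atexit is detected as broken, handlers are
+   wrapped in cxa_atexit_wrapper so that registrations made while a
+   handler runs are kept on KEYMGR_ATEXIT_LIST.  */
+#include <stdio.h>
+#include <stdlib.h>
+
+static void late_handler (void) { puts ("late handler ran"); }
+
+static void early_handler (void)
+{
+  /* Registering from inside a running handler is exactly the case the
+     wrapper machinery above exists to keep working.  */
+  atexit (late_handler);
+  puts ("early handler ran");
+}
+
+int main (void)
+{
+  atexit (early_handler);
+  return 0;
+}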
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-driver.c b/gcc-4.2.1-5666.3/gcc/config/darwin-driver.c
new file mode 100644
index 000000000..3d738e3f5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-driver.c
@@ -0,0 +1,213 @@
+/* APPLE LOCAL file 5235474 5683689 */
+/* Additional functions for the GCC driver on Darwin native.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#ifndef CROSS_DIRECTORY_STRUCTURE
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "gcc.h"
+#include <sys/sysctl.h>
+#include "xregex.h"
+
+#ifndef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) DEFAULT_SWITCH_TAKES_ARG(CHAR)
+#endif
+
+#ifndef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) DEFAULT_WORD_SWITCH_TAKES_ARG (STR)
+#endif
+
+/* This function is used when running on a Darwin system and using that
+ system's headers and libraries. Unless specified otherwise by
+ command-line options or environment variables, this routine will
+ set the appropriate version specification flag to a default value.
+ The flag is selected by VERS_TYPE: -mmacosx-version-min for
+ DARWIN_VERSION_MACOSX, or -miphoneos-version-min for
+ DARWIN_VERSION_IPHONEOS. */
+
+void
+darwin_default_min_version (int * argc_p, char *** argv_p,
+ enum darwin_version_type vers_type)
+{
+ const int argc = *argc_p;
+ char ** const argv = *argv_p;
+ int i;
+ char osversion[32];
+ size_t osversion_len = sizeof (osversion) - 1;
+ static int osversion_name[2] = { CTL_KERN, KERN_OSRELEASE };
+ char * version_p;
+ char * version_pend;
+ int major_vers;
+ char minor_vers[6];
+ static char new_flag[sizeof ("-mxxxxxx-version-min=99.99.99") + 6];
+
+ /* If the command-line is empty, just return. */
+ if (argc <= 1)
+ return;
+ /* Don't do this if the user has specified -b or -V at the start
+ of the command-line. */
+ if (argv[1][0] == '-'
+ && (argv[1][1] == 'V' ||
+ ((argv[1][1] == 'b') && (NULL != strchr(argv[1] + 2,'-')))))
+ return;
+
+ /* Don't do this if the user specified -mmacosx-version-min=,
+ -miphoneos-version-min, -mno-macosx-version-min, or
+ -mno-iphoneos-version-min. */
+ for (i = 1; i < argc; i++)
+ if (argv[i][0] == '-')
+ {
+ const char * const p = argv[i];
+ if (strncmp (p, "-mno-macosx-version-min", 23) == 0
+ || strncmp (p, "-mno-iphoneos-version-min", 25) == 0
+ || strncmp (p, "-mmacosx-version-min", 20) == 0
+ || strncmp (p, "-miphoneos-version-min", 22) == 0)
+ return;
+
+ /* It doesn't count if it's an argument to a different switch. */
+ if (p[0] == '-'
+ && ((SWITCH_TAKES_ARG (p[1]) > (p[2] != 0))
+ || WORD_SWITCH_TAKES_ARG (p + 1)))
+ i++;
+ }
+
+ /* Retrieve the deployment target from the environment and insert
+ it as a flag. */
+ {
+ const char * macosx_deployment_target;
+ const char * iphoneos_deployment_target;
+ bool iphoneos_env_set, macosx_env_set;
+
+ macosx_deployment_target = getenv ("MACOSX_DEPLOYMENT_TARGET");
+ iphoneos_deployment_target = getenv ("IPHONEOS_DEPLOYMENT_TARGET");
+
+ /* We choose to ignore an environment variable set to an empty
+ string. */
+ macosx_env_set = macosx_deployment_target
+ && macosx_deployment_target[0];
+ iphoneos_env_set = iphoneos_deployment_target
+ && iphoneos_deployment_target[0];
+
+ if (macosx_env_set && iphoneos_env_set)
+ {
+ /* Conflicting DEPLOYMENT_TARGETs given. Don't emit a warning
+ for now (see rdar://5819018) -- just choose based on
+ VERS_TYPE. */
+ if (vers_type == DARWIN_VERSION_IPHONEOS)
+ macosx_env_set = 0;
+ else
+ iphoneos_env_set = 0;
+ }
+
+ if (macosx_env_set)
+ {
+ ++*argc_p;
+ *argv_p = xmalloc (sizeof (char *) * *argc_p);
+ (*argv_p)[0] = argv[0];
+ (*argv_p)[1] = concat ("-mmacosx-version-min=",
+ macosx_deployment_target, NULL);
+ memcpy (*argv_p + 2, argv + 1, (argc - 1) * sizeof (char *));
+ return;
+ }
+
+ if (iphoneos_env_set)
+ {
+ ++*argc_p;
+ *argv_p = xmalloc (sizeof (char *) * *argc_p);
+ (*argv_p)[0] = argv[0];
+ (*argv_p)[1] = concat ("-miphoneos-version-min=",
+ iphoneos_deployment_target, NULL);
+ memcpy (*argv_p + 2, argv + 1, (argc - 1) * sizeof (char *));
+ return;
+ }
+ }
+
+ /* For iPhone OS, if no version number is specified, we default to
+ 3.0. */
+ if (vers_type == DARWIN_VERSION_IPHONEOS)
+ {
+ ++*argc_p;
+ *argv_p = xmalloc (sizeof (char *) * *argc_p);
+ (*argv_p)[0] = argv[0];
+ (*argv_p)[1] = xstrdup ("-miphoneos-version-min=3.0");
+ memcpy (*argv_p + 2, argv + 1, (argc - 1) * sizeof (char *));
+ return;
+ }
+
+ gcc_assert (vers_type == DARWIN_VERSION_MACOSX);
+
+ /* Determine the version of the running OS. If we can't, warn user,
+ and do nothing. */
+ if (sysctl (osversion_name, ARRAY_SIZE (osversion_name), osversion,
+ &osversion_len, NULL, 0) == -1)
+ {
+ fprintf (stderr, "sysctl for kern.osversion failed: %s\n",
+ xstrerror (errno));
+ return;
+ }
+
+ /* Try to parse the first two parts of the OS version number. Warn
+ user and return if it doesn't make sense. */
+ if (! ISDIGIT (osversion[0]))
+ goto parse_failed;
+ major_vers = osversion[0] - '0';
+ version_p = osversion + 1;
+ if (ISDIGIT (*version_p))
+ major_vers = major_vers * 10 + (*version_p++ - '0');
+ /* Reject kernel majors beyond Darwin 13 (Mac OS X 10.9). */
+ if (major_vers > 4 + 9)
+ goto parse_failed;
+ if (*version_p++ != '.')
+ goto parse_failed;
+ version_pend = strchr(version_p, '.');
+ if (!version_pend)
+ goto parse_failed;
+ if (! ISDIGIT (*version_p))
+ goto parse_failed;
+ /* Don't overflow the fixed-size minor_vers buffer. */
+ if ((size_t) (version_pend - version_p) >= sizeof (minor_vers))
+ goto parse_failed;
+ strncpy (minor_vers, version_p, version_pend - version_p);
+ minor_vers[version_pend - version_p] = '\0';
+
+ /* The major kernel version number is 4 plus the second OS version
+ component. */
+ if (major_vers - 4 <= 4)
+ /* On 10.4 and earlier, the old linker is used which does not
+ support three-component system versions. */
+ sprintf (new_flag, "-mmacosx-version-min=10.%d", major_vers - 4);
+ else
+ sprintf (new_flag, "-mmacosx-version-min=10.%d.%s", major_vers - 4,
+ minor_vers);
+
+ /* Add the new flag. */
+ ++*argc_p;
+ *argv_p = xmalloc (sizeof (char *) * *argc_p);
+ (*argv_p)[0] = argv[0];
+ (*argv_p)[1] = new_flag;
+ memcpy (*argv_p + 2, argv + 1, (argc - 1) * sizeof (char *));
+ return;
+
+ parse_failed:
+ fprintf (stderr, "couldn't understand kern.osversion `%.*s'\n",
+ (int) osversion_len, osversion);
+ return;
+}
+
+#endif /* CROSS_DIRECTORY_STRUCTURE */
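+
+/* Editorial sketch, not part of this patch: the kern.osrelease mapping
+   above in isolation.  Darwin kernel major N corresponds to Mac OS X
+   10.(N-4), so "8.11.0" maps to plain 10.4 (the linker on 10.4 and
+   earlier cannot take a three-component version) while "9.8.0" maps
+   to 10.5.8.  */
+#include <stdio.h>
+
+static void map_osrelease (const char *rel)
+{
+  int major, minor;
+  if (sscanf (rel, "%d.%d", &major, &minor) != 2 || major <= 4)
+    return;
+  if (major - 4 <= 4)
+    printf ("%s -> -mmacosx-version-min=10.%d\n", rel, major - 4);
+  else
+    printf ("%s -> -mmacosx-version-min=10.%d.%d\n", rel, major - 4, minor);
+}
+
+int main (void)
+{
+  map_osrelease ("8.11.0"); /* 10.4 */
+  map_osrelease ("9.8.0");  /* 10.5.8 */
+  return 0;
+}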
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-protos.h b/gcc-4.2.1-5666.3/gcc/config/darwin-protos.h
new file mode 100644
index 000000000..a8df0f5dd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-protos.h
@@ -0,0 +1,147 @@
+/* Prototypes.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+extern void darwin_init_sections (void);
+extern int name_needs_quotes (const char *);
+
+/* APPLE LOCAL ARM pic support */
+extern int machopic_lookup_stub_or_non_lazy_ptr (const char *);
+extern void machopic_validate_stub_or_non_lazy_ptr (const char *);
+
+extern const char *machopic_function_base_name (void);
+extern void machopic_output_function_base_name (FILE *);
+extern const char *machopic_indirection_name (rtx, bool);
+extern const char *machopic_mcount_stub_name (void);
+
+#ifdef RTX_CODE
+
+extern rtx machopic_function_base_sym (void);
+extern int machopic_operand_p (rtx);
+extern enum machopic_addr_class machopic_classify_symbol (rtx);
+
+extern rtx machopic_indirect_data_reference (rtx, rtx);
+/* APPLE LOCAL 4380289 */
+extern rtx machopic_force_indirect_call_target (rtx);
+extern rtx machopic_indirect_call_target (rtx);
+extern rtx machopic_legitimize_pic_address (rtx, enum machine_mode, rtx);
+
+extern void machopic_asm_out_constructor (rtx, int);
+extern void machopic_asm_out_destructor (rtx, int);
+/* APPLE LOCAL ARM pic support */
+extern int machopic_data_defined_p (rtx sym_ref);
+extern int indirect_data (rtx sym_ref);
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+
+extern void machopic_define_symbol (rtx);
+extern void darwin_encode_section_info (tree, rtx, int);
+extern void darwin_set_default_type_attributes (tree);
+/* APPLE LOCAL CW asm blocks */
+extern tree darwin_iasm_special_label (tree);
+
+#endif /* TREE_CODE */
+
+extern void machopic_finish (FILE *);
+
+extern int machopic_reloc_rw_mask (void);
+extern section *machopic_select_section (tree, int, unsigned HOST_WIDE_INT);
+extern section *machopic_select_rtx_section (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
+
+extern void darwin_unique_section (tree decl, int reloc);
+extern void darwin_asm_named_section (const char *, unsigned int, tree);
+extern void darwin_non_lazy_pcrel (FILE *, rtx);
+
+extern void darwin_emit_unwind_label (FILE *, tree, int, int);
+extern void darwin_emit_except_table_label (FILE *);
+
+extern void darwin_pragma_ignore (struct cpp_reader *);
+extern void darwin_pragma_options (struct cpp_reader *);
+extern void darwin_pragma_unused (struct cpp_reader *);
+extern void darwin_pragma_ms_struct (struct cpp_reader *);
+/* APPLE LOCAL pragma fenv */
+extern void darwin_pragma_fenv (struct cpp_reader *);
+/* APPLE LOCAL pragma reverse_bitfields */
+extern void darwin_pragma_reverse_bitfields (struct cpp_reader *);
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+extern void darwin_pragma_opt_level (struct cpp_reader *);
+extern void darwin_pragma_opt_size (struct cpp_reader *);
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+extern void darwin_pragma_pack (struct cpp_reader *);
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
+
+/* APPLE LOCAL begin darwin_set_section_for_var_p */
+extern section* darwin_set_section_for_var_p (tree, int, int, section*);
+/* APPLE LOCAL end darwin_set_section_for_var_p */
+
+/* APPLE LOCAL ObjC GC */
+extern tree darwin_handle_objc_gc_attribute (tree *, tree, tree, int, bool *);
+/* APPLE LOCAL file radar 5595352 */
+extern tree darwin_handle_nsobject_attribute (tree *, tree, tree, int, bool *);
+
+extern void darwin_file_start (void);
+extern void darwin_file_end (void);
+
+extern void darwin_mark_decl_preserved (const char *);
+
+extern tree darwin_handle_kext_attribute (tree *, tree, tree, int, bool *);
+extern tree darwin_handle_weak_import_attribute (tree *node, tree name,
+ tree args, int flags,
+ bool * no_add_attrs);
+extern void machopic_output_stub (FILE *, const char *, const char *);
+extern void darwin_globalize_label (FILE *, const char *);
+extern void darwin_assemble_visibility (tree, int);
+extern void darwin_asm_output_dwarf_delta (FILE *, int, const char *,
+ const char *);
+extern void darwin_asm_output_dwarf_offset (FILE *, int, const char *,
+ section *);
+extern bool darwin_binds_local_p (tree);
+extern void darwin_cpp_builtins (struct cpp_reader *);
+/* APPLE LOCAL iframework for 4.3 4094959 */
+extern bool darwin_handle_c_option (size_t code, const char *arg, int value);
+extern void darwin_asm_output_anchor (rtx symbol);
+extern bool darwin_kextabi_p (void);
+extern void darwin_override_options (void);
+/* APPLE LOCAL optimization pragmas 3124235/3420242 */
+extern void reset_optimization_options (int, int);
+
+/* APPLE LOCAL begin constant cfstrings */
+extern void darwin_init_cfstring_builtins (void);
+extern tree darwin_expand_tree_builtin (tree, tree, tree);
+extern tree darwin_construct_objc_string (tree);
+extern bool darwin_constant_cfstring_p (tree);
+/* APPLE LOCAL end constant cfstrings */
+
+/* APPLE LOCAL begin radar 4985544 */
+extern bool darwin_cfstring_type_node (tree);
+extern bool objc_check_format_cfstring (tree, unsigned HOST_WIDE_INT, bool *);
+/* APPLE LOCAL end radar 4985544 */
+/* APPLE LOCAL radar 5195402 */
+extern bool objc_check_cfstringref_type (tree);
+
+/* APPLE LOCAL begin radar 2996215 - 6068877 */
+extern bool cvt_utf8_utf16 (const unsigned char *, size_t, unsigned char **, size_t *);
+extern tree create_init_utf16_var (const unsigned char *inbuf, size_t length, size_t *numUniChars);
+/* APPLE LOCAL end radar 2996215 - 6068877 */
+/* APPLE LOCAL radar 5202926 */
+extern bool objc_anonymous_local_objc_name (const char *);
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin-sections.def b/gcc-4.2.1-5666.3/gcc/config/darwin-sections.def
new file mode 100644
index 000000000..1c55fa274
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin-sections.def
@@ -0,0 +1,140 @@
+DEF_SECTION (text_coal_section, SECTION_CODE,
+ ".section __TEXT,__textcoal_nt,coalesced,pure_instructions", 0)
+DEF_SECTION (text_unlikely_coal_section, SECTION_CODE,
+ ".section __TEXT,__text_unlikely_coal,"
+ "coalesced,pure_instructions", 0)
+DEF_SECTION (const_section, 0, ".const", 0)
+DEF_SECTION (const_coal_section, 0,
+ ".section __TEXT,__const_coal,coalesced", 0)
+DEF_SECTION (const_data_section, 0, ".const_data", 0)
+DEF_SECTION (const_data_coal_section, 0,
+ ".section __DATA,__const_coal,coalesced", 0)
+DEF_SECTION (data_coal_section, SECTION_WRITE,
+ ".section __DATA,__datacoal_nt,coalesced", 0)
+DEF_SECTION (cstring_section, SECTION_MERGE, ".cstring", 0)
+DEF_SECTION (literal4_section, SECTION_MERGE, ".literal4", 0)
+DEF_SECTION (literal8_section, SECTION_MERGE, ".literal8", 0)
+DEF_SECTION (literal16_section, SECTION_MERGE, ".literal16", 0)
+DEF_SECTION (constructor_section, 0, ".constructor", 0)
+DEF_SECTION (mod_init_section, 0, ".mod_init_func", 0)
+DEF_SECTION (mod_term_section, 0, ".mod_term_func", 0)
+DEF_SECTION (destructor_section, 0, ".destructor", 0)
+DEF_SECTION (objc_class_section, 0, ".objc_class", 1)
+DEF_SECTION (objc_meta_class_section, 0, ".objc_meta_class", 1)
+DEF_SECTION (objc_category_section, 0, ".objc_category", 1)
+DEF_SECTION (objc_class_vars_section, 0, ".objc_class_vars", 1)
+DEF_SECTION (objc_instance_vars_section, 0, ".objc_instance_vars", 1)
+DEF_SECTION (objc_cls_meth_section, 0, ".objc_cls_meth", 1)
+DEF_SECTION (objc_inst_meth_section, 0, ".objc_inst_meth", 1)
+DEF_SECTION (objc_cat_cls_meth_section, 0, ".objc_cat_cls_meth", 1)
+DEF_SECTION (objc_cat_inst_meth_section, 0, ".objc_cat_inst_meth", 1)
+DEF_SECTION (objc_selector_refs_section, SECTION_MERGE, ".objc_message_refs", 1)
+DEF_SECTION (objc_selector_fixup_section, 0,
+ ".section __OBJC, __sel_fixup, regular, no_dead_strip", 1)
+DEF_SECTION (objc_symbols_section, 0, ".objc_symbols", 1)
+DEF_SECTION (objc_module_info_section, 0, ".objc_module_info", 1)
+DEF_SECTION (objc_protocol_section, 0, ".objc_protocol", 1)
+DEF_SECTION (objc_string_object_section, 0, ".objc_string_object", 1)
+DEF_SECTION (objc_constant_string_object_section, 0,
+ ".section __OBJC, __cstring_object, regular, no_dead_strip", 1)
+
+/* APPLE LOCAL begin constant cfstrings */
+/* Unlike constant NSStrings, constant CFStrings do not live in the
+ __OBJC segment since they may also occur in pure C or C++ programs. */
+DEF_SECTION (cfstring_constant_object_section, 0, ".section __DATA, __cfstring", 0)
+/* APPLE LOCAL end constant cfstrings */
+
+/* Fix-and-Continue image marker. */
+DEF_SECTION (objc_image_info_section, 0,
+ ".section __OBJC, __image_info, regular, no_dead_strip", 1)
+DEF_SECTION (objc_class_names_section, 0, ".objc_class_names", 1)
+DEF_SECTION (objc_meth_var_names_section, 0, ".objc_meth_var_names", 1)
+DEF_SECTION (objc_meth_var_types_section, 0, ".objc_meth_var_types", 1)
+DEF_SECTION (objc_cls_refs_section, SECTION_MERGE, ".objc_cls_refs", 1)
+
+DEF_SECTION (machopic_lazy_symbol_ptr_section, 0, ".lazy_symbol_pointer", 0)
+/* APPLE LOCAL begin dynamic-no-pic */
+DEF_SECTION (machopic_lazy_symbol_ptr2_section, 0,
+ ".section __DATA, __la_sym_ptr2,lazy_symbol_pointers", 0)
+DEF_SECTION (machopic_lazy_symbol_ptr3_section, 0,
+ ".section __DATA, __la_sym_ptr3,lazy_symbol_pointers", 0)
+/* APPLE LOCAL end dynamic-no-pic */
+/* APPLE LOCAL begin AT&T-style stub 4164563 */
+DEF_SECTION (machopic_nl_symbol_ptr_section, 0,
+ MACHOPIC_NL_SYMBOL_PTR_SECTION, 0)
+/* APPLE LOCAL end AT&T-style stub 4164563 */
+DEF_SECTION (machopic_symbol_stub_section, 0, ".symbol_stub", 0)
+DEF_SECTION (machopic_symbol_stub1_section, 0,
+ ".section __TEXT,__symbol_stub1,symbol_stubs,"
+ "pure_instructions,16", 0)
+/* APPLE LOCAL begin ARM pic support */
+DEF_SECTION (machopic_symbol_stub4_section, 0,
+ ".section __TEXT,__symbol_stub4,symbol_stubs,"
+ "none,12", 0)
+/* APPLE LOCAL end ARM pic support */
+DEF_SECTION (machopic_picsymbol_stub_section, 0, ".picsymbol_stub", 0)
+DEF_SECTION (machopic_picsymbol_stub1_section, 0,
+ ".section __TEXT,__picsymbolstub1,symbol_stubs,"
+ "pure_instructions,32", 0)
+/* APPLE LOCAL begin dynamic-no-pic */
+DEF_SECTION (machopic_picsymbol_stub2_section, 0,
+ ".section __TEXT,__picsymbolstub2,symbol_stubs,pure_instructions,25", 0)
+/* APPLE LOCAL end dynamic-no-pic */
+/* APPLE LOCAL begin AT&T-style stub 4164563 */
+DEF_SECTION (machopic_picsymbol_stub3_section, 0,
+ ".section __IMPORT,__jump_table,symbol_stubs,self_modifying_code+pure_instructions,5", 0)
+/* APPLE LOCAL end AT&T-style stub 4164563 */
+/* APPLE LOCAL begin ARM pic support */
+DEF_SECTION (machopic_picsymbol_stub4_section, 0,
+ ".section __TEXT,__picsymbolstub4,symbol_stubs,none,16", 0)
+/* APPLE LOCAL end ARM pic support */
+DEF_SECTION (darwin_exception_section, 0,
+ /* APPLE LOCAL EH __TEXT __gcc_except_tab 5819051 */
+ ".section __TEXT,__gcc_except_tab", 0)
+DEF_SECTION (darwin_eh_frame_section, 0,
+ ".section " EH_FRAME_SECTION_NAME ",__eh_frame"
+ EH_FRAME_SECTION_ATTR, 0)
+/* APPLE LOCAL begin ObjC new abi - radar 4792158 */
+/* APPLE LOCAL begin radar 5575115 */
+DEF_SECTION (objc_v2_message_refs_section, 0,
+ ".section __DATA, __objc_msgrefs, coalesced", 0)
+/* APPLE LOCAL end radar 5575115 */
+/* APPLE LOCAL begin radar 6255595 */
+DEF_SECTION (objc_v2_classdefs_section, 0,
+ ".section __DATA, __objc_data", 0)
+DEF_SECTION (objc_v2_metadata_section, 0,
+ ".section __DATA, __objc_const", 0)
+/* APPLE LOCAL end radar 6255595 */
+DEF_SECTION (objc_v2_classrefs_section, 0,
+ ".section __DATA, __objc_classrefs, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_classlist_section, 0,
+ ".section __DATA, __objc_classlist, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_categorylist_section, 0,
+ ".section __DATA, __objc_catlist, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_selector_refs_section, 0,
+ ".section __DATA, __objc_selrefs, literal_pointers, no_dead_strip", 1)
+DEF_SECTION (objc_v2_nonlazy_class_section, 0,
+ ".section __DATA, __objc_nlclslist, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_nonlazy_category_section, 0,
+ ".section __DATA, __objc_nlcatlist, regular, no_dead_strip", 1)
+/* APPLE LOCAL begin radar 6351990 */
+DEF_SECTION (objc_v2_protocollist_section, 0,
+ ".section __DATA, __objc_protolist, coalesced, no_dead_strip", 1)
+DEF_SECTION (objc_v2_protocolrefs_section, 0,
+ ".section __DATA, __objc_protorefs, coalesced, no_dead_strip", 1)
+/* APPLE LOCAL end radar 6351990 */
+DEF_SECTION (objc_v2_super_classrefs_section, 0,
+ ".section __DATA, __objc_superrefs, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_image_info_section, 0,
+ ".section __DATA, __objc_imageinfo, regular, no_dead_strip", 1)
+DEF_SECTION (objc_v2_constant_string_object_section, 0,
+ ".section __DATA, __objc_stringobj, regular, no_dead_strip", 1)
+/* APPLE LOCAL end ObjC new abi - radar 4792158 */
+/* APPLE LOCAL begin radar 4585769 - Objective-C 1.0 extensions */
+DEF_SECTION (objc_class_ext_section, 0,
+ ".section __OBJC, __class_ext, regular, no_dead_strip", 1)
+DEF_SECTION (objc_prop_list_section, 0,
+ ".section __OBJC, __property, regular, no_dead_strip", 1)
+DEF_SECTION (objc_protocol_ext_section, 0,
+ ".section __OBJC, __protocol_ext, regular, no_dead_strip", 1)
+/* APPLE LOCAL end radar 4585769 - Objective-C 1.0 extensions */
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin.c b/gcc-4.2.1-5666.3/gcc/config/darwin.c
new file mode 100644
index 000000000..77a53123d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin.c
@@ -0,0 +1,2565 @@
+/* Functions for generic Darwin as target machine for GNU C compiler.
+ Copyright (C) 1989, 1990, 1991, 1992, 1993, 2000, 2001, 2002, 2003, 2004,
+ 2005
+ Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+#include "reload.h"
+#include "function.h"
+#include "ggc.h"
+#include "langhooks.h"
+#include "target.h"
+#include "tm_p.h"
+#include "toplev.h"
+#include "hashtab.h"
+/* APPLE LOCAL begin constant cfstrings */
+#include "toplev.h"
+
+static tree darwin_build_constant_cfstring (tree);
+
+enum darwin_builtins
+{
+ DARWIN_BUILTIN_MIN = (int)END_BUILTINS,
+
+ DARWIN_BUILTIN_CFSTRINGMAKECONSTANTSTRING,
+ DARWIN_BUILTIN_MAX
+};
+/* APPLE LOCAL end constant cfstrings */
+
+/* Darwin supports a feature called fix-and-continue, which is used
+ for rapid turn around debugging. When code is compiled with the
+ -mfix-and-continue flag, two changes are made to the generated code
+ that allow the system to do things that it would normally not be
+ able to do easily. These changes allow gdb to load a recompiled
+ translation unit into a running program and to replace the existing
+ functions and methods of that translation unit with the versions in
+ the newly compiled translation unit. The new functions access
+ the existing static symbols from the old translation unit, if the
+ symbol existed in the unit to be replaced, and from the new
+ translation unit, otherwise.
+
+ The changes are to insert 5 nops at the beginning of all functions
+ and to use indirection to get at static symbols. The 5 nops
+ are required by consumers of the generated code. Currently, gdb
+ uses this to patch in a jump to the overriding function, this
+ allows all uses of the old name to forward to the replacement,
+ including existing function pointers and virtual methods. See
+ rs6000_emit_prologue for the code that handles the nop insertions.
+
+ The added indirection allows gdb to redirect accesses to static
+ symbols from the newly loaded translation unit to the existing
+ symbol, if any. @code{static} symbols are special and are handled by
+ setting the second word in the .non_lazy_symbol_pointer data
+ structure to symbol. See indirect_data for the code that handles
+ the extra indirection, and machopic_output_indirection and its use
+ of MACHO_SYMBOL_STATIC for the code that handles @code{static}
+ symbol indirection. */
+
+/* APPLE LOCAL begin pragma reverse_bitfields */
+/* Nonzero if #pragma reverse_bitfields is in effect. */
+int darwin_reverse_bitfields = 0;
+/* APPLE LOCAL end pragma reverse_bitfields */
+
+/* APPLE LOCAL begin axe stubs 5571540 */
+int darwin_stubs = false;
+/* APPLE LOCAL end axe stubs 5571540 */
+
+/* Section names. */
+section * darwin_sections[NUM_DARWIN_SECTIONS];
+
+/* True if we're setting __attribute__ ((ms_struct)). */
+int darwin_ms_struct = false;
+
+/* A get_unnamed_section callback used to switch to an ObjC section.
+ DIRECTIVE is as for output_section_asm_op. */
+
+static void
+output_objc_section_asm_op (const void *directive)
+{
+ static bool been_here = false;
+
+ if (! been_here)
+ {
+ static const enum darwin_section_enum tomark[] =
+ {
+ /* written, cold -> hot */
+ objc_cat_cls_meth_section,
+ objc_cat_inst_meth_section,
+ objc_string_object_section,
+ objc_constant_string_object_section,
+ objc_selector_refs_section,
+ objc_selector_fixup_section,
+ objc_cls_refs_section,
+ objc_class_section,
+ objc_meta_class_section,
+ /* shared, hot -> cold */
+ objc_cls_meth_section,
+ objc_inst_meth_section,
+ objc_protocol_section,
+ objc_class_names_section,
+ objc_meth_var_types_section,
+ objc_meth_var_names_section,
+ objc_category_section,
+ objc_class_vars_section,
+ objc_instance_vars_section,
+ objc_module_info_section,
+ /* APPLE LOCAL begin objc2 */
+ objc_symbols_section,
+ objc_protocol_ext_section,
+ objc_class_ext_section,
+ objc_prop_list_section
+ /* APPLE LOCAL end objc2 */
+ };
+ /* APPLE LOCAL begin ObjC abi v2 - radar 4792158 */
+ static const enum darwin_section_enum tomarkv2[] =
+ {
+ objc_v2_message_refs_section,
+ /* APPLE LOCAL begin radar 6255595 */
+ objc_v2_classdefs_section,
+ objc_v2_metadata_section,
+ /* APPLE LOCAL end radar 6255595 */
+ objc_v2_classrefs_section,
+ objc_v2_classlist_section,
+ objc_v2_categorylist_section,
+ objc_v2_selector_refs_section,
+ objc_v2_nonlazy_class_section,
+ objc_v2_nonlazy_category_section,
+ objc_v2_protocollist_section,
+ objc_v2_protocolrefs_section,
+ objc_v2_super_classrefs_section,
+ objc_v2_image_info_section,
+ objc_v2_constant_string_object_section
+ };
+ /* APPLE LOCAL end ObjC abi v2 - radar 4792158 */
+ size_t i;
+
+ been_here = true;
+ /* APPLE LOCAL begin radar 4792158 */
+ /* APPLE LOCAL begin radar 4585769 - Objective-C 1.0 extensions */
+ if (flag_objc_abi == 1)
+ for (i = 0; i < ARRAY_SIZE (tomark); i++)
+ switch_to_section (darwin_sections[tomark[i]]);
+ /* APPLE LOCAL end radar 4585769 - Objective-C 1.0 extensions */
+ else if (flag_objc_abi == 2)
+ for (i = 0; i < ARRAY_SIZE (tomarkv2); i++)
+ switch_to_section (darwin_sections[tomarkv2[i]]);
+ /* APPLE LOCAL end radar 4792158 */
+ }
+ output_section_asm_op (directive);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+void
+darwin_init_sections (void)
+{
+#define DEF_SECTION(NAME, FLAGS, DIRECTIVE, OBJC) \
+ darwin_sections[NAME] = \
+ get_unnamed_section (FLAGS, (OBJC \
+ ? output_objc_section_asm_op \
+ : output_section_asm_op), \
+ "\t" DIRECTIVE);
+#include "config/darwin-sections.def"
+#undef DEF_SECTION
+
+ readonly_data_section = darwin_sections[const_section];
+ exception_section = darwin_sections[darwin_exception_section];
+ eh_frame_section = darwin_sections[darwin_eh_frame_section];
+}
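+
+/* Editorial note: the DEF_SECTION lines in darwin-sections.def form an
+   X-macro table.  The .def file is expanded here to create the section
+   objects and expanded again elsewhere to build the darwin_section_enum
+   indices used above.  The same pattern in miniature, with hypothetical
+   names:
+
+     #define MY_SECTIONS DEF_SEC (alpha) DEF_SEC (beta)
+     enum my_sections {
+     #define DEF_SEC(NAME) NAME,
+       MY_SECTIONS
+     #undef DEF_SEC
+       NUM_MY_SECTIONS
+     };
+*/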
+
+int
+name_needs_quotes (const char *name)
+{
+ int c;
+ while ((c = *name++) != '\0')
+ if (! ISIDNUM (c) && c != '.' && c != '$')
+ return 1;
+ return 0;
+}
+
+/* Return true if SYM_REF can be used without an indirection. */
+/* APPLE LOCAL dynamic-no-pic */
+int
+machopic_symbol_defined_p (rtx sym_ref)
+{
+ if (SYMBOL_REF_FLAGS (sym_ref) & MACHO_SYMBOL_FLAG_DEFINED)
+ return true;
+
+ /* If a symbol reference is local and is not an extern from another
+ file, then the symbol may be treated as defined. */
+ if (SYMBOL_REF_LOCAL_P (sym_ref) && ! SYMBOL_REF_EXTERNAL_P (sym_ref))
+ {
+ /* If the symbol references a variable and the variable is a
+ common symbol, then this symbol is not defined. */
+ if (SYMBOL_REF_FLAGS (sym_ref) & MACHO_SYMBOL_FLAG_VARIABLE)
+ {
+ tree decl = SYMBOL_REF_DECL (sym_ref);
+ if (!decl)
+ return true;
+ if (DECL_COMMON (decl))
+ return false;
+ }
+ /* APPLE LOCAL begin 6077274 */
+ /* Weak functions should always be indirected. */
+ else if (SYMBOL_REF_FLAGS (sym_ref) & SYMBOL_FLAG_FUNCTION)
+ {
+ tree decl = SYMBOL_REF_DECL (sym_ref);
+ if (decl && DECL_WEAK (decl))
+ return false;
+ }
+ /* APPLE LOCAL end 6077274 */
+ return true;
+ }
+ return false;
+}
+
+/* This module assumes that (const (symbol_ref "foo")) is a legal pic
+ reference, which will not be changed. */
+
+enum machopic_addr_class
+machopic_classify_symbol (rtx sym_ref)
+{
+ int flags;
+ bool function_p;
+
+ flags = SYMBOL_REF_FLAGS (sym_ref);
+ function_p = SYMBOL_REF_FUNCTION_P (sym_ref);
+ if (machopic_symbol_defined_p (sym_ref))
+ return (function_p
+ ? MACHOPIC_DEFINED_FUNCTION : MACHOPIC_DEFINED_DATA);
+ else
+ return (function_p
+ ? MACHOPIC_UNDEFINED_FUNCTION : MACHOPIC_UNDEFINED_DATA);
+}
+
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+
+/* Indicate when fix-and-continue style code generation is being used
+ and when a reference to data should be indirected so that it can be
+ rebound in a new translation unit to reference the original instance
+ of that data. Symbol names that are for code generation local to
+ the translation unit are bound to the new translation unit;
+ currently this means symbols that begin with L or _OBJC_;
+ otherwise, we indicate that an indirect reference should be made to
+ permit the runtime to rebind new instances of the translation unit
+ to the original instance of the data. */
+
+/* APPLE LOCAL fix-and-continue 6227434 */
+int
+indirect_data (rtx sym_ref)
+{
+ int lprefix;
+ const char *name;
+
+ /* If we aren't generating fix-and-continue code, don't do anything special. */
+ if (TARGET_FIX_AND_CONTINUE == 0)
+ return 0;
+
+ /* Otherwise, all symbols except those that begin with L or _OBJC_
+ are indirected. Symbols that begin with L or _OBJC_ are always
+ bound to the current translation unit as they are used for
+ generated local data of the translation unit. */
+
+ name = XSTR (sym_ref, 0);
+
+ lprefix = (((name[0] == '*' || name[0] == '&')
+ /* APPLE LOCAL begin fix-and-continue 6227434 */
+ && (name[1] == 'L'
+ || (name[1] == '"' && name[2] == 'L')
+ /* Don't indirect writable strings. */
+ || (name[1] == 'l' && name[2] == 'C')))
+ || (strncmp (name, "_OBJC_", 6) == 0)
+ || objc_anonymous_local_objc_name (name));
+ /* APPLE LOCAL end fix-and-continue 6227434 */
+
+ return ! lprefix;
+}
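+
+/* Editorial examples for the test above: "*Lfoo" (an assembler-local
+   label) and "_OBJC_CLASS_Foo" set lprefix and so stay bound to the
+   new translation unit, while an ordinary symbol such as "*_fred"
+   does not, and is therefore indirected so the runtime can rebind it
+   to the original instance of the data.  */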
+
+
+/* APPLE LOCAL ARM pic support */
+int
+machopic_data_defined_p (rtx sym_ref)
+{
+ if (indirect_data (sym_ref))
+ return 0;
+
+ switch (machopic_classify_symbol (sym_ref))
+ {
+ case MACHOPIC_DEFINED_DATA:
+ case MACHOPIC_DEFINED_FUNCTION:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void
+machopic_define_symbol (rtx mem)
+{
+ rtx sym_ref;
+
+ gcc_assert (GET_CODE (mem) == MEM);
+ sym_ref = XEXP (mem, 0);
+ SYMBOL_REF_FLAGS (sym_ref) |= MACHO_SYMBOL_FLAG_DEFINED;
+}
+
+static GTY(()) char * function_base;
+
+const char *
+machopic_function_base_name (void)
+{
+ /* If dynamic-no-pic is on, we should not get here. */
+ gcc_assert (!MACHO_DYNAMIC_NO_PIC_P);
+
+ if (function_base == NULL)
+ function_base =
+ (char *) ggc_alloc_string ("<pic base>", sizeof ("<pic base>"));
+
+ current_function_uses_pic_offset_table = 1;
+
+ return function_base;
+}
+
+/* Return a SYMBOL_REF for the PIC function base. */
+
+rtx
+machopic_function_base_sym (void)
+{
+ rtx sym_ref;
+
+ sym_ref = gen_rtx_SYMBOL_REF (Pmode, machopic_function_base_name ());
+ SYMBOL_REF_FLAGS (sym_ref)
+ |= (MACHO_SYMBOL_FLAG_VARIABLE | MACHO_SYMBOL_FLAG_DEFINED);
+ return sym_ref;
+}
+
+/* Return either ORIG or (const:P (minus:P ORIG PIC_BASE)), depending
+ on whether pic_base is NULL or not. */
+static inline rtx
+gen_pic_offset (rtx orig, rtx pic_base)
+{
+ if (!pic_base)
+ return orig;
+ else
+ return gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, orig, pic_base));
+}
+
+static GTY(()) const char * function_base_func_name;
+static GTY(()) int current_pic_label_num;
+
+void
+machopic_output_function_base_name (FILE *file)
+{
+ const char *current_name;
+
+ /* If dynamic-no-pic is on, we should not get here. */
+ gcc_assert (!MACHO_DYNAMIC_NO_PIC_P);
+ current_name =
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
+ if (function_base_func_name != current_name)
+ {
+ ++current_pic_label_num;
+ function_base_func_name = current_name;
+ }
+ fprintf (file, "\"L%011d$pb\"", current_pic_label_num);
+}
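+
+/* Editorial note: with current_pic_label_num == 1 the fprintf above
+   emits the quoted label "L00000000001$pb", the per-function PIC base
+   that PIC references are computed relative to.  */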
+
+/* The suffix attached to non-lazy pointer symbols. */
+#define NON_LAZY_POINTER_SUFFIX "$non_lazy_ptr"
+/* The suffix attached to stub symbols. */
+#define STUB_SUFFIX "$stub"
+
+typedef struct machopic_indirection GTY (())
+{
+ /* The SYMBOL_REF for the entity referenced. */
+ rtx symbol;
+ /* The name of the stub or non-lazy pointer. */
+ const char * ptr_name;
+ /* True iff this entry is for a stub (as opposed to a non-lazy
+ pointer). */
+ bool stub_p;
+ /* True iff this stub or non-lazy pointer has been referenced. */
+ bool used;
+ /* APPLE LOCAL begin ARM 5440570 */
+ /* True iff this stub or non-lazy pointer has been output. */
+ bool emitted;
+ /* APPLE LOCAL end ARM 5440570 */
+} machopic_indirection;
+
+/* A table mapping stub names and non-lazy pointer names to
+ SYMBOL_REFs for the stubbed-to and pointed-to entities. */
+
+static GTY ((param_is (struct machopic_indirection))) htab_t
+ machopic_indirections;
+
+/* APPLE LOCAL begin 5440570 */
+/* Used to identify that usage information has changed while
+ outputting the stubs. */
+static GTY (()) bool indirection_uses_changed;
+/* APPLE LOCAL end 5440570 */
+
+/* Return a hash value for a SLOT in the indirections hash table. */
+
+static hashval_t
+machopic_indirection_hash (const void *slot)
+{
+ const machopic_indirection *p = (const machopic_indirection *) slot;
+ return htab_hash_string (p->ptr_name);
+}
+
+/* Returns true if the KEY is the same as that associated with
+ SLOT. */
+
+static int
+machopic_indirection_eq (const void *slot, const void *key)
+{
+ return strcmp (((const machopic_indirection *) slot)->ptr_name, key) == 0;
+}
+
+/* Return the name of the non-lazy pointer (if STUB_P is false) or
+ stub (if STUB_P is true) corresponding to the given name. */
+
+const char *
+machopic_indirection_name (rtx sym_ref, bool stub_p)
+{
+ char *buffer;
+ const char *name = XSTR (sym_ref, 0);
+ size_t namelen = strlen (name);
+ machopic_indirection *p;
+ void ** slot;
+ bool saw_star = false;
+ bool needs_quotes;
+ const char *suffix;
+ const char *prefix = user_label_prefix;
+ const char *quote = "";
+ tree id;
+
+ id = maybe_get_identifier (name);
+ if (id)
+ {
+ tree id_orig = id;
+
+ while (IDENTIFIER_TRANSPARENT_ALIAS (id))
+ id = TREE_CHAIN (id);
+ if (id != id_orig)
+ {
+ name = IDENTIFIER_POINTER (id);
+ namelen = strlen (name);
+ }
+ }
+
+ if (name[0] == '*')
+ {
+ saw_star = true;
+ prefix = "";
+ ++name;
+ --namelen;
+ }
+
+ needs_quotes = name_needs_quotes (name);
+ if (needs_quotes)
+ {
+ quote = "\"";
+ }
+
+ if (stub_p)
+ suffix = STUB_SUFFIX;
+ else
+ suffix = NON_LAZY_POINTER_SUFFIX;
+
+ buffer = alloca (strlen ("&L")
+ + strlen (prefix)
+ + namelen
+ + strlen (suffix)
+ + 2 * strlen (quote)
+ + 1 /* '\0' */);
+
+ /* Construct the name of the non-lazy pointer or stub. */
+ sprintf (buffer, "&%sL%s%s%s%s", quote, prefix, name, suffix, quote);
+
+ if (!machopic_indirections)
+ machopic_indirections = htab_create_ggc (37,
+ machopic_indirection_hash,
+ machopic_indirection_eq,
+ /*htab_del=*/NULL);
+
+ slot = htab_find_slot_with_hash (machopic_indirections, buffer,
+ htab_hash_string (buffer), INSERT);
+ if (*slot)
+ {
+ p = (machopic_indirection *) *slot;
+ }
+ else
+ {
+ p = (machopic_indirection *) ggc_alloc (sizeof (machopic_indirection));
+ p->symbol = sym_ref;
+ p->ptr_name = xstrdup (buffer);
+ p->stub_p = stub_p;
+ p->used = false;
+ /* APPLE LOCAL ARM 5440570 */
+ p->emitted = false;
+ *slot = p;
+ }
+
+ return p->ptr_name;
+}
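+
+/* Editorial examples of the construction above, assuming the usual
+   Darwin user_label_prefix "_": symbol "foo" yields "&L_foo$stub" or
+   "&L_foo$non_lazy_ptr"; "*bar" (which suppresses the prefix) yields
+   "&Lbar$stub"; and a name needing quotes such as "a.b" becomes
+   &"L_a.b$stub" (body wrapped in double quotes).  The leading '&' is
+   stripped again when the entry is emitted (see
+   machopic_output_indirection).  */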
+
+/* Return the name of the stub for the mcount function. */
+
+const char*
+machopic_mcount_stub_name (void)
+{
+ rtx symbol = gen_rtx_SYMBOL_REF (Pmode, "*mcount");
+ return machopic_indirection_name (symbol, /*stub_p=*/true);
+}
+
+/* APPLE LOCAL begin ARM pic support */
+/* Determine whether the specified symbol is in the indirections table. */
+int
+machopic_lookup_stub_or_non_lazy_ptr (const char *name)
+{
+ machopic_indirection *p;
+
+ if (! machopic_indirections)
+ return 0;
+
+ p = (machopic_indirection *)
+ htab_find_with_hash (machopic_indirections, name,
+ htab_hash_string (name));
+ if (p)
+ return 1;
+ else
+ return 0;
+}
+/* APPLE LOCAL end ARM pic support */
+
+/* If NAME is the name of a stub or a non-lazy pointer, mark the stub
+ or non-lazy pointer as used -- and mark the object to which the
+ pointer/stub refers as used as well, since the pointer/stub will
+ emit a reference to it. */
+
+void
+machopic_validate_stub_or_non_lazy_ptr (const char *name)
+{
+ machopic_indirection *p;
+
+ p = ((machopic_indirection *)
+ (htab_find_with_hash (machopic_indirections, name,
+ htab_hash_string (name))));
+ if (p && ! p->used)
+ {
+ const char *real_name;
+ tree id;
+
+ p->used = true;
+ /* APPLE LOCAL ARM 5440570 */
+ indirection_uses_changed = true;
+
+ /* Do what output_addr_const will do when we actually call it. */
+ if (SYMBOL_REF_DECL (p->symbol))
+ mark_decl_referenced (SYMBOL_REF_DECL (p->symbol));
+
+ real_name = targetm.strip_name_encoding (XSTR (p->symbol, 0));
+
+ id = maybe_get_identifier (real_name);
+ if (id)
+ mark_referenced (id);
+ }
+}
+
+/* Transform ORIG, which may be any data source, to the corresponding
+ source using indirections. */
+
+rtx
+machopic_indirect_data_reference (rtx orig, rtx reg)
+{
+ rtx ptr_ref = orig;
+
+ if (! MACHOPIC_INDIRECT)
+ return orig;
+
+ /* APPLE LOCAL begin dynamic-no-pic */
+ switch (GET_CODE (orig))
+ {
+ case SYMBOL_REF:
+ {
+ int defined = machopic_data_defined_p (orig);
+
+ if (defined && MACHO_DYNAMIC_NO_PIC_P)
+ {
+#if defined (TARGET_TOC)
+ /* Create a new register for CSE opportunities. */
+ rtx hi_reg = (no_new_pseudos ? reg : gen_reg_rtx (Pmode));
+ emit_insn (gen_macho_high (hi_reg, orig));
+ emit_insn (gen_macho_low (reg, hi_reg, orig));
+#else
+#if defined (TARGET_386)
+ return orig;
+#else /* defined (TARGET_386) */
+ /* some other cpu -- writeme! */
+ gcc_unreachable ();
+#endif /* defined (TARGET_386) */
+#endif
+ return reg;
+ }
+ else if (defined)
+ {
+#if defined (TARGET_TOC) || defined (HAVE_lo_sum)
+ rtx pic_base = machopic_function_base_sym ();
+ rtx offset = gen_pic_offset (orig, pic_base);
+#endif
+
+#if defined (TARGET_TOC) /* i.e., PowerPC */
+ rtx hi_sum_reg = (no_new_pseudos ? reg : gen_reg_rtx (Pmode));
+
+ gcc_assert (reg);
+
+ emit_insn (gen_rtx_SET (Pmode, hi_sum_reg,
+ gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
+ gen_rtx_HIGH (Pmode, offset))));
+ emit_insn (gen_rtx_SET (Pmode, reg,
+ gen_rtx_LO_SUM (Pmode, hi_sum_reg, offset)));
+
+ orig = reg;
+#else
+#if defined (HAVE_lo_sum)
+ gcc_assert (reg);
+
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_HIGH (Pmode, offset)));
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_LO_SUM (Pmode, reg, offset)));
+ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+
+ orig = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, reg);
+#endif
+#endif
+ return orig;
+ }
+
+ ptr_ref = (gen_rtx_SYMBOL_REF
+ (Pmode,
+ machopic_indirection_name (orig, /*stub_p=*/false)));
+
+ SYMBOL_REF_DATA (ptr_ref) = SYMBOL_REF_DATA (orig);
+
+ ptr_ref = gen_const_mem (Pmode, ptr_ref);
+ machopic_define_symbol (ptr_ref);
+
+#ifdef TARGET_386
+ if (reg && MACHO_DYNAMIC_NO_PIC_P)
+ {
+ emit_insn (gen_rtx_SET (Pmode, reg, ptr_ref));
+ ptr_ref = reg;
+ }
+#endif /* TARGET_386 */
+
+ return ptr_ref;
+ }
+ break;
+
+ case CONST:
+ {
+ /* If "(const (plus ...", walk the PLUS and return that result.
+ PLUS processing (below) will restore the "(const ..." if
+ appropriate. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ return machopic_indirect_data_reference (XEXP (orig, 0), reg);
+ else
+ return orig;
+ }
+ break;
+
+ case MEM:
+ {
+ XEXP (ptr_ref, 0) = machopic_indirect_data_reference (XEXP (orig, 0), reg);
+ return ptr_ref;
+ }
+ break;
+
+ case PLUS:
+ {
+ rtx base, result;
+
+ /* When the target is i386, this code prevents crashes due to the
+ compiler's ignorance of how to move the PIC base register to
+ other registers. (The reload phase sometimes introduces such
+ insns.) */
+ if (GET_CODE (XEXP (orig, 0)) == REG
+ && REGNO (XEXP (orig, 0)) == PIC_OFFSET_TABLE_REGNUM
+#ifdef TARGET_386
+ /* Prevent the same register from being erroneously used
+ as both the base and index registers. */
+ && GET_CODE (XEXP (orig, 1)) == CONST
+#endif
+ && reg)
+ {
+ emit_move_insn (reg, XEXP (orig, 0));
+ XEXP (ptr_ref, 0) = reg;
+ return ptr_ref;
+ }
+
+ /* Legitimize both operands of the PLUS. */
+ base = machopic_indirect_data_reference (XEXP (orig, 0), reg);
+ orig = machopic_indirect_data_reference (XEXP (orig, 1),
+ (base == reg ? 0 : reg));
+ if (MACHOPIC_INDIRECT && GET_CODE (orig) == CONST_INT)
+ result = plus_constant (base, INTVAL (orig));
+ else
+ result = gen_rtx_PLUS (Pmode, base, orig);
+
+ if (MACHOPIC_JUST_INDIRECT && GET_CODE (base) == MEM)
+ {
+ if (reg)
+ {
+ emit_move_insn (reg, result);
+ result = reg;
+ }
+ else
+ result = force_reg (GET_MODE (result), result);
+ }
+ return result;
+ }
+ break;
+
+ default:
+ break;
+ } /* End switch (GET_CODE (orig)) */
+ /* APPLE LOCAL end dynamic-no-pic */
+ return ptr_ref;
+}
+
+/* APPLE LOCAL begin 4380289 */
+/* Force a Mach-O stub. Expects MEM(SYM_REF(foo)). No sanity
+ checking. */
+static inline rtx
+machopic_force_stub (rtx target)
+{
+ rtx sym_ref = XEXP (target, 0);
+ rtx new_target;
+ enum machine_mode mem_mode = GET_MODE (target);
+ enum machine_mode sym_mode = GET_MODE (XEXP (target, 0));
+ const char *stub_name = XSTR (sym_ref, 0);
+
+ stub_name = machopic_indirection_name (sym_ref, /*stub_p=*/true);
+
+ new_target = gen_rtx_MEM (mem_mode, gen_rtx_SYMBOL_REF (sym_mode, stub_name));
+ SYMBOL_REF_DATA (XEXP (new_target, 0)) = SYMBOL_REF_DATA (sym_ref);
+ MEM_READONLY_P (new_target) = 1;
+ MEM_NOTRAP_P (new_target) = 1;
+ return new_target;
+}
+
+/* Like machopic_indirect_call_target, but always stubify,
+ and don't re-stubify anything already stubified. */
+rtx
+machopic_force_indirect_call_target (rtx target)
+{
+ if (MEM_P (target))
+ {
+ rtx sym_ref = XEXP (target, 0);
+ const char *stub_name = XSTR (sym_ref, 0);
+ unsigned int stub_name_length = strlen (stub_name);
+
+ /* If "$stub" suffix absent, add it. */
+ if (stub_name_length < 6 || strcmp ("$stub", stub_name + stub_name_length - 5))
+ target = machopic_force_stub (target);
+ }
+
+ return target;
+}
+/* APPLE LOCAL end 4380289 */
+
+/* Transform TARGET (a MEM), which is a function call target, to the
+ corresponding symbol_stub if necessary. Return a new MEM. */
+
+rtx
+machopic_indirect_call_target (rtx target)
+{
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ if (! darwin_stubs)
+ return target;
+ /* APPLE LOCAL end axe stubs 5571540 */
+
+ if (GET_CODE (target) != MEM)
+ return target;
+
+ if (MACHOPIC_INDIRECT
+ && GET_CODE (XEXP (target, 0)) == SYMBOL_REF
+ && !(SYMBOL_REF_FLAGS (XEXP (target, 0))
+ & MACHO_SYMBOL_FLAG_DEFINED))
+ /* APPLE LOCAL begin 4380289 */
+ target = machopic_force_stub (target);
+ /* APPLE LOCAL end 4380289 */
+
+ return target;
+}
+
+rtx
+machopic_legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
+{
+ rtx pic_ref = orig;
+
+ if (! MACHOPIC_INDIRECT)
+ return orig;
+
+ /* First handle a simple SYMBOL_REF or LABEL_REF. */
+ if (GET_CODE (orig) == LABEL_REF
+ || GET_CODE (orig) == SYMBOL_REF)
+ {
+ /* addr(foo) = &func+(foo-func) */
+ rtx pic_base;
+
+ orig = machopic_indirect_data_reference (orig, reg);
+
+ if (GET_CODE (orig) == PLUS
+ && GET_CODE (XEXP (orig, 0)) == REG)
+ {
+ if (reg == 0)
+ return force_reg (mode, orig);
+
+ emit_move_insn (reg, orig);
+ return reg;
+ }
+
+ /* If dynamic-no-pic is on, we don't have a PIC base. */
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ pic_base = NULL;
+ else
+ pic_base = machopic_function_base_sym ();
+
+ if (GET_CODE (orig) == MEM)
+ {
+ if (reg == 0)
+ {
+ gcc_assert (!reload_in_progress);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+#ifdef HAVE_lo_sum
+ if (MACHO_DYNAMIC_NO_PIC_P
+ && (GET_CODE (XEXP (orig, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (orig, 0)) == LABEL_REF))
+ {
+#if defined (TARGET_TOC) /* ppc */
+ rtx temp_reg = (no_new_pseudos) ? reg : gen_reg_rtx (Pmode);
+ rtx asym = XEXP (orig, 0);
+ rtx mem;
+
+ emit_insn (gen_macho_high (temp_reg, asym));
+ mem = gen_const_mem (GET_MODE (orig),
+ gen_rtx_LO_SUM (Pmode, temp_reg, asym));
+ emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+#else
+ /* Some other CPU -- WriteMe! Right now no other platform can use dynamic-no-pic. */
+ gcc_unreachable ();
+#endif
+ pic_ref = reg;
+ }
+ else
+ if (GET_CODE (XEXP (orig, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (orig, 0)) == LABEL_REF)
+ {
+ rtx offset = gen_pic_offset (XEXP (orig, 0), pic_base);
+#if defined (TARGET_TOC) /* i.e., PowerPC */
+ /* Generating a new reg may expose opportunities for
+ common subexpression elimination. */
+ rtx hi_sum_reg = no_new_pseudos ? reg : gen_reg_rtx (Pmode);
+ rtx mem;
+ rtx insn;
+ rtx sum;
+
+ sum = gen_rtx_HIGH (Pmode, offset);
+ if (! MACHO_DYNAMIC_NO_PIC_P)
+ sum = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, sum);
+
+ emit_insn (gen_rtx_SET (Pmode, hi_sum_reg, sum));
+
+ mem = gen_const_mem (GET_MODE (orig),
+ gen_rtx_LO_SUM (Pmode,
+ hi_sum_reg, offset));
+ insn = emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, pic_ref,
+ REG_NOTES (insn));
+
+ pic_ref = reg;
+#else
+ emit_insn (gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ PIC_OFFSET_TABLE_REGNUM)));
+
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_HIGH (Pmode,
+ gen_rtx_CONST (Pmode,
+ offset))));
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_LO_SUM (Pmode, reg,
+ gen_rtx_CONST (Pmode, offset))));
+ pic_ref = gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx, reg);
+#endif
+ }
+ else
+#endif /* HAVE_lo_sum */
+ {
+ rtx pic = pic_offset_table_rtx;
+ if (GET_CODE (pic) != REG)
+ {
+ emit_move_insn (reg, pic);
+ pic = reg;
+ }
+#if 0
+ emit_insn (gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ PIC_OFFSET_TABLE_REGNUM)));
+#endif
+
+ if (reload_in_progress)
+ regs_ever_live[REGNO (pic)] = 1;
+ pic_ref = gen_rtx_PLUS (Pmode, pic,
+ gen_pic_offset (XEXP (orig, 0),
+ pic_base));
+ }
+
+#if !defined (TARGET_TOC)
+ emit_move_insn (reg, pic_ref);
+ pic_ref = gen_const_mem (GET_MODE (orig), reg);
+#endif
+ }
+ else
+ {
+
+#ifdef HAVE_lo_sum
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+ rtx offset = gen_pic_offset (orig, pic_base);
+#if defined (TARGET_TOC) /* i.e., PowerPC */
+ rtx hi_sum_reg;
+
+ if (reg == 0)
+ {
+ gcc_assert (!reload_in_progress);
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ hi_sum_reg = reg;
+
+ emit_insn (gen_rtx_SET (Pmode, hi_sum_reg,
+ (MACHO_DYNAMIC_NO_PIC_P)
+ ? gen_rtx_HIGH (Pmode, offset)
+ : gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx,
+ gen_rtx_HIGH (Pmode,
+ offset))));
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_LO_SUM (Pmode,
+ hi_sum_reg, offset)));
+ pic_ref = reg;
+#else
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_HIGH (Pmode, offset)));
+ emit_insn (gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_LO_SUM (Pmode, reg, offset)));
+ pic_ref = gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx, reg);
+#endif
+ }
+ else
+#endif /* HAVE_lo_sum */
+ {
+ if (REG_P (orig)
+ || GET_CODE (orig) == SUBREG)
+ {
+ return orig;
+ }
+ else
+ {
+ rtx pic = pic_offset_table_rtx;
+ if (GET_CODE (pic) != REG)
+ {
+ emit_move_insn (reg, pic);
+ pic = reg;
+ }
+#if 0
+ emit_insn (gen_rtx_USE (VOIDmode,
+ pic_offset_table_rtx));
+#endif
+ if (reload_in_progress)
+ regs_ever_live[REGNO (pic)] = 1;
+ pic_ref = gen_rtx_PLUS (Pmode,
+ pic,
+ gen_pic_offset (orig, pic_base));
+ }
+ }
+ }
+
+ if (GET_CODE (pic_ref) != REG)
+ {
+ if (reg != 0)
+ {
+ emit_move_insn (reg, pic_ref);
+ return reg;
+ }
+ else
+ {
+ return force_reg (mode, pic_ref);
+ }
+ }
+ else
+ {
+ return pic_ref;
+ }
+ }
+
+ else if (GET_CODE (orig) == SYMBOL_REF)
+ return orig;
+
+ else if (GET_CODE (orig) == PLUS
+ && (GET_CODE (XEXP (orig, 0)) == MEM
+ || GET_CODE (XEXP (orig, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (orig, 0)) == LABEL_REF)
+ && XEXP (orig, 0) != pic_offset_table_rtx
+ && GET_CODE (XEXP (orig, 1)) != REG)
+
+ {
+ rtx base;
+ int is_complex = (GET_CODE (XEXP (orig, 0)) == MEM);
+
+ base = machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, reg);
+ orig = machopic_legitimize_pic_address (XEXP (orig, 1),
+ Pmode, (base == reg ? 0 : reg));
+ if (GET_CODE (orig) == CONST_INT)
+ {
+ pic_ref = plus_constant (base, INTVAL (orig));
+ is_complex = 1;
+ }
+ else
+ pic_ref = gen_rtx_PLUS (Pmode, base, orig);
+
+ /* APPLE LOCAL begin gen ADD */
+#ifdef MASK_80387
+ {
+ rtx mem, other;
+
+ if (GET_CODE (orig) == MEM) {
+ mem = orig; other = base;
+ /* Swap the kids only if there is only one MEM, and it's on the right. */
+ if (GET_CODE (base) != MEM) {
+ XEXP (pic_ref, 0) = orig;
+ XEXP (pic_ref, 1) = base;
+ }
+ }
+ else if (GET_CODE (base) == MEM) {
+ mem = base; other = orig;
+ } else
+ mem = other = NULL_RTX;
+
+ /* Both kids are MEMs. */
+ if (other && GET_CODE (other) == MEM)
+ other = force_reg (GET_MODE (other), other);
+
+ /* The x86 can't post-index a MEM; emit an ADD instruction to handle this. */
+ if (mem && GET_CODE (mem) == MEM) {
+ if ( ! reload_in_progress) {
+ rtx set = gen_rtx_SET (VOIDmode, reg, pic_ref);
+ rtx clobber_cc = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ pic_ref = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber_cc));
+ emit_insn (pic_ref);
+ pic_ref = reg;
+ is_complex = 0;
+ }
+ }
+ }
+#endif
+ /* APPLE LOCAL end gen ADD */
+
+ if (reg && is_complex)
+ {
+ emit_move_insn (reg, pic_ref);
+ pic_ref = reg;
+ }
+ /* Likewise, should we set special REG_NOTEs here? */
+ }
+
+ else if (GET_CODE (orig) == CONST)
+ {
+ return machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, reg);
+ }
+
+ else if (GET_CODE (orig) == MEM
+ && GET_CODE (XEXP (orig, 0)) == SYMBOL_REF)
+ {
+ /* APPLE LOCAL begin use new pseudo for temp; reusing reg confuses PRE */
+ rtx tempreg = reg;
+ rtx addr;
+ if ( !no_new_pseudos )
+ tempreg = gen_reg_rtx (Pmode);
+ addr = machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, tempreg);
+ /* APPLE LOCAL end use new pseudo for temp; reusing reg confuses PRE */
+ addr = replace_equiv_address (orig, addr);
+ emit_move_insn (reg, addr);
+ pic_ref = reg;
+ }
+
+ return pic_ref;
+}
+
+/* Output the stub or non-lazy pointer in *SLOT, if it has been used.
+ DATA is the FILE* for assembly output. Called from
+ htab_traverse. */
+
+static int
+machopic_output_indirection (void **slot, void *data)
+{
+ machopic_indirection *p = *((machopic_indirection **) slot);
+ FILE *asm_out_file = (FILE *) data;
+ rtx symbol;
+ const char *sym_name;
+ const char *ptr_name;
+
+ /* APPLE LOCAL ARM 5440570 */
+ if (!p->used || p->emitted)
+ return 1;
+
+ symbol = p->symbol;
+ sym_name = XSTR (symbol, 0);
+ ptr_name = p->ptr_name;
+
+ if (p->stub_p)
+ {
+ char *sym;
+ char *stub;
+ tree id;
+
+ id = maybe_get_identifier (sym_name);
+ if (id)
+ {
+ tree id_orig = id;
+
+ while (IDENTIFIER_TRANSPARENT_ALIAS (id))
+ id = TREE_CHAIN (id);
+ if (id != id_orig)
+ sym_name = IDENTIFIER_POINTER (id);
+ }
+
+ sym = alloca (strlen (sym_name) + 2);
+ if (sym_name[0] == '*' || sym_name[0] == '&')
+ strcpy (sym, sym_name + 1);
+ else if (sym_name[0] == '-' || sym_name[0] == '+')
+ strcpy (sym, sym_name);
+ else
+ sprintf (sym, "%s%s", user_label_prefix, sym_name);
+
+ stub = alloca (strlen (ptr_name) + 2);
+ if (ptr_name[0] == '*' || ptr_name[0] == '&')
+ strcpy (stub, ptr_name + 1);
+ else
+ sprintf (stub, "%s%s", user_label_prefix, ptr_name);
+
+ machopic_output_stub (asm_out_file, sym, stub);
+ }
+ else if (! indirect_data (symbol)
+ && (machopic_symbol_defined_p (symbol)
+ || SYMBOL_REF_LOCAL_P (symbol)))
+ {
+ switch_to_section (data_section);
+ assemble_align (GET_MODE_ALIGNMENT (Pmode));
+ assemble_label (ptr_name);
+ assemble_integer (gen_rtx_SYMBOL_REF (Pmode, sym_name),
+ GET_MODE_SIZE (Pmode),
+ GET_MODE_ALIGNMENT (Pmode), 1);
+ }
+ else
+ {
+ rtx init = const0_rtx;
+
+ switch_to_section (darwin_sections[machopic_nl_symbol_ptr_section]);
+ assemble_name (asm_out_file, ptr_name);
+ fprintf (asm_out_file, ":\n");
+
+ fprintf (asm_out_file, "\t.indirect_symbol ");
+ assemble_name (asm_out_file, sym_name);
+ fprintf (asm_out_file, "\n");
+
+ /* Variables that are marked with MACHO_SYMBOL_STATIC need to
+ have their symbol name instead of 0 in the second entry of
+ the non-lazy symbol pointer data structure when they are
+ defined. This allows the runtime to rebind newer instances
+ of the translation unit with the original instance of the
+ symbol. */
+
+ if ((SYMBOL_REF_FLAGS (symbol) & MACHO_SYMBOL_STATIC)
+ && machopic_symbol_defined_p (symbol))
+ init = gen_rtx_SYMBOL_REF (Pmode, sym_name);
+
+ assemble_integer (init, GET_MODE_SIZE (Pmode),
+ GET_MODE_ALIGNMENT (Pmode), 1);
+ }
+ /* APPLE LOCAL ARM 5440570 */
+ p->emitted = true;
+
+ return 1;
+}
+
+void
+machopic_finish (FILE *asm_out_file)
+{
+ if (machopic_indirections)
+ /* APPLE LOCAL begin 5440570 */
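+    /* A note on the loop below: writing out one indirection may cause
+       additional indirections to be marked used (signalled through
+       indirection_uses_changed), so we traverse repeatedly until a
+       pass changes nothing.  */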
+ do
+ {
+ indirection_uses_changed = false;
+ htab_traverse_noresize (machopic_indirections,
+ machopic_output_indirection,
+ asm_out_file);
+ }
+ while (indirection_uses_changed == true);
+ /* APPLE LOCAL end 5440570 */
+}
+
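+/* Return nonzero if OP is already a legitimate Mach-O PIC operand,
+   e.g. the "symbol minus pic base" difference of two defined symbols
+   of the kind produced by gen_pic_offset above.  */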
+int
+machopic_operand_p (rtx op)
+{
+ if (MACHOPIC_JUST_INDIRECT)
+ {
+ while (GET_CODE (op) == CONST)
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return machopic_symbol_defined_p (op);
+ else
+ return 0;
+ }
+
+ while (GET_CODE (op) == CONST)
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == MINUS
+ && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (op, 1)) == SYMBOL_REF
+ && machopic_symbol_defined_p (XEXP (op, 0))
+ && machopic_symbol_defined_p (XEXP (op, 1)))
+ return 1;
+
+ return 0;
+}
+
+/* This function records whether a given name corresponds to a defined
+ or undefined function or variable, for machopic_classify_ident to
+ use later. */
+
+void
+darwin_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
+{
+ rtx sym_ref;
+
+ /* Do the standard encoding things first. */
+ default_encode_section_info (decl, rtl, first);
+
+ if (TREE_CODE (decl) != FUNCTION_DECL && TREE_CODE (decl) != VAR_DECL)
+ return;
+
+ sym_ref = XEXP (rtl, 0);
+ if (TREE_CODE (decl) == VAR_DECL)
+ SYMBOL_REF_FLAGS (sym_ref) |= MACHO_SYMBOL_FLAG_VARIABLE;
+
+ if (!DECL_EXTERNAL (decl)
+ && (!TREE_PUBLIC (decl) || !DECL_WEAK (decl))
+ && ! lookup_attribute ("weakref", DECL_ATTRIBUTES (decl))
+ && ((TREE_STATIC (decl)
+ && (!DECL_COMMON (decl) || !TREE_PUBLIC (decl)))
+ || (!DECL_COMMON (decl) && DECL_INITIAL (decl)
+ && DECL_INITIAL (decl) != error_mark_node)))
+ SYMBOL_REF_FLAGS (sym_ref) |= MACHO_SYMBOL_FLAG_DEFINED;
+
+ if (! TREE_PUBLIC (decl))
+ SYMBOL_REF_FLAGS (sym_ref) |= MACHO_SYMBOL_STATIC;
+
+ /* APPLE LOCAL begin fix OBJC codegen */
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (strncmp (XSTR (sym_ref, 0), "_OBJC_", 6) == 0)
+ SYMBOL_REF_FLAGS (sym_ref) |= MACHO_SYMBOL_FLAG_DEFINED;
+ }
+ /* APPLE LOCAL end fix OBJC codegen */
+}
+
+void
+darwin_mark_decl_preserved (const char *name)
+{
+ fprintf (asm_out_file, ".no_dead_strip ");
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+}
+
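+/* Mask of relocation classes that force data into a writable
+   section: with Mach-O PIC indirection both local (1) and global (2)
+   relocations do, hence 3; otherwise none do.  (A gloss on the
+   generic reloc_rw_mask convention.)  */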
+int
+machopic_reloc_rw_mask (void)
+{
+ return MACHOPIC_INDIRECT ? 3 : 0;
+}
+
+/* APPLE LOCAL begin radar 5575115, 6255595 */
+/* This routine returns TRUE if EXP is a variable representing
+   Objective-C metadata.  */
+static inline bool
+objc_internal_variable_name (tree exp)
+{
+ if (TREE_CODE (exp) == VAR_DECL)
+ {
+ tree decl_name = DECL_NAME (exp);
+ if (decl_name && TREE_CODE (decl_name) == IDENTIFIER_NODE
+ && IDENTIFIER_POINTER (decl_name))
+ {
+ const char* name = IDENTIFIER_POINTER (decl_name);
+ return
+ (!strncmp (name, "_OBJC_", 6)
+ || !strncmp (name, "OBJC_", 5)
+ || !strncmp (name, "l_OBJC_", 7)
+ || !strncmp (name, "l_objc_", 7));
+ }
+ }
+ return false;
+}
+/* APPLE LOCAL end radar 5575115, 6255595 */
+
+section *
+machopic_select_section (tree exp, int reloc,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ section *base_section;
+ bool weak_p = (DECL_P (exp) && DECL_WEAK (exp)
+ && (lookup_attribute ("weak", DECL_ATTRIBUTES (exp))
+ || ! lookup_attribute ("weak_import",
+ DECL_ATTRIBUTES (exp))));
+
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ {
+ if (reloc == 1)
+ base_section = (weak_p
+ ? darwin_sections[text_unlikely_coal_section]
+ : unlikely_text_section ());
+ else
+ base_section = weak_p ? darwin_sections[text_coal_section] : text_section;
+ }
+ else if (decl_readonly_section (exp, reloc))
+ base_section = weak_p ? darwin_sections[const_coal_section] : darwin_sections[const_section];
+ else if (TREE_READONLY (exp) || TREE_CONSTANT (exp))
+ base_section = weak_p ? darwin_sections[const_data_coal_section] : darwin_sections[const_data_section];
+ else
+ base_section = weak_p ? darwin_sections[data_coal_section] : data_section;
+
+ /* APPLE LOCAL begin fwritable strings */
+ if (TREE_CODE (exp) == STRING_CST
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* deletion */
+ /* Copied from varasm.c:output_constant_def_contents(). 5346453 */
+ && (MAX ((HOST_WIDE_INT)TREE_STRING_LENGTH (exp),
+ int_size_in_bytes (TREE_TYPE (exp)))
+ /* APPLE LOCAL ARM signedness mismatch */
+ == (HOST_WIDE_INT) strlen (TREE_STRING_POINTER (exp)) + 1)
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ && ! flag_writable_strings)
+ return darwin_sections[cstring_section];
+ /* APPLE LOCAL end fwritable strings, 5346453 */
+ else if ((TREE_CODE (exp) == INTEGER_CST || TREE_CODE (exp) == REAL_CST)
+ && flag_merge_constants)
+ {
+ tree size = TYPE_SIZE_UNIT (TREE_TYPE (exp));
+
+ if (TREE_CODE (size) == INTEGER_CST &&
+ TREE_INT_CST_LOW (size) == 4 &&
+ TREE_INT_CST_HIGH (size) == 0)
+ return darwin_sections[literal4_section];
+ else if (TREE_CODE (size) == INTEGER_CST &&
+ TREE_INT_CST_LOW (size) == 8 &&
+ TREE_INT_CST_HIGH (size) == 0)
+ return darwin_sections[literal8_section];
+ /* APPLE LOCAL begin mainline x86_64 literal16 */
+#ifndef HAVE_GAS_LITERAL16
+#define HAVE_GAS_LITERAL16 0
+#endif
+ else if (HAVE_GAS_LITERAL16
+ && TARGET_64BIT
+ /* APPLE LOCAL end mainline x86_64 literal16 */
+ && TREE_CODE (size) == INTEGER_CST
+ && TREE_INT_CST_LOW (size) == 16
+ && TREE_INT_CST_HIGH (size) == 0)
+ return darwin_sections[literal16_section];
+ else
+ return base_section;
+ }
+ else if (TREE_CODE (exp) == CONSTRUCTOR
+ && TREE_TYPE (exp)
+ && TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
+ && TYPE_NAME (TREE_TYPE (exp)))
+ {
+ /* APPLE LOCAL constant strings */
+ extern int flag_next_runtime;
+ tree name = TYPE_NAME (TREE_TYPE (exp));
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+
+ if (!strcmp (IDENTIFIER_POINTER (name), "__builtin_ObjCString"))
+ {
+ /* APPLE LOCAL begin radar 4792158 */
+ if (flag_next_runtime)
+ {
+ if (flag_objc_abi == 2)
+ return darwin_sections[objc_v2_constant_string_object_section];
+ else
+ return darwin_sections[objc_constant_string_object_section];
+ }
+ /* APPLE LOCAL end radar 4792158 */
+ else
+ return darwin_sections[objc_string_object_section];
+ }
+ /* APPLE LOCAL begin constant strings */
+ else if (!strcmp (IDENTIFIER_POINTER (name), "__builtin_CFString"))
+ return darwin_sections[cfstring_constant_object_section];
+ /* APPLE LOCAL end constant strings */
+ else
+ return base_section;
+ }
+ /* APPLE LOCAL begin radar 5575115, 6255595 */
+ else if (objc_internal_variable_name (exp))
+ /* APPLE LOCAL end radar 5575115, 6255595 */
+ {
+ const char *name = IDENTIFIER_POINTER (DECL_NAME (exp));
+ /* APPLE LOCAL begin radar 4792158 */
+ if (flag_objc_abi == 1)
+ {
+ if (!strncmp (name, "_OBJC_CLASS_METHODS_", 20))
+ return darwin_sections[objc_cls_meth_section];
+ else if (!strncmp (name, "_OBJC_INSTANCE_METHODS_", 23))
+ return darwin_sections[objc_inst_meth_section];
+ else if (!strncmp (name, "_OBJC_CATEGORY_CLASS_METHODS_", 29))
+ return darwin_sections[objc_cat_cls_meth_section];
+ else if (!strncmp (name, "_OBJC_CATEGORY_INSTANCE_METHODS_", 32))
+ return darwin_sections[objc_cat_inst_meth_section];
+ else if (!strncmp (name, "_OBJC_CLASS_VARIABLES_", 22))
+ return darwin_sections[objc_class_vars_section];
+ else if (!strncmp (name, "_OBJC_INSTANCE_VARIABLES_", 25))
+ return darwin_sections[objc_instance_vars_section];
+ else if (!strncmp (name, "_OBJC_CLASS_PROTOCOLS_", 22))
+ return darwin_sections[objc_cat_cls_meth_section];
+ else if (!strncmp (name, "_OBJC_CLASS_NAME_", 17))
+ return darwin_sections[objc_class_names_section];
+ else if (!strncmp (name, "_OBJC_METH_VAR_NAME_", 20))
+ return darwin_sections[objc_meth_var_names_section];
+ else if (!strncmp (name, "_OBJC_METH_VAR_TYPE_", 20))
+ return darwin_sections[objc_meth_var_types_section];
+ else if (!strncmp (name, "_OBJC_CLASS_REFERENCES", 22))
+ return darwin_sections[objc_cls_refs_section];
+ else if (!strncmp (name, "_OBJC_CLASS_", 12))
+ return darwin_sections[objc_class_section];
+ else if (!strncmp (name, "_OBJC_METACLASS_", 16))
+ return darwin_sections[objc_meta_class_section];
+ else if (!strncmp (name, "_OBJC_CATEGORY_", 15))
+ return darwin_sections[objc_category_section];
+ else if (!strncmp (name, "_OBJC_SELECTOR_REFERENCES", 25))
+ return darwin_sections[objc_selector_refs_section];
+ else if (!strncmp (name, "_OBJC_SELECTOR_FIXUP", 20))
+ return darwin_sections[objc_selector_fixup_section];
+ else if (!strncmp (name, "_OBJC_SYMBOLS", 13))
+ return darwin_sections[objc_symbols_section];
+ else if (!strncmp (name, "_OBJC_MODULES", 13))
+ return darwin_sections[objc_module_info_section];
+ else if (!strncmp (name, "_OBJC_IMAGE_INFO", 16))
+ return darwin_sections[objc_image_info_section];
+ else if (!strncmp (name, "_OBJC_PROTOCOL_INSTANCE_METHODS_", 32))
+ return darwin_sections[objc_cat_inst_meth_section];
+ else if (!strncmp (name, "_OBJC_PROTOCOL_CLASS_METHODS_", 29))
+ return darwin_sections[objc_cat_cls_meth_section];
+ else if (!strncmp (name, "_OBJC_PROTOCOL_REFS_", 20))
+ return darwin_sections[objc_cat_cls_meth_section];
+ else if (!strncmp (name, "_OBJC_PROTOCOL_", 15))
+ return darwin_sections[objc_protocol_section];
+ else if (!strncmp (name, "_OBJC_CLASSEXT_", 15))
+ return darwin_sections[objc_class_ext_section];
+ else if (!strncmp (name, "_OBJC_$_PROP_LIST", 17)
+ || !strncmp (name, "_OBJC_$_PROP_PROTO", 18))
+ return darwin_sections[objc_prop_list_section];
+ else if (!strncmp (name, "_OBJC_PROTOCOLEXT", 17))
+ return darwin_sections[objc_protocol_ext_section];
+ else if (!strncmp (name, "_OBJC_PROP_NAME_ATTR_", 21))
+ return darwin_sections[cstring_section];
+ else
+ return base_section;
+ }
+ else /* flag_objc_abi == 2 */
+ {
+ if (!strncmp (name, "_OBJC_PROP_NAME_ATTR_", 21)
+ || !strncmp (name, "_OBJC_CLASS_NAME_", 17)
+ || !strncmp (name, "_OBJC_METH_VAR_NAME_", 20)
+ || !strncmp (name, "_OBJC_METH_VAR_TYPE_", 20))
+ return darwin_sections[cstring_section];
+ else if (!strncmp (name, "_OBJC_CLASSLIST_REFERENCES_", 27))
+ return darwin_sections[objc_v2_classrefs_section];
+ else if (!strncmp (name, "_OBJC_CLASSLIST_SUP_REFS_", 25))
+ return darwin_sections[objc_v2_super_classrefs_section];
+ /* APPLE LOCAL radar 5575115 - radar 6252174 */
+ else if (!strncmp (name, "l_objc_msgSend", 14))
+ return darwin_sections[objc_v2_message_refs_section];
+ else if (!strncmp (name, "_OBJC_LABEL_CLASS_", 18))
+ return darwin_sections[objc_v2_classlist_section];
+ /* APPLE LOCAL radar 6351990 */
+ else if (!strncmp (name, "l_OBJC_LABEL_PROTOCOL_", 22))
+ return darwin_sections[objc_v2_protocollist_section];
+ else if (!strncmp (name, "_OBJC_LABEL_CATEGORY_", 21))
+ return darwin_sections[objc_v2_categorylist_section];
+ else if (!strncmp (name, "_OBJC_LABEL_NONLAZY_CLASS_", 26))
+ return darwin_sections[objc_v2_nonlazy_class_section];
+ else if (!strncmp (name, "_OBJC_LABEL_NONLAZY_CATEGORY_", 29))
+ return darwin_sections[objc_v2_nonlazy_category_section];
+ /* APPLE LOCAL radar 6351990 */
+ else if (!strncmp (name, "l_OBJC_PROTOCOL_REFERENCE_", 26))
+ return darwin_sections[objc_v2_protocolrefs_section];
+ else if (!strncmp (name, "_OBJC_SELECTOR_REFERENCES", 25))
+ return darwin_sections[objc_v2_selector_refs_section];
+ else if (!strncmp (name, "_OBJC_IMAGE_INFO", 16))
+ return darwin_sections[objc_v2_image_info_section];
+ /* APPLE LOCAL begin radar 6255595 */
+ else if (!strncmp (name, "OBJC_CLASS_$_", 13)
+ || !strncmp (name, "OBJC_METACLASS_$_", 17))
+ return darwin_sections[objc_v2_classdefs_section];
+ else
+ return (base_section == data_section) ?
+ darwin_sections[objc_v2_metadata_section] : base_section;
+ /* APPLE LOCAL end radar 6255595 */
+ }
+ /* APPLE LOCAL end radar 4792158 */
+ }
+ /* APPLE LOCAL coalescing */
+ /* Removed special handling of '::operator new' and '::operator delete'. */
+ /* APPLE LOCAL begin darwin_set_section_for_var_p */
+ else
+ return darwin_set_section_for_var_p (exp, reloc, align, base_section);
+ /* APPLE LOCAL end darwin_set_section_for_var_p */
+}
+
+/* This can be called with address expressions as "rtx".
+ They must go in "const". */
+
+section *
+machopic_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ if (GET_MODE_SIZE (mode) == 8
+ && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE))
+ return darwin_sections[literal8_section];
+ else if (GET_MODE_SIZE (mode) == 4
+ && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE))
+ return darwin_sections[literal4_section];
+ /* APPLE LOCAL begin mainline x86_64 literal16 */
+ else if (HAVE_GAS_LITERAL16
+ && TARGET_64BIT
+ /* APPLE LOCAL end mainline x86_64 literal16 */
+ && GET_MODE_SIZE (mode) == 16
+ && (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_DOUBLE
+ || GET_CODE (x) == CONST_VECTOR))
+ return darwin_sections[literal16_section];
+ else if (MACHOPIC_INDIRECT
+ && (GET_CODE (x) == SYMBOL_REF
+ || GET_CODE (x) == CONST
+ || GET_CODE (x) == LABEL_REF))
+ return darwin_sections[const_data_section];
+ else
+ return darwin_sections[const_section];
+}
+
+void
+machopic_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+ if (MACHOPIC_INDIRECT)
+ switch_to_section (darwin_sections[mod_init_section]);
+ else
+ switch_to_section (darwin_sections[constructor_section]);
+ assemble_align (POINTER_SIZE);
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+
+ if (! MACHOPIC_INDIRECT)
+ fprintf (asm_out_file, ".reference .constructors_used\n");
+}
+
+void
+machopic_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
+{
+ if (MACHOPIC_INDIRECT)
+ switch_to_section (darwin_sections[mod_term_section]);
+ else
+ switch_to_section (darwin_sections[destructor_section]);
+ assemble_align (POINTER_SIZE);
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+
+ if (! MACHOPIC_INDIRECT)
+ fprintf (asm_out_file, ".reference .destructors_used\n");
+}
+
+void
+darwin_globalize_label (FILE *stream, const char *name)
+{
+ if (!!strncmp (name, "_OBJC_", 6))
+ default_globalize_label (stream, name);
+}
+
+/* APPLE LOCAL begin assembly "abort" directive */
+/* This can be called instead of EXIT. It will emit a '.abort' directive
+ into any existing assembly file, causing assembly to immediately abort,
+ thus preventing the assembler from spewing out numerous, irrelevant
+ error messages. */
+
+void
+abort_assembly_and_exit (int status)
+{
+ /* If we're aborting, get the assembler to abort, too. */
+ if (status == FATAL_EXIT_CODE && asm_out_file != 0)
+ fprintf (asm_out_file, "\n.abort\n");
+
+ exit (status);
+}
+/* APPLE LOCAL end assembly "abort" directive */
+
+/* APPLE LOCAL begin ObjC GC */
+tree
+darwin_handle_objc_gc_attribute (tree *node,
+ tree name,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree orig = *node, type;
+
+ /* Propagate GC-ness to the innermost pointee. */
+ while (POINTER_TYPE_P (orig)
+ || TREE_CODE (orig) == FUNCTION_TYPE
+ || TREE_CODE (orig) == METHOD_TYPE
+ || TREE_CODE (orig) == ARRAY_TYPE)
+ orig = TREE_TYPE (orig);
+
+ type = build_type_attribute_variant (orig,
+ tree_cons (name, args,
+ TYPE_ATTRIBUTES (orig)));
+
+ /* For some reason, build_type_attribute_variant() creates a distinct
+ type instead of a true variant! We make up for this here. */
+ /* APPLE LOCAL begin radar 4600999 */
+  /* The main variant must be preserved no matter what.  Whatever
+ main variant comes out of the call to build_type_attribute_variant
+ is bogus here. */
+ if (TYPE_MAIN_VARIANT (orig) != TYPE_MAIN_VARIANT (type))
+ {
+ TYPE_MAIN_VARIANT (type) = TYPE_MAIN_VARIANT (orig);
+ /* APPLE LOCAL end radar 4600999 */
+ TYPE_NEXT_VARIANT (type) = TYPE_NEXT_VARIANT (orig);
+ TYPE_NEXT_VARIANT (orig) = type;
+ }
+
+ *node = reconstruct_complex_type (*node, type);
+ /* No need to hang on to the attribute any longer. */
+ *no_add_attrs = true;
+
+ return NULL_TREE;
+}
+/* APPLE LOCAL end ObjC GC */
+
+/* APPLE LOCAL begin radar 5595352 */
+tree
+darwin_handle_nsobject_attribute (tree *node,
+ tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree orig = *node, type;
+ if (!POINTER_TYPE_P (orig) || TREE_CODE (TREE_TYPE (orig)) != RECORD_TYPE)
+ {
+ error ("__attribute ((NSObject)) is for pointer types only");
+ return NULL_TREE;
+ }
+ type = build_type_attribute_variant (orig,
+ tree_cons (name, NULL_TREE,
+ TYPE_ATTRIBUTES (orig)));
+  /* The main variant must be preserved no matter what.  Whatever
+ main variant comes out of the call to build_type_attribute_variant
+ is bogus here. */
+ if (TYPE_MAIN_VARIANT (orig) != TYPE_MAIN_VARIANT (type))
+ {
+ TYPE_MAIN_VARIANT (type) = TYPE_MAIN_VARIANT (orig);
+ TYPE_NEXT_VARIANT (type) = TYPE_NEXT_VARIANT (orig);
+ TYPE_NEXT_VARIANT (orig) = type;
+ }
+
+ *node = type;
+ /* No need to hang on to the attribute any longer. */
+ *no_add_attrs = true;
+ return NULL_TREE;
+}
+/* APPLE LOCAL end radar 5595352 */
+
+/* APPLE LOCAL begin darwin_set_section_for_var_p 20020226 --turly */
+
+/* This is specifically for any initialised static class constants
+ which may be output by the C++ front end at the end of compilation.
+   The SELECT_SECTION () macro won't do, because these are VAR_DECLs,
+   not STRING_CSTs or INTEGER_CSTs; by putting them in appropriate
+   sections, we save space.
+
+ FIXME: does this really do anything? Won't the DECL_WEAK test be
+ true 99% (or 100%) of the time? In the other 1% of the time,
+ shouldn't select_section be fixed instead of this hackery? */
+
+section*
+darwin_set_section_for_var_p (tree exp, int reloc, int align, section* base_section)
+{
+ if (!reloc && TREE_CODE (exp) == VAR_DECL
+ && DECL_ALIGN (exp) == align
+ && TREE_READONLY (exp) && DECL_INITIAL (exp)
+ && ! DECL_WEAK (exp))
+ {
+ /* Put constant string vars in ".cstring" section. */
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (exp))) == INTEGER_TYPE
+ && integer_onep (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (exp))))
+ && TREE_CODE (DECL_INITIAL (exp)) == STRING_CST)
+ {
+
+ /* Compare string length with actual number of characters
+ the compiler will write out (which is not necessarily
+ TREE_STRING_LENGTH, in the case of a constant array of
+ characters that is not null-terminated). Select appropriate
+ section accordingly. */
+
+	  if (MIN (TREE_STRING_LENGTH (DECL_INITIAL (exp)),
+ int_size_in_bytes (TREE_TYPE (exp)))
+ == (long) strlen (TREE_STRING_POINTER (DECL_INITIAL (exp))) + 1)
+ return darwin_sections[cstring_section];
+ else
+ return darwin_sections[const_section];
+ }
+ else
+ if (TREE_READONLY (exp)
+ && ((TREE_CODE (TREE_TYPE (exp)) == INTEGER_TYPE
+ && TREE_CODE (DECL_INITIAL (exp)) == INTEGER_CST)
+ || (TREE_CODE (TREE_TYPE (exp)) == REAL_TYPE
+ && TREE_CODE (DECL_INITIAL (exp)) == REAL_CST))
+ && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (DECL_INITIAL (exp))))
+ == INTEGER_CST)
+ {
+ tree size = TYPE_SIZE_UNIT (TREE_TYPE (DECL_INITIAL (exp)));
+ if (TREE_INT_CST_HIGH (size) != 0)
+ return base_section;
+
+ /* Put integer and float consts in the literal4|8|16 sections. */
+
+ if (TREE_INT_CST_LOW (size) == 4)
+ return darwin_sections[literal4_section];
+ else if (TREE_INT_CST_LOW (size) == 8)
+ return darwin_sections[literal8_section];
+ else if (HAVE_GAS_LITERAL16
+ && TARGET_64BIT
+ && TREE_INT_CST_LOW (size) == 16)
+ return darwin_sections[literal16_section];
+ }
+ }
+
+ return base_section;
+}
+/* APPLE LOCAL end darwin_set_section_for_var_p 20020226 --turly */
+
+void
+darwin_asm_named_section (const char *name,
+ unsigned int flags ATTRIBUTE_UNUSED,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ fprintf (asm_out_file, "\t.section %s\n", name);
+}
+
+void
+darwin_unique_section (tree decl ATTRIBUTE_UNUSED, int reloc ATTRIBUTE_UNUSED)
+{
+ /* Darwin does not use unique sections. */
+}
+
+/* Handle __attribute__ ((apple_kext_compatibility)).
+ This only applies to darwin kexts for 2.95 compatibility -- it shrinks the
+ vtable for classes with this attribute (and their descendants) by not
+ outputting the new 3.0 nondeleting destructor. This means that such
+ objects CANNOT be allocated on the stack or as globals UNLESS they have
+ a completely empty `operator delete'.
+ Luckily, this fits in with the Darwin kext model.
+
+ This attribute also disables gcc3's potential overlaying of derived
+ class data members on the padding at the end of the base class. */
+
+tree
+darwin_handle_kext_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ /* APPLE KEXT stuff -- only applies with pure static C++ code. */
+ if (! TARGET_KEXTABI)
+ {
+      warning (0, "%<%s%> 2.95 vtable-compatibility attribute applies "
+ "only when compiling a kext", IDENTIFIER_POINTER (name));
+
+ *no_add_attrs = true;
+ }
+ else if (TREE_CODE (*node) != RECORD_TYPE)
+ {
+      warning (0, "%<%s%> 2.95 vtable-compatibility attribute applies "
+ "only to C++ classes", IDENTIFIER_POINTER (name));
+
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+/* APPLE LOCAL begin radar 4733555 */
+/* Ick, this probably will cause other languages to die. */
+extern bool objc_method_decl (enum tree_code ARG_UNUSED (opcode));
+ /* APPLE LOCAL end radar 4733555 */
+
+/* Handle a "weak_import" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+tree
+darwin_handle_weak_import_attribute (tree *node, tree name,
+ tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags),
+ bool * no_add_attrs)
+{
+ /* APPLE LOCAL begin radar 4733555 */
+ /* The compiler should silently ignore weak_import when specified on a method. All
+ Objective-C methods are "weak" in the sense that the availability macros want. */
+ if (objc_method_decl (TREE_CODE (*node)))
+ return NULL_TREE;
+ /* APPLE LOCAL end radar 4733555 */
+ /* APPLE LOCAL begin weak_import on property 6676828 */
+ if (in_objc_property_decl_context ())
+ {
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+ /* APPLE LOCAL end weak_import on property 6676828 */
+ if (TREE_CODE (*node) != FUNCTION_DECL && TREE_CODE (*node) != VAR_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ else
+ declare_weak (*node);
+
+ return NULL_TREE;
+}
+
+/* APPLE LOCAL begin for-fsf-4_4 5480287 */ \
+/* APPLE LOCAL end for-fsf-4_4 5480287 */ \
+/* Emit a label for an FDE, making it global and/or weak if appropriate.
+ The third parameter is nonzero if this is for exception handling.
+ The fourth parameter is nonzero if this is just a placeholder for an
+ FDE that we are omitting. */
+
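+/* A sketch of the output for a public, non-weak function _foo
+   (assuming the usual "_" user label prefix):
+	.globl _foo.eh
+   _foo.eh:
+   with "_foo.eh = 0" and a .no_dead_strip instead when EMPTY is
+   nonzero.  */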
+void
+darwin_emit_unwind_label (FILE *file, tree decl, int for_eh, int empty)
+{
+/* APPLE LOCAL begin for-fsf-4_4 5480287 */ \
+ char *lab;
+
+ if (! for_eh)
+ return;
+
+ lab = concat (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), ".eh", NULL);
+
+ if (TREE_PUBLIC (decl))
+ {
+ targetm.asm_out.globalize_label (file, lab);
+ if (DECL_VISIBILITY (decl) == VISIBILITY_HIDDEN)
+ {
+ fputs ("\t.private_extern ", file);
+ assemble_name (file, lab);
+ fputc ('\n', file);
+ }
+ }
+
+ if (DECL_WEAK (decl))
+ {
+ fputs ("\t.weak_definition ", file);
+ assemble_name (file, lab);
+ fputc ('\n', file);
+ }
+
+ assemble_name (file, lab);
+ if (empty)
+ {
+ fputs (" = 0\n", file);
+
+ /* Mark the absolute .eh and .eh1 style labels as needed to
+ ensure that we don't dead code strip them and keep such
+ labels from another instantiation point until we can fix this
+ properly with group comdat support. */
+ darwin_mark_decl_preserved (lab);
+ }
+ else
+ fputs (":\n", file);
+
+/* APPLE LOCAL end for-fsf-4_4 5480287 */ \
+ free (lab);
+}
+
+static GTY(()) unsigned long except_table_label_num;
+
+void
+darwin_emit_except_table_label (FILE *file)
+{
+ char section_start_label[30];
+
+ ASM_GENERATE_INTERNAL_LABEL (section_start_label, "GCC_except_table",
+ except_table_label_num++);
+ ASM_OUTPUT_LABEL (file, section_start_label);
+}
+/* Generate a PC-relative reference to a Mach-O non-lazy-symbol. */
+
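+/* E.g. for a symbol _foo this prints (a sketch)
+	.long	L_foo$non_lazy_ptr-.
+   i.e. the distance from the current location to _foo's non-lazy
+   pointer.  */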
+void
+darwin_non_lazy_pcrel (FILE *file, rtx addr)
+{
+ const char *nlp_name;
+
+ gcc_assert (GET_CODE (addr) == SYMBOL_REF);
+
+ nlp_name = machopic_indirection_name (addr, /*stub_p=*/false);
+ fputs ("\t.long\t", file);
+ ASM_OUTPUT_LABELREF (file, nlp_name);
+ fputs ("-.", file);
+}
+
+/* Emit an assembler directive to set visibility for a symbol. The
+ only supported visibilities are VISIBILITY_DEFAULT and
+ VISIBILITY_HIDDEN; the latter corresponds to Darwin's "private
+ extern". There is no MACH-O equivalent of ELF's
+ VISIBILITY_INTERNAL or VISIBILITY_PROTECTED. */
+
+void
+darwin_assemble_visibility (tree decl, int vis)
+{
+ if (vis == VISIBILITY_DEFAULT)
+ ;
+ else if (vis == VISIBILITY_HIDDEN)
+ {
+ fputs ("\t.private_extern ", asm_out_file);
+ assemble_name (asm_out_file,
+ (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
+ fputs ("\n", asm_out_file);
+ }
+ else
+ warning (OPT_Wattributes, "internal and protected visibility attributes "
+ "not supported in this configuration; ignored");
+}
+
+/* Output a difference of two labels that will be an assembly time
+ constant if the two labels are local. (.long lab1-lab2 will be
+ very different if lab1 is at the boundary between two sections; it
+ will be relocated according to the second section, not the first,
+ so one ends up with a difference between labels in different
+ sections, which is bad in the dwarf2 eh context for instance.) */
+
+static int darwin_dwarf_label_counter;
+
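+/* A sketch of the two cases: when both labels are local (names
+   starting "*L") this emits
+	.set L$set$0,Llab1-Llab2
+	.long L$set$0
+   (".quad" when SIZE is 8); otherwise it falls back to a plain
+   ".long lab1-lab2".  */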
+void
+darwin_asm_output_dwarf_delta (FILE *file, int size,
+ const char *lab1, const char *lab2)
+{
+ int islocaldiff = (lab1[0] == '*' && lab1[1] == 'L'
+ && lab2[0] == '*' && lab2[1] == 'L');
+ const char *directive = (size == 8 ? ".quad" : ".long");
+
+ if (islocaldiff)
+ fprintf (file, "\t.set L$set$%d,", darwin_dwarf_label_counter);
+ else
+ fprintf (file, "\t%s\t", directive);
+ assemble_name_raw (file, lab1);
+ fprintf (file, "-");
+ assemble_name_raw (file, lab2);
+ if (islocaldiff)
+ fprintf (file, "\n\t%s L$set$%d", directive, darwin_dwarf_label_counter++);
+}
+
+/* Output labels for the start of the DWARF sections if necessary. */
+void
+darwin_file_start (void)
+{
+ if (write_symbols == DWARF2_DEBUG)
+ {
+ static const char * const debugnames[] =
+ {
+ DEBUG_FRAME_SECTION,
+ DEBUG_INFO_SECTION,
+ DEBUG_ABBREV_SECTION,
+ DEBUG_ARANGES_SECTION,
+ DEBUG_MACINFO_SECTION,
+ DEBUG_LINE_SECTION,
+ DEBUG_LOC_SECTION,
+ DEBUG_PUBNAMES_SECTION,
+ /* APPLE LOCAL begin pubtypes, approved for 4.3 4535968 */
+ DEBUG_PUBTYPES_SECTION,
+ /* APPLE LOCAL end pubtypes, approved for 4.3 4535968 */
+ /* APPLE LOCAL radar 6275985 debug inlined section */
+ DEBUG_INLINED_SECTION,
+ DEBUG_STR_SECTION,
+ DEBUG_RANGES_SECTION
+ };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE (debugnames); i++)
+ {
+ int namelen;
+
+ switch_to_section (get_section (debugnames[i], SECTION_DEBUG, NULL));
+
+ gcc_assert (strncmp (debugnames[i], "__DWARF,", 8) == 0);
+ gcc_assert (strchr (debugnames[i] + 8, ','));
+
+ namelen = strchr (debugnames[i] + 8, ',') - (debugnames[i] + 8);
+ fprintf (asm_out_file, "Lsection%.*s:\n", namelen, debugnames[i] + 8);
+ }
+ }
+}
+
+/* Output an offset in a DWARF section on Darwin. On Darwin, DWARF section
+ offsets are not represented using relocs in .o files; either the
+ section never leaves the .o file, or the linker or other tool is
+ responsible for parsing the DWARF and updating the offsets. */
+
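+/* A sketch: an offset LAB into "__DWARF,__debug_info" is printed as
+   the delta "LAB-Lsection__debug_info", using the section start label
+   emitted by darwin_file_start above.  */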
+void
+darwin_asm_output_dwarf_offset (FILE *file, int size, const char * lab,
+ section *base)
+{
+ char sname[64];
+ int namelen;
+
+ gcc_assert (base->common.flags & SECTION_NAMED);
+ gcc_assert (strncmp (base->named.name, "__DWARF,", 8) == 0);
+ gcc_assert (strchr (base->named.name + 8, ','));
+
+ namelen = strchr (base->named.name + 8, ',') - (base->named.name + 8);
+ sprintf (sname, "*Lsection%.*s", namelen, base->named.name + 8);
+ darwin_asm_output_dwarf_delta (file, size, lab, sname);
+}
+
+void
+darwin_file_end (void)
+{
+ machopic_finish (asm_out_file);
+ /* APPLE LOCAL constant cfstrings */
+ if (darwin_running_cxx)
+ {
+ switch_to_section (darwin_sections[constructor_section]);
+ switch_to_section (darwin_sections[destructor_section]);
+ ASM_OUTPUT_ALIGN (asm_out_file, 1);
+ }
+ /* APPLE LOCAL begin CW asm blocks */
+ if (! has_alternative_entry_points ())
+ fprintf (asm_out_file, "\t.subsections_via_symbols\n");
+ /* APPLE LOCAL end CW asm blocks */
+}
+
+/* APPLE LOCAL KEXT treat vtables as overridable */
+#define DARWIN_VTABLE_P(DECL) lang_hooks.vtable_p (DECL)
+
+/* Cross-module name binding. Darwin does not support overriding
+ functions at dynamic-link time, except for vtables in kexts. */
+
+bool
+darwin_binds_local_p (tree decl)
+{
+ return default_binds_local_p_1 (decl,
+ TARGET_KEXTABI && DARWIN_VTABLE_P (decl));
+}
+
+/* APPLE LOCAL begin constant cfstrings */
+int darwin_running_cxx;
+
+static GTY(()) tree cfstring_class_reference = NULL_TREE;
+static GTY(()) tree cfstring_type_node = NULL_TREE;
+static GTY(()) tree ccfstring_type_node = NULL_TREE;
+static GTY(()) tree pccfstring_type_node = NULL_TREE;
+static GTY(()) tree pcint_type_node = NULL_TREE;
+static GTY(()) tree pcchar_type_node = NULL_TREE;
+
+/* Store all constructed constant CFStrings in a hash table so that
+ they get uniqued properly. */
+
+struct cfstring_descriptor GTY(())
+{
+  /* The literal argument.  */
+ tree literal;
+
+ /* The resulting constant CFString. */
+ tree constructor;
+};
+
+static GTY((param_is (struct cfstring_descriptor))) htab_t cfstring_htab;
+
+static hashval_t cfstring_hash (const void *);
+static int cfstring_eq (const void *, const void *);
+
+void
+darwin_init_cfstring_builtins (void)
+{
+ tree field, fields, pccfstring_ftype_pcchar;
+ /* APPLE LOCAL begin 3996036 */
+ int save_warn_padded;
+ /* APPLE LOCAL end 3996036 */
+
+ /* struct __builtin_CFString {
+ const int *isa; (will point at
+ int flags; __CFConstantStringClassReference)
+ const char *str;
+ long length;
+ }; */
+
+ pcint_type_node
+ = build_pointer_type (build_qualified_type (integer_type_node,
+ TYPE_QUAL_CONST));
+ pcchar_type_node
+ = build_pointer_type (build_qualified_type (char_type_node,
+ TYPE_QUAL_CONST));
+ cfstring_type_node = (*lang_hooks.types.make_type) (RECORD_TYPE);
+ fields = build_decl (FIELD_DECL, NULL_TREE, pcint_type_node);
+ field = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
+ TREE_CHAIN (field) = fields; fields = field;
+ field = build_decl (FIELD_DECL, NULL_TREE, pcchar_type_node);
+ TREE_CHAIN (field) = fields; fields = field;
+ /* APPLE LOCAL radar 4493912 */
+ field = build_decl (FIELD_DECL, NULL_TREE, long_integer_type_node);
+ TREE_CHAIN (field) = fields; fields = field;
+ /* NB: The finish_builtin_struct() routine expects FIELD_DECLs in
+ reverse order! */
+ /* APPLE LOCAL begin 3996036 */
+ save_warn_padded = warn_padded;
+ warn_padded = 0;
+ /* APPLE LOCAL end 3996036 */
+ finish_builtin_struct (cfstring_type_node, "__builtin_CFString",
+ fields, NULL_TREE);
+ /* APPLE LOCAL begin 3996036 */
+ warn_padded = save_warn_padded;
+ /* APPLE LOCAL end 3996036 */
+
+ /* const struct __builtin_CFstring *
+ __builtin___CFStringMakeConstantString (const char *); */
+
+ ccfstring_type_node
+ = build_qualified_type (cfstring_type_node, TYPE_QUAL_CONST);
+ pccfstring_type_node
+ = build_pointer_type (ccfstring_type_node);
+ pccfstring_ftype_pcchar
+ = build_function_type_list (pccfstring_type_node,
+ pcchar_type_node, NULL_TREE);
+ lang_hooks.builtin_function ("__builtin___CFStringMakeConstantString",
+ pccfstring_ftype_pcchar,
+ DARWIN_BUILTIN_CFSTRINGMAKECONSTANTSTRING,
+ BUILT_IN_NORMAL, NULL, NULL_TREE);
+
+ /* extern int __CFConstantStringClassReference[]; */
+ cfstring_class_reference
+ = build_decl (VAR_DECL,
+ get_identifier ("__CFConstantStringClassReference"),
+ build_array_type (integer_type_node, NULL_TREE));
+ TREE_PUBLIC (cfstring_class_reference) = 1;
+ TREE_USED (cfstring_class_reference) = 1;
+ DECL_ARTIFICIAL (cfstring_class_reference) = 1;
+ (*lang_hooks.decls.pushdecl) (cfstring_class_reference);
+ DECL_EXTERNAL (cfstring_class_reference) = 1;
+ rest_of_decl_compilation (cfstring_class_reference, 0, 0);
+
+ /* Initialize the hash table used to hold the constant CFString objects. */
+ cfstring_htab = htab_create_ggc (31, cfstring_hash,
+ cfstring_eq, NULL);
+}
+
+tree
+darwin_expand_tree_builtin (tree function, tree params,
+ tree coerced_params ATTRIBUTE_UNUSED)
+{
+ unsigned int fcode = DECL_FUNCTION_CODE (function);
+
+ switch (fcode)
+ {
+ case DARWIN_BUILTIN_CFSTRINGMAKECONSTANTSTRING:
+ if (!darwin_constant_cfstrings)
+ {
+ error ("built-in function `%s' requires `-fconstant-cfstrings' flag",
+ IDENTIFIER_POINTER (DECL_NAME (function)));
+ return error_mark_node;
+ }
+
+ return darwin_build_constant_cfstring (TREE_VALUE (params));
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+static hashval_t
+cfstring_hash (const void *ptr)
+{
+ tree str = ((struct cfstring_descriptor *)ptr)->literal;
+ const unsigned char *p = (const unsigned char *) TREE_STRING_POINTER (str);
+ int i, len = TREE_STRING_LENGTH (str);
+ hashval_t h = len;
+
+ for (i = 0; i < len; i++)
+ h = ((h * 613) + p[i]);
+
+ return h;
+}
+
+static int
+cfstring_eq (const void *ptr1, const void *ptr2)
+{
+ tree str1 = ((struct cfstring_descriptor *)ptr1)->literal;
+ tree str2 = ((struct cfstring_descriptor *)ptr2)->literal;
+ int len1 = TREE_STRING_LENGTH (str1);
+
+ return (len1 == TREE_STRING_LENGTH (str2)
+ && !memcmp (TREE_STRING_POINTER (str1), TREE_STRING_POINTER (str2),
+ len1));
+}
+
+tree
+darwin_construct_objc_string (tree str)
+{
+ if (!darwin_constant_cfstrings)
+ /* APPLE LOCAL begin 4080358 */
+ {
+ /* Even though we are not using CFStrings, place our literal
+ into the cfstring_htab hash table, so that the
+ darwin_constant_cfstring_p() function below will see it. */
+ struct cfstring_descriptor key;
+ void **loc;
+
+ key.literal = str;
+ loc = htab_find_slot (cfstring_htab, &key, INSERT);
+
+ if (!*loc)
+ {
+ /* APPLE LOCAL radar 4563012 */
+ *loc = ggc_alloc_cleared (sizeof (struct cfstring_descriptor));
+ ((struct cfstring_descriptor *)*loc)->literal = str;
+ }
+
+ return NULL_TREE; /* Fall back to NSConstantString. */
+ }
+
+ /* APPLE LOCAL end 4080358 */
+ return darwin_build_constant_cfstring (str);
+}
+
+bool
+darwin_constant_cfstring_p (tree str)
+{
+ struct cfstring_descriptor key;
+ void **loc;
+
+ if (!str)
+ return false;
+
+ STRIP_NOPS (str);
+
+ if (TREE_CODE (str) == ADDR_EXPR)
+ str = TREE_OPERAND (str, 0);
+
+ if (TREE_CODE (str) != STRING_CST)
+ return false;
+
+ key.literal = str;
+ loc = htab_find_slot (cfstring_htab, &key, NO_INSERT);
+
+ if (loc)
+ return true;
+
+ return false;
+}
+
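+/* Build (or reuse, via cfstring_htab) the constant CFString for the
+   string literal STR.  As a rough sketch, CFSTR("hi") lowers to
+	static const struct __builtin_CFString S =
+	  { &__CFConstantStringClassReference, 0x000007c8, "hi", 2 };
+   with the expression evaluating to &S; the flags word becomes
+   0x000007d0 and the data UTF-16 when the literal is not pure
+   ASCII.  */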
+static tree
+darwin_build_constant_cfstring (tree str)
+{
+ struct cfstring_descriptor *desc, key;
+ void **loc;
+ tree addr;
+
+ if (!str)
+ goto invalid_string;
+
+ STRIP_NOPS (str);
+
+ if (TREE_CODE (str) == ADDR_EXPR)
+ str = TREE_OPERAND (str, 0);
+
+ if (TREE_CODE (str) != STRING_CST)
+ {
+ invalid_string:
+ error ("CFString literal expression is not constant");
+ return error_mark_node;
+ }
+
+ /* Perhaps we already constructed a constant CFString just like this one? */
+ key.literal = str;
+ loc = htab_find_slot (cfstring_htab, &key, INSERT);
+ desc = *loc;
+
+ if (!desc)
+ {
+ tree initlist, constructor, field = TYPE_FIELDS (ccfstring_type_node);
+ tree var;
+ int length = TREE_STRING_LENGTH (str) - 1;
+ /* FIXME: The CFString functionality should probably reside
+ in darwin-c.c. */
+ extern tree pushdecl_top_level (tree);
+ /* APPLE LOCAL begin radar 2996215 */
+ extern int isascii (int);
+ bool cvt_utf = false;
+ tree utf16_str = NULL_TREE;
+ const char *s = TREE_STRING_POINTER (str);
+ int l;
+ for (l = 0; l < length; l++)
+ if (!s[l] || !isascii (s[l]))
+ {
+ cvt_utf = true;
+ break;
+ }
+ if (cvt_utf)
+ {
+ size_t numUniChars;
+ const unsigned char *inbuf = (unsigned char *)TREE_STRING_POINTER (str);
+ utf16_str = create_init_utf16_var (inbuf, length, &numUniChars);
+ if (!utf16_str)
+ {
+ warning (0, "input conversion stopped due to an input byte "
+ "that does not belong to the input codeset UTF-8");
+ cvt_utf = false; /* fall thru */
+ }
+ else
+ length = (numUniChars >> 1);
+ }
+ /* APPLE LOCAL end radar 2996215 */
+ *loc = desc = ggc_alloc (sizeof (*desc));
+ desc->literal = str;
+
+ initlist = build_tree_list
+ (field, build1 (ADDR_EXPR, pcint_type_node,
+ cfstring_class_reference));
+ field = TREE_CHAIN (field);
+ /* APPLE LOCAL radar 2996215 */
+ initlist = tree_cons (field, build_int_cst (NULL_TREE, utf16_str ? 0x000007d0 : 0x000007c8),
+ initlist);
+ field = TREE_CHAIN (field);
+ initlist = tree_cons (field,
+ build1 (ADDR_EXPR, pcchar_type_node,
+ /* APPLE LOCAL radar 2996215 */
+ utf16_str ? utf16_str : str), initlist);
+ field = TREE_CHAIN (field);
+ /* APPLE LOCAL radar 4493912 */
+ initlist = tree_cons (field, build_int_cst (TREE_TYPE (field), length),
+ initlist);
+
+ constructor = build_constructor_from_list (ccfstring_type_node,
+ nreverse (initlist));
+ TREE_READONLY (constructor) = 1;
+ TREE_CONSTANT (constructor) = 1;
+ TREE_STATIC (constructor) = 1;
+
+ /* Fromage: The C++ flavor of 'build_unary_op' expects constructor nodes
+ to have the TREE_HAS_CONSTRUCTOR (...) bit set. However, this file is
+ being built without any knowledge of C++ tree accessors; hence, we shall
+ use the generic accessor that TREE_HAS_CONSTRUCTOR actually maps to! */
+ if (darwin_running_cxx)
+ TREE_LANG_FLAG_4 (constructor) = 1; /* TREE_HAS_CONSTRUCTOR */
+
+ /* Create an anonymous global variable for this CFString. */
+ var = build_decl (CONST_DECL, NULL, TREE_TYPE (constructor));
+ DECL_INITIAL (var) = constructor;
+ TREE_STATIC (var) = 1;
+ pushdecl_top_level (var);
+ desc->constructor = var;
+ }
+
+ addr = build1 (ADDR_EXPR, pccfstring_type_node, desc->constructor);
+ TREE_CONSTANT (addr) = 1;
+
+ return addr;
+}
+/* APPLE LOCAL end constant cfstrings */
+
+/* APPLE LOCAL begin CW asm blocks */
+/* Assume labels like L_foo$stub etc. in CW-style inline code are
+   intended to be taken as literal labels, and return the identifier;
+   otherwise return NULL, signifying that we have no special
+   knowledge. */
+tree
+darwin_iasm_special_label (tree id)
+{
+ const char *name = IDENTIFIER_POINTER (id);
+
+ if (name[0] == 'L')
+ {
+ int len = strlen (name);
+
+ if ((len > 5 && strcmp (name + len - 5, "$stub") == 0)
+ || (len > 9 && strcmp (name + len - 9, "$lazy_ptr") == 0)
+ || (len > 13 && strcmp (name + len - 13, "$non_lazy_ptr") == 0))
+ return id;
+ }
+
+ return NULL_TREE;
+}
+/* APPLE LOCAL end CW asm blocks */
+
+#if 0
+/* See TARGET_ASM_OUTPUT_ANCHOR for why we can't do this yet. */
+/* Darwin's implementation of TARGET_ASM_OUTPUT_ANCHOR. Define the
+ anchor relative to ".", the current section position. We cannot use
+ the default one because ASM_OUTPUT_DEF is wrong for Darwin. */
+
+void
+darwin_asm_output_anchor (rtx symbol)
+{
+ fprintf (asm_out_file, "\t.set\t");
+ assemble_name (asm_out_file, XSTR (symbol, 0));
+ fprintf (asm_out_file, ", . + " HOST_WIDE_INT_PRINT_DEC "\n",
+ SYMBOL_REF_BLOCK_OFFSET (symbol));
+}
+#endif
+
+/* Set the darwin specific attributes on TYPE. */
+void
+darwin_set_default_type_attributes (tree type)
+{
+ if (darwin_ms_struct
+ && TREE_CODE (type) == RECORD_TYPE)
+ TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("ms_struct"),
+ NULL_TREE,
+ TYPE_ATTRIBUTES (type));
+}
+
+/* True iff we're generating code for loadable kernel extensions. */
+
+bool
+darwin_kextabi_p (void)
+{
+ /* APPLE LOCAL kext v2 */
+ return TARGET_KEXTABI == 1;
+}
+
+/* APPLE LOCAL begin kext v2 */
+#ifndef TARGET_SUPPORTS_KEXTABI1
+#define TARGET_SUPPORTS_KEXTABI1 0
+#endif
+/* APPLE LOCAL end kext v2 */
+
+void
+darwin_override_options (void)
+{
+ /* APPLE LOCAL begin for iframework for 4.3 4094959 */
+ /* Remove this: */
+#if 0
+ if (flag_apple_kext && strcmp (lang_hooks.name, "GNU C++") != 0)
+ {
+ warning (0, "command line option %<-fapple-kext%> is only valid for C++");
+ flag_apple_kext = 0;
+ }
+#endif
+ /* APPLE LOCAL end for iframework for 4.3 4094959 */
+ if (flag_mkernel || flag_apple_kext)
+ {
+ /* -mkernel implies -fapple-kext for C++ */
+ if (strcmp (lang_hooks.name, "GNU C++") == 0)
+ flag_apple_kext = 1;
+
+ flag_no_common = 1;
+
+ /* No EH in kexts. */
+ flag_exceptions = 0;
+ /* APPLE LOCAL 5628030 */
+ flag_asynchronous_unwind_tables = 0;
+ /* No -fnon-call-exceptions data in kexts. */
+ flag_non_call_exceptions = 0;
+ /* APPLE LOCAL begin kext v2 */
+ if (flag_apple_kext &&
+ ! TARGET_SUPPORTS_KEXTABI1)
+ flag_apple_kext = 2;
+ /* APPLE LOCAL end kext v2 */
+ }
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ /* APPLE LOCAL begin ARM 5683689 */
+
+ /* Go ahead and generate stubs for old systems, just in case. */
+ if (darwin_macosx_version_min
+ && strverscmp (darwin_macosx_version_min, "10.5") < 0)
+ darwin_stubs = true;
+ /* APPLE LOCAL end ARM 5683689 */
+ /* APPLE LOCAL end axe stubs 5571540 */
+ /* APPLE LOCAL begin stack-protector default 5095227 */
+ /* Default flag_stack_protect to 1 if on 10.5 or later for user code,
+ or 10.6 or later for code identified as part of the kernel. */
+ if (flag_stack_protect == -1
+ && darwin_macosx_version_min
+ && ((! flag_mkernel && ! flag_apple_kext
+ && strverscmp (darwin_macosx_version_min, "10.5") >= 0)
+ || strverscmp (darwin_macosx_version_min, "10.6") >= 0))
+ flag_stack_protect = 1;
+ /* APPLE LOCAL end stack-protector default 5095227 */
+/* APPLE LOCAL diff confuses me */
+}
+/* APPLE LOCAL begin radar 4985544 */
+bool
+darwin_cfstring_type_node (tree type_node)
+{
+ return type_node == ccfstring_type_node;
+}
+/* APPLE LOCAL end radar 4985544 */
+#include "gt-darwin.h"
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin.h b/gcc-4.2.1-5666.3/gcc/config/darwin.h
new file mode 100644
index 000000000..482cd4c8a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin.h
@@ -0,0 +1,1429 @@
+/* Target definitions for Darwin (Mac OS X) systems.
+ Copyright (C) 1989, 1990, 1991, 1992, 1993, 2000, 2001, 2002, 2003, 2004,
+ 2005
+ Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#ifndef CONFIG_DARWIN_H
+#define CONFIG_DARWIN_H
+
+/* The definitions in this file are common to all processor types
+ running Darwin, which is the kernel for Mac OS X. Darwin is
+ basically a BSD user layer laid over a Mach kernel, then evolved
+ for many years (at NeXT) in parallel with other Unix systems. So
+ while the runtime is a somewhat idiosyncratic Mach-based thing,
+ other definitions look like they would for a BSD variant. */
+
+/* Although NeXT ran on many different architectures, as of Jan 2001
+ the only supported Darwin targets are PowerPC and x86. */
+
+/* One of Darwin's NeXT legacies is the Mach-O format, which is partly
+ like a.out and partly like COFF, with additional features like
+ multi-architecture binary support. */
+
+#define OBJECT_FORMAT_MACHO
+
+/* APPLE LOCAL begin dynamic-no-pic */
+extern int machopic_symbol_defined_p (rtx);
+/* APPLE LOCAL end dynamic-no-pic */
+
+/* APPLE LOCAL begin axe stubs 5571540 */
+extern int darwin_stubs;
+/* APPLE LOCAL end axe stubs 5571540 */
+
+/* APPLE LOCAL begin mainline 2006-11-01 3334812 */
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+/* APPLE LOCAL end mainline 2006-11-01 3334812 */
+
+/* Suppress g++ attempt to link in the math library automatically. */
+#define MATH_LIBRARY ""
+
+/* We have atexit. */
+
+#define HAVE_ATEXIT
+
+/* Define an empty body for the function do_global_dtors() in libgcc2.c. */
+
+#define DO_GLOBAL_DTORS_BODY
+
+/* The string value for __SIZE_TYPE__. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* wchar_t is int. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Default to using the NeXT-style runtime, since that's what is
+ pre-installed on Darwin systems. */
+
+#define NEXT_OBJC_RUNTIME
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* APPLE LOCAL begin -Wfour-char-constants */
+/* Don't warn about MacOS-style 'APPL' four-char-constants. */
+#undef WARN_FOUR_CHAR_CONSTANTS
+#define WARN_FOUR_CHAR_CONSTANTS 0
+/* APPLE LOCAL end -Wfour-char-constants */
+
+/* True if pragma ms_struct is in effect. */
+extern GTY(()) int darwin_ms_struct;
+
+/* This table intercepts weirdo options whose names would interfere
+ with normal driver conventions, and either translates them into
+ standardly-named options, or adds a 'Z' so that they can get to
+ specs processing without interference.
+
+ Do not expand a linker option to "-Xlinker -<option>", since that
+ forfeits the ability to control via spec strings later. However,
+ as a special exception, do this translation with -filelist, because
+ otherwise the driver will think there are no input files and quit.
+ (The alternative would be to hack the driver to recognize -filelist
+ specially, but it's simpler to use the translation table.)
+
+ Note that an option name with a prefix that matches another option
+ name, that also takes an argument, needs to be modified so the
+ prefix is different, otherwise a '*' after the shorter option will
+ match with the longer one.
+
+ The SUBTARGET_OPTION_TRANSLATE_TABLE macro, which _must_ be defined
+ in gcc/config/{i386,rs6000}/darwin.h, should contain any additional
+ command-line option translations specific to the particular target
+ architecture. */
+
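+/* For example, "-framework Foo" reaches the linker as
+   "-Xlinker -framework -Xlinker Foo", and "-all_load" is renamed to
+   "-Zall_load" so that the specs below can match it without upsetting
+   the driver's normal option parsing.  */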
+#define TARGET_OPTION_TRANSLATE_TABLE \
+ { "-all_load", "-Zall_load" }, \
+ /* APPLE LOCAL 7519550 -force_load */ \
+ { "-force_load", "-Zforce_load" }, \
+ { "-allowable_client", "-Zallowable_client" }, \
+ { "-arch_errors_fatal", "-Zarch_errors_fatal" }, \
+ { "-bind_at_load", "-Zbind_at_load" }, \
+ { "-bundle", "-Zbundle" }, \
+ { "-bundle_loader", "-Zbundle_loader" }, \
+ { "-weak_reference_mismatches", "-Zweak_reference_mismatches" }, \
+ { "-dead_strip", "-Zdead_strip" }, \
+ { "-no_dead_strip_inits_and_terms", "-Zno_dead_strip_inits_and_terms" }, \
+ { "-dependency-file", "-MF" }, \
+ { "-dylib_file", "-Zdylib_file" }, \
+ { "-dynamic", "-Zdynamic" }, \
+ { "-dynamiclib", "-Zdynamiclib" }, \
+ { "-exported_symbols_list", "-Zexported_symbols_list" }, \
+ { "-gfull", "-g -fno-eliminate-unused-debug-symbols" }, \
+ { "-gused", "-g -feliminate-unused-debug-symbols" }, \
+ { "-segaddr", "-Zsegaddr" }, \
+ { "-segs_read_only_addr", "-Zsegs_read_only_addr" }, \
+ { "-segs_read_write_addr", "-Zsegs_read_write_addr" }, \
+ { "-seg_addr_table", "-Zseg_addr_table" }, \
+ { "-seg_addr_table_filename", "-Zfn_seg_addr_table_filename" }, \
+ /* APPLE LOCAL mainline */ \
+ { "-umbrella", "-Zumbrella" }, \
+ /* APPLE LOCAL kext weak_import 5935650 */ \
+ { "-fapple-kext", "-fapple-kext -static" }, \
+ { "-filelist", "-Xlinker -filelist -Xlinker" }, \
+ { "-findirect-virtual-calls", "-fapple-kext" }, \
+ { "-flat_namespace", "-Zflat_namespace" }, \
+ { "-force_cpusubtype_ALL", "-Zforce_cpusubtype_ALL" }, \
+ { "-force_flat_namespace", "-Zforce_flat_namespace" }, \
+ { "-framework", "-Xlinker -framework -Xlinker" }, \
+ { "-fterminated-vtables", "-fapple-kext" }, \
+ { "-image_base", "-Zimage_base" }, \
+ { "-init", "-Zinit" }, \
+ { "-install_name", "-Zinstall_name" }, \
+ /* APPLE LOCAL kext weak_import 5935650 */ \
+ { "-mkernel", "-mkernel -static" }, \
+ { "-multiply_defined_unused", "-Zmultiplydefinedunused" }, \
+ { "-multiply_defined", "-Zmultiply_defined" }, \
+ { "-multi_module", "-Zmulti_module" }, \
+ /* APPLE LOCAL begin kext weak_import 5935650 */ \
+ /* Removed -static */ \
+ /* APPLE LOCAL end kext weak_import 5935650 */ \
+ /* APPLE LOCAL mainline */ \
+ { "-shared", "-Zdynamiclib" }, \
+ { "-single_module", "-Zsingle_module" }, \
+ { "-unexported_symbols_list", "-Zunexported_symbols_list" }, \
+ /* APPLE LOCAL radar 6269491 */ \
+ /* code removed. */ \
+ /* APPLE LOCAL begin constant cfstrings */ \
+ { "-fconstant-cfstrings", "-mconstant-cfstrings" }, \
+ { "-fno-constant-cfstrings", "-mno-constant-cfstrings" }, \
+ { "-Wnonportable-cfstrings", "-mwarn-nonportable-cfstrings" }, \
+ { "-Wno-nonportable-cfstrings", "-mno-warn-nonportable-cfstrings" }, \
+ { "-fpascal-strings", "-mpascal-strings" }, \
+ { "-fno-pascal-strings", "-mno-pascal-strings" }, \
+ /* APPLE LOCAL end constant cfstrings */ \
+ SUBTARGET_OPTION_TRANSLATE_TABLE
+
+/* APPLE LOCAL begin constant cfstrings */
+extern int darwin_running_cxx;
+/* APPLE LOCAL end constant cfstrings */
+
+/* APPLE LOCAL begin pragma reverse_bitfields */
+/* True if pragma reverse_bitfields is in effect. */
+extern GTY(()) int darwin_reverse_bitfields;
+/* APPLE LOCAL end pragma reverse_bitfields */
+
+/* APPLE LOCAL begin ARM 5683689 */
+enum darwin_version_type {
+ DARWIN_VERSION_MACOSX,
+ DARWIN_VERSION_IPHONEOS
+};
+/* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL AT&T-style stub 4164563 */
+#define MACHOPIC_ATT_STUB (darwin_macho_att_stub)
+
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ /* APPLE LOCAL begin constant cfstrings */ \
+ if (darwin_pascal_strings) \
+ { \
+ warn_pointer_sign = 1; \
+ CPP_OPTION (parse_in, pascal_strings) = 1; \
+ } \
+ /* APPLE LOCAL begin ARM 5683689 */ \
+ if (darwin_macosx_version_min && darwin_iphoneos_version_min) \
+ error ("-mmacosx-version-min not allowed with" \
+ " -miphoneos-version-min"); \
+ /* APPLE LOCAL end ARM 5683689 */ \
+ /* The c_dialect...() macros are not available to us here. */ \
+ darwin_running_cxx = (strstr (lang_hooks.name, "C++") != 0); \
+ /* APPLE LOCAL end constant cfstrings */ \
+ darwin_override_options (); \
+ } while (0)
+
+/* APPLE LOCAL begin constant cfstrings */
+#define SUBTARGET_INIT_BUILTINS \
+do { \
+ darwin_init_cfstring_builtins (); \
+} while(0)
+
+#undef TARGET_EXPAND_TREE_BUILTIN
+#define TARGET_EXPAND_TREE_BUILTIN darwin_expand_tree_builtin
+#undef TARGET_CONSTRUCT_OBJC_STRING
+#define TARGET_CONSTRUCT_OBJC_STRING darwin_construct_objc_string
+/* APPLE LOCAL end constant cfstrings */
+
+/* These compiler options take the indicated number of arguments (e.g.
+   "sectcreate" takes a segment name, a section name and a file name,
+   hence 3). */
+
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) ? 1 : \
+ /* APPLE LOCAL 7519550 -force_load */ \
+ !strcmp (STR, "Zforce_load") ? 1 : \
+ !strcmp (STR, "Zallowable_client") ? 1 : \
+ !strcmp (STR, "arch") ? 1 : \
+ !strcmp (STR, "arch_only") ? 1 : \
+ !strcmp (STR, "Zbundle_loader") ? 1 : \
+ !strcmp (STR, "client_name") ? 1 : \
+ !strcmp (STR, "compatibility_version") ? 1 : \
+ !strcmp (STR, "current_version") ? 1 : \
+ !strcmp (STR, "Zdylib_file") ? 1 : \
+ !strcmp (STR, "Zexported_symbols_list") ? 1 : \
+ !strcmp (STR, "Zimage_base") ? 1 : \
+ !strcmp (STR, "Zinit") ? 1 : \
+ !strcmp (STR, "Zinstall_name") ? 1 : \
+ !strcmp (STR, "Zmultiplydefinedunused") ? 1 : \
+ !strcmp (STR, "Zmultiply_defined") ? 1 : \
+ !strcmp (STR, "precomp-trustfile") ? 1 : \
+ !strcmp (STR, "read_only_relocs") ? 1 : \
+ !strcmp (STR, "sectcreate") ? 3 : \
+ !strcmp (STR, "sectorder") ? 3 : \
+ !strcmp (STR, "Zsegaddr") ? 2 : \
+ !strcmp (STR, "Zsegs_read_only_addr") ? 1 : \
+ !strcmp (STR, "Zsegs_read_write_addr") ? 1 : \
+ !strcmp (STR, "Zseg_addr_table") ? 1 : \
+ !strcmp (STR, "Zfn_seg_addr_table_filename") ? 1 :\
+ !strcmp (STR, "seg1addr") ? 1 : \
+ !strcmp (STR, "segprot") ? 3 : \
+ !strcmp (STR, "sub_library") ? 1 : \
+ !strcmp (STR, "sub_umbrella") ? 1 : \
+ /* APPLE LOCAL mainline */ \
+ !strcmp (STR, "Zumbrella") ? 1 : \
+ !strcmp (STR, "undefined") ? 1 : \
+ !strcmp (STR, "Zunexported_symbols_list") ? 1 : \
+ !strcmp (STR, "Zweak_reference_mismatches") ? 1 : \
+ !strcmp (STR, "pagezero_size") ? 1 : \
+ !strcmp (STR, "segs_read_only_addr") ? 1 : \
+ !strcmp (STR, "segs_read_write_addr") ? 1 : \
+ !strcmp (STR, "sectalign") ? 3 : \
+ !strcmp (STR, "sectobjectsymbols") ? 2 : \
+ !strcmp (STR, "segcreate") ? 3 : \
+ !strcmp (STR, "dylinker_install_name") ? 1 : \
+ 0)
+
+#define SUBTARGET_C_COMMON_OVERRIDE_OPTIONS do { \
+ if (flag_mkernel || flag_apple_kext) \
+ { \
+ if (flag_use_cxa_atexit == 2) \
+ flag_use_cxa_atexit = 0; \
+ /* kexts should always be built without the coalesced sections \
+ because the kernel loader doesn't grok such sections. */ \
+ flag_weak = 0; \
+ /* No RTTI in kexts. */ \
+ flag_rtti = 0; \
+ /* APPLE LOCAL begin 5731065 */ \
+ if (flag_mkernel) \
+ flag_no_builtin = 1; \
+ /* APPLE LOCAL end 5731065 */ \
+ /* APPLE LOCAL xmmintrin.h for kernel 4123064 */ \
+ flag_hosted = 0; \
+ } \
+ } while (0)
+
+/* Machine-dependent cpp options.  Don't add more options here; add
+   them to darwin_cpp_builtins in darwin-c.c. */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{static:%{!dynamic:-D__STATIC__}}%{!static:-D__DYNAMIC__}" \
+ " %{pthread:-D_REENTRANT}"
+
+/* APPLE LOCAL begin private extern */
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC "-D__private_extern__=extern"
+/* APPLE LOCAL end private extern */
+
+/* This is mostly a clone of the standard LINK_COMMAND_SPEC, plus
+ precomp, libtool, and fat build additions. Also we
+ don't specify a second %G after %L because libSystem is
+ self-contained and doesn't need to link against libgcc.a. */
+/* In general, random Darwin linker flags should go into LINK_SPEC
+ instead of LINK_COMMAND_SPEC. The command spec is better for
+ specifying the handling of options understood by generic Unix
+ linkers, and for positional arguments like libraries. */
+/* APPLE LOCAL begin mainline */
+#define LINK_COMMAND_SPEC "\
+%{!fdump=*:%{!fsyntax-only:%{!precomp:%{!c:%{!M:%{!MM:%{!E:%{!S:\
+ %(linker) %l %X %{d} %{s} %{t} %{Z} %{u*} \
+ %{A} %{e*} %{m} %{r} %{x} \
+ %{o*}%{!o:-o a.out} \
+ %{!A:%{!nostdlib:%{!nostartfiles:%S}}} \
+ %{L*} %{fopenmp:%:include(libgomp.spec)%(link_gomp)} \
+"/* APPLE LOCAL add fcreate-profile */"\
+ %(link_libgcc) %o %{fprofile-arcs|fprofile-generate|fcreate-profile|coverage:-lgcov} \
+"/* APPLE LOCAL nested functions 4357979 */"\
+ %{fnested-functions: -allow_stack_execute} \
+"/* APPLE LOCAL prefer -lSystem 6645902 */"\
+ %{!nostdlib:%{!nodefaultlibs:%(link_ssp) %(link_gcc_c_sequence)}} \
+"/* APPLE LOCAL begin mainline 4.3 2006-10-31 4370146 */"\
+ %{!A:%{!nostdlib:%{!nostartfiles:%E}}} %{T*} %{F*} }}}}}}}}\n\
+%{!fdump=*:%{!fsyntax-only:%{!c:%{!M:%{!MM:%{!E:%{!S:\
+"/* APPLE LOCAL end mainline 4.3 2006-10-31 4370146 */"\
+ %{.c|.cc|.C|.cpp|.cp|.c++|.cxx|.CPP|.m|.mm: \
+ %{g*:%{!gstabs*:%{!g0: dsymutil %{o*:%*}%{!o:a.out}}}}}}}}}}}}"
+/* APPLE LOCAL end mainline */
+
+#ifdef TARGET_SYSTEM_ROOT
+#define LINK_SYSROOT_SPEC \
+ "%{isysroot*:-syslibroot %*;:-syslibroot " TARGET_SYSTEM_ROOT "}"
+#else
+#define LINK_SYSROOT_SPEC "%{isysroot*:-syslibroot %*}"
+#endif
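+
+/* For example, "-isysroot /Developer/SDKs/MacOSX10.5.sdk" is rewritten
+   as "-syslibroot /Developer/SDKs/MacOSX10.5.sdk" on the ld command
+   line; when no -isysroot is given and TARGET_SYSTEM_ROOT is
+   configured, that default root is passed instead.  */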
+
+/* Please keep the random linker options in alphabetical order (modulo
+ 'Z' and 'no' prefixes). Options that can only go to one of libtool
+ or ld must be listed twice, under both !Zdynamiclib and
+ Zdynamiclib, with one of the cases reporting an error. */
+/* Note that options taking arguments may appear multiple times on a
+ command line with different arguments each time, so put a * after
+ their names so all of them get passed. */
+/* APPLE LOCAL begin mainline */
+#define LINK_SPEC \
+ "%{static}%{!static:-dynamic} \
+ %{fgnu-runtime:%:replace-outfile(-lobjc -lobjc-gnu)}\
+ %{!Zdynamiclib: \
+ %{Zforce_cpusubtype_ALL:-arch %(darwin_arch) -force_cpusubtype_ALL} \
+ %{!Zforce_cpusubtype_ALL:-arch %(darwin_subarch)} \
+ %{Zbundle:-bundle} \
+ %{Zbundle_loader*:-bundle_loader %*} \
+ %{client_name*} \
+ %{compatibility_version*:%e-compatibility_version only allowed with -dynamiclib\
+} \
+ %{current_version*:%e-current_version only allowed with -dynamiclib} \
+ %{Zforce_flat_namespace:-force_flat_namespace} \
+ %{Zinstall_name*:%e-install_name only allowed with -dynamiclib} \
+ %{keep_private_externs} \
+ %{private_bundle} \
+ } \
+ %{Zdynamiclib: -dylib \
+ %{Zbundle:%e-bundle not allowed with -dynamiclib} \
+ %{Zbundle_loader*:%e-bundle_loader not allowed with -dynamiclib} \
+ %{client_name*:%e-client_name not allowed with -dynamiclib} \
+ %{compatibility_version*:-dylib_compatibility_version %*} \
+ %{current_version*:-dylib_current_version %*} \
+ %{Zforce_cpusubtype_ALL:-arch %(darwin_arch)} \
+ %{!Zforce_cpusubtype_ALL: -arch %(darwin_subarch)} \
+ %{Zforce_flat_namespace:%e-force_flat_namespace not allowed with -dynamiclib} \
+ %{Zinstall_name*:-dylib_install_name %*} \
+ %{keep_private_externs:%e-keep_private_externs not allowed with -dynamiclib} \
+ %{private_bundle:%e-private_bundle not allowed with -dynamiclib} \
+ } \
+ %{Zall_load:-all_load} \
+ "/* APPLE LOCAL 7519550 -force_load */" \
+ %{Zforce_load*:-force_load %*} \
+ %{Zallowable_client*:-allowable_client %*} \
+ %{Zbind_at_load:-bind_at_load} \
+ %{Zarch_errors_fatal:-arch_errors_fatal} \
+ %{Zdead_strip:-dead_strip} \
+ %{Zno_dead_strip_inits_and_terms:-no_dead_strip_inits_and_terms} \
+ %{Zdylib_file*:-dylib_file %*} \
+ %{Zdynamic:-dynamic}\
+ %{Zexported_symbols_list*:-exported_symbols_list %*} \
+ %{Zflat_namespace:-flat_namespace} \
+ %{headerpad_max_install_names*} \
+ %{Zimage_base*:-image_base %*} \
+ %{Zinit*:-init %*} \
+ "/* APPLE LOCAL begin ARM 5683689 */"\
+ %{!mmacosx-version-min=*: %{!miphoneos-version-min=*: %(darwin_ld_minversion)}} \
+ %{mmacosx-version-min=*:-macosx_version_min %*} \
+ %{miphoneos-version-min=*:-iphoneos_version_min %*} \
+ "/* APPLE LOCAL end ARM 5683689 */"\
+ %{nomultidefs} \
+ %{Zmulti_module:-multi_module} %{Zsingle_module:-single_module} \
+ %{Zmultiply_defined*:-multiply_defined %*} \
+ "/* APPLE LOCAL begin deletion 5023884 */" \
+ "/* APPLE LOCAL end deletion 5023884 */" \
+ %{Zmultiplydefinedunused*:-multiply_defined_unused %*} \
+ "/* APPLE LOCAL mainline 2007-06-01 5238485 */" \
+ %{fpie:-pie} \
+ "/* APPLE LOCAL begin <rdar://problem/7651045> */" \
+ %{fPIE:-pie} \
+ %{fno-pie:-no_pie} \
+ %{fno-PIE:-no_pie} \
+ "/* APPLE LOCAL end <rdar://problem/7651045> */" \
+ %{prebind} %{noprebind} %{nofixprebinding} %{prebind_all_twolevel_modules} \
+ %{read_only_relocs} \
+ %{sectcreate*} %{sectorder*} %{seg1addr*} %{segprot*} \
+ %{Zsegaddr*:-segaddr %*} \
+ %{Zsegs_read_only_addr*:-segs_read_only_addr %*} \
+ %{Zsegs_read_write_addr*:-segs_read_write_addr %*} \
+ %{Zseg_addr_table*: -seg_addr_table %*} \
+ %{Zfn_seg_addr_table_filename*:-seg_addr_table_filename %*} \
+ %{sub_library*} %{sub_umbrella*} \
+ " LINK_SYSROOT_SPEC " \
+ %{twolevel_namespace} %{twolevel_namespace_hints} \
+ %{Zumbrella*: -umbrella %*} \
+ %{undefined*} \
+ %{Zunexported_symbols_list*:-unexported_symbols_list %*} \
+ %{Zweak_reference_mismatches*:-weak_reference_mismatches %*} \
+ %{!Zweak_reference_mismatches*:-weak_reference_mismatches non-weak} \
+ %{X} \
+ %{y*} \
+ %{w} \
+ %{pagezero_size*} %{segs_read_*} %{seglinkedit} %{noseglinkedit} \
+ %{sectalign*} %{sectobjectsymbols*} %{segcreate*} %{whyload} \
+ %{whatsloaded} %{dylinker_install_name*} \
+ %{dylinker} %{Mach} "
+/* APPLE LOCAL end mainline */
+
+/* Machine dependent libraries. */
+
+#define LIB_SPEC "%{!static:-lSystem}"
+
+/* Support -mmacosx-version-min by supplying different (stub) libgcc_s.dylib
+ libraries to link against, and by not linking against libgcc_s on
+ earlier-than-10.3.9.
+
+ Note that by default, -lgcc_eh is not linked against! This is
+ because in a future version of Darwin the EH frame information may
+ be in a new format, or the fallback routine might be changed; if
+ you want to explicitly link against the static version of those
+ routines, because you know you don't need to unwind through system
+ libraries, you need to explicitly say -static-libgcc.
+
+ If it is linked against, it has to be before -lgcc, because it may
+ need symbols from -lgcc. */
+#undef REAL_LIBGCC_SPEC
+#define REAL_LIBGCC_SPEC \
+/* APPLE LOCAL libgcc_static.a */ \
+ "%{static:-lgcc_static; static-libgcc: -lgcc_eh -lgcc; \
+ "/* APPLE LOCAL ARM 5683689 5681645 */" \
+ miphoneos-version-min=*: %(darwin_iphoneos_libgcc); \
+ shared-libgcc|fexceptions|fgnu-runtime: \
+ %:version-compare(!> 10.5 mmacosx-version-min= -lgcc_s.10.4) \
+ "/* APPLE LOCAL link optimizations 6499452 */" \
+ %:version-compare(>< 10.5 10.6 mmacosx-version-min= -lgcc_s.10.5) \
+ -lgcc; \
+ :%:version-compare(>< 10.3.9 10.5 mmacosx-version-min= -lgcc_s.10.4) \
+ "/* APPLE LOCAL link optimizations 6499452 */" \
+ %:version-compare(>< 10.5 10.6 mmacosx-version-min= -lgcc_s.10.5) \
+ -lgcc}"
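+
+/* Reading the spec above: -static links libgcc_static.a,
+   -static-libgcc links -lgcc_eh -lgcc, and an iPhone OS deployment
+   target defers to %(darwin_iphoneos_libgcc); otherwise a versioned
+   shared stub (-lgcc_s.10.4 or -lgcc_s.10.5) is selected by comparing
+   -mmacosx-version-min, followed by -lgcc.  */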
+
+/* We specify crt0.o as -lcrt0.o so that ld will search the library path.
+
+ crt3.o provides __cxa_atexit on systems that don't have it. Since
+ it's only used with C++, which requires passing -shared-libgcc, key
+ off that to avoid unnecessarily adding a destructor to every
+ powerpc program built. */
+
+/* APPLE LOCAL begin mainline */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{Zdynamiclib: %(darwin_dylib1) } \
+ "/* APPLE LOCAL link optimizations 6499452 */" \
+ %{!Zdynamiclib:%{Zbundle:%{!static: %(darwin_bundle1)}} \
+ %{!Zbundle:%{pg:%{static:-lgcrt0.o} \
+ %{!static:%{object:-lgcrt0.o} \
+ %{!object:%{preload:-lgcrt0.o} \
+ %{!preload:-lgcrt1.o %(darwin_crt2)}}}} \
+ %{!pg:%{static:-lcrt0.o} \
+ %{!static:%{object:-lcrt0.o} \
+ %{!object:%{preload:-lcrt0.o} \
+ %{!preload: %(darwin_crt1) \
+ %(darwin_crt2)}}}}}} \
+ %{shared-libgcc: \
+ %{!miphoneos-version-min=*: \
+ %:version-compare(< 10.5 mmacosx-version-min= crt3.o%s)}}"
+/* APPLE LOCAL end mainline */
+
+/* The native Darwin linker doesn't necessarily place files in the order
+ that they're specified on the link line. Thus, it is pointless
+ to put anything in ENDFILE_SPEC. */
+/* #define ENDFILE_SPEC "" */
+
+/* APPLE LOCAL begin mainline */
+#define DARWIN_EXTRA_SPECS \
+ { "darwin_crt1", DARWIN_CRT1_SPEC }, \
+ { "darwin_dylib1", DARWIN_DYLIB1_SPEC }, \
+ /* APPLE LOCAL link optimizations 6499452 */ \
+ { "darwin_bundle1", DARWIN_BUNDLE1_SPEC }, \
+ { "darwin_minversion", DARWIN_MINVERSION_SPEC }, \
+/* APPLE LOCAL end mainline */ \
+/* APPLE LOCAL begin ARM 5683689 */ \
+ { "darwin_cc1_minversion", DARWIN_CC1_MINVERSION_SPEC }, \
+ { "darwin_ld_minversion", DARWIN_LD_MINVERSION_SPEC }, \
+/* APPLE LOCAL end ARM 5683689 */ \
+/* APPLE LOCAL ARM 5681645 */ \
+ { "darwin_iphoneos_libgcc", DARWIN_IPHONEOS_LIBGCC_SPEC },
+
+/* APPLE LOCAL begin ARM 5683689 */
+/* APPLE LOCAL begin link optimizations 6999417 */
+#define DARWIN_DYLIB1_SPEC \
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -ldylib1.o)} \
+ %{!miphoneos-version-min=*: \
+ %:version-compare(!> 10.5 mmacosx-version-min= -ldylib1.o) \
+ %:version-compare(>= 10.5 mmacosx-version-min= -ldylib1.10.5.o)}"
+
+/* APPLE LOCAL begin link optimizations 6499452 */
+#define DARWIN_BUNDLE1_SPEC \
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -lbundle1.o)} \
+ %{!miphoneos-version-min=*: -lbundle1.o }"
+/* APPLE LOCAL end link optimizations 6499452 */
+
+#define DARWIN_CRT1_SPEC \
+/* APPLE LOCAL ARM 5823776 iphoneos should use crt1.o */ \
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -lcrt1.o) \
+ %:version-compare(>= 3.1 miphoneos-version-min= -lcrt1.3.1.o)} \
+ %{!miphoneos-version-min=*: \
+ %:version-compare(!> 10.5 mmacosx-version-min= -lcrt1.o) \
+ %:version-compare(>= 10.5 mmacosx-version-min= -lcrt1.10.5.o)}"
+/* APPLE LOCAL end link optimizations 6999417 */
+/* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL begin prefer -lSystem 6645902 */
+#define LINK_GCC_C_SEQUENCE_SPEC "%G %L"
+/* APPLE LOCAL end prefer -lSystem 6645902 */
+
+/* Default Darwin ASM_SPEC, very simple. */
+/* APPLE LOCAL begin kext weak_import 5935650 */
+/* APPLE LOCAL begin radar 4161346 */
+#define ASM_SPEC "-arch %(darwin_arch) \
+ %{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
+ %{!Zforce_cpusubtype_ALL:%{faltivec:-force_cpusubtype_ALL}} \
+ %{mkernel|static|fapple-kext:%{!Zdynamic:-static}}"
+/* APPLE LOCAL end radar 4161346 */
+/* APPLE LOCAL end kext weak_import 5935650 */
+/* APPLE LOCAL begin mainline 4.3 2006-10-31 4370143 */
+/* We still allow output of STABS. */
+
+#define DBX_DEBUGGING_INFO 1
+
+/* Prefer DWARF2. */
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* APPLE LOCAL end mainline 4.3 2006-10-31 4370143 */
+#define DEBUG_FRAME_SECTION "__DWARF,__debug_frame,regular,debug"
+#define DEBUG_INFO_SECTION "__DWARF,__debug_info,regular,debug"
+#define DEBUG_ABBREV_SECTION "__DWARF,__debug_abbrev,regular,debug"
+#define DEBUG_ARANGES_SECTION "__DWARF,__debug_aranges,regular,debug"
+#define DEBUG_MACINFO_SECTION "__DWARF,__debug_macinfo,regular,debug"
+#define DEBUG_LINE_SECTION "__DWARF,__debug_line,regular,debug"
+#define DEBUG_LOC_SECTION "__DWARF,__debug_loc,regular,debug"
+#define DEBUG_PUBNAMES_SECTION "__DWARF,__debug_pubnames,regular,debug"
+/* APPLE LOCAL begin pubtypes, approved for 4.3 4535968 */
+#define DEBUG_PUBTYPES_SECTION "__DWARF,__debug_pubtypes,regular,debug"
+/* APPLE LOCAL end pubtypes, approved for 4.3 4535968 */
+/* APPLE LOCAL radar 6275985 debug inlined section */
+#define DEBUG_INLINED_SECTION "__DWARF,__debug_inlined,regular,debug"
+#define DEBUG_STR_SECTION "__DWARF,__debug_str,regular,debug"
+#define DEBUG_RANGES_SECTION "__DWARF,__debug_ranges,regular,debug"
+
+/* APPLE LOCAL begin gdb only used symbols */
+/* Support option to generate stabs for only used symbols. */
+
+#define DBX_ONLY_USED_SYMBOLS
+/* APPLE LOCAL end gdb only used symbols */
+
+/* When generating stabs debugging, use N_BINCL entries. */
+
+#define DBX_USE_BINCL
+
+/* There is no limit to the length of stabs strings. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* gdb needs a null N_SO at the end of each file for scattered loading. */
+
+#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+
+/* GCC's definition of 'one_only' is the same as its definition of 'weak'. */
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+
+/* Mach-O supports 'weak imports', and 'weak definitions' in coalesced
+ sections. machopic_select_section ensures that weak variables go in
+ coalesced sections. Weak aliases (or any other kind of aliases) are
+ not supported. Weak symbols that aren't visible outside the .s file
+ are not supported. */
+#define ASM_WEAKEN_DECL(FILE, DECL, NAME, ALIAS) \
+ do { \
+ if (ALIAS) \
+ { \
+ warning (0, "alias definitions not supported in Mach-O; ignored"); \
+ break; \
+ } \
+ \
+ if (! DECL_EXTERNAL (DECL) && TREE_PUBLIC (DECL)) \
+ targetm.asm_out.globalize_label (FILE, NAME); \
+ if (DECL_EXTERNAL (DECL)) \
+ fputs ("\t.weak_reference ", FILE); \
+ else if (! lookup_attribute ("weak", DECL_ATTRIBUTES (DECL)) \
+ && lookup_attribute ("weak_import", DECL_ATTRIBUTES (DECL))) \
+ break; \
+ else if (TREE_PUBLIC (DECL)) \
+ fputs ("\t.weak_definition ", FILE); \
+ else \
+ break; \
+ assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } while (0)
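+
+/* For example, with the usual "_" user-label prefix this emits
+   "\t.weak_reference _foo" for an undefined weak-imported symbol and
+   "\t.weak_definition _foo" for a public weak definition.  */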
+
+/* Darwin has the pthread routines in libSystem, which every program
+   links to, so there's no need for weak references to them.  */
+#define GTHREAD_USE_WEAK 0
+
+/* The Darwin linker imposes two limitations on common symbols: they
+ can't have hidden visibility, and they can't appear in dylibs. As
+ a consequence, we should never use common symbols to represent
+ vague linkage. */
+#undef USE_COMMON_FOR_ONE_ONLY
+#define USE_COMMON_FOR_ONE_ONLY 0
+
+/* The Darwin linker doesn't want coalesced symbols to appear in
+ a static archive's table of contents. */
+#undef TARGET_WEAK_NOT_IN_ARCHIVE_TOC
+#define TARGET_WEAK_NOT_IN_ARCHIVE_TOC 1
+
+/* On Darwin, we don't (at the time of writing) have linkonce sections
+ with names, so it's safe to make the class data not comdat. */
+#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT hook_bool_void_false
+
+/* APPLE LOCAL begin mainline 4.3 2006-01-10 4871915 */
+/* For efficiency, on Darwin the RTTI information that is always
+ emitted in the standard C++ library should not be COMDAT. */
+#define TARGET_CXX_LIBRARY_RTTI_COMDAT hook_bool_void_false
+
+/* APPLE LOCAL end mainline 4.3 2006-01-10 4871915 */
+/* We make exception information linkonce. */
+#undef TARGET_USES_WEAK_UNWIND_INFO
+#define TARGET_USES_WEAK_UNWIND_INFO 1
+
+/* We need to use a nonlocal label for the start of an EH frame: the
+ Darwin linker requires that a coalesced section start with a label.
+ Unfortunately, it also requires that 'debug' sections don't contain
+ labels. */
+#undef FRAME_BEGIN_LABEL
+#define FRAME_BEGIN_LABEL (for_eh ? "EH_frame" : "Lframe")
+
+/* Emit a label for the FDE corresponding to DECL. EMPTY means
+ emit a label for an empty FDE. */
+#define TARGET_ASM_EMIT_UNWIND_LABEL darwin_emit_unwind_label
+
+/* Emit a label to separate the exception table. */
+#define TARGET_ASM_EMIT_EXCEPT_TABLE_LABEL darwin_emit_except_table_label
+
+/* Our profiling scheme doesn't use LP labels and counter words.  */
+
+#define NO_PROFILE_COUNTERS 1
+
+#undef INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP
+
+/* APPLE LOCAL begin static structors in __StaticInit section */
+#define STATIC_INIT_SECTION "__TEXT,__StaticInit,regular,pure_instructions"
+/* APPLE LOCAL end static structors in __StaticInit section */
+
+#undef INVOKE__main
+
+#define TARGET_ASM_CONSTRUCTOR machopic_asm_out_constructor
+#define TARGET_ASM_DESTRUCTOR machopic_asm_out_destructor
+
+/* Always prefix with an underscore. */
+
+#define USER_LABEL_PREFIX "_"
+
+/* Don't output a .file directive. That is only used by the assembler for
+ error reporting. */
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE false
+
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END darwin_file_end
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space "HOST_WIDE_INT_PRINT_UNSIGNED"\n", SIZE)
+
+/* Give ObjC methods pretty symbol names. */
+
+#undef OBJC_GEN_METHOD_LABEL
+#define OBJC_GEN_METHOD_LABEL(BUF,IS_INST,CLASS_NAME,CAT_NAME,SEL_NAME,NUM) \
+ do { if (CAT_NAME) \
+ sprintf (BUF, "%c[%s(%s) %s]", (IS_INST) ? '-' : '+', \
+ (CLASS_NAME), (CAT_NAME), (SEL_NAME)); \
+ else \
+ sprintf (BUF, "%c[%s %s]", (IS_INST) ? '-' : '+', \
+ (CLASS_NAME), (SEL_NAME)); \
+ } while (0)
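+
+/* A minimal sketch of the labels the macro above produces (kept under
+   #if 0 since it is illustrative only):  */
+#if 0
+#include <stdio.h>
+int
+main (void)
+{
+  char buf[64];
+  OBJC_GEN_METHOD_LABEL (buf, 1, "Foo", "Bar", "baz:", 0);
+  printf ("%s\n", buf);  /* prints: -[Foo(Bar) baz:] */
+  OBJC_GEN_METHOD_LABEL (buf, 0, "Foo", NULL, "alloc", 0);
+  printf ("%s\n", buf);  /* prints: +[Foo alloc] */
+  return 0;
+}
+#endif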
+
+/* APPLE LOCAL begin radar 5023725 */
+#undef OBJC_FLAG_ZEROCOST_EXCEPTIONS
+#define OBJC_FLAG_ZEROCOST_EXCEPTIONS \
+ do { \
+ /* APPLE LOCAL begin ARM 5683689 */ \
+ if (darwin_macosx_version_min \
+ && strverscmp (darwin_macosx_version_min, "10.5") < 0) \
+ /* APPLE LOCAL end ARM 5683689 */ \
+ error ("Mac OS X version 10.5 or later is needed for zerocost-exceptions"); \
+ } while (0)
+/* APPLE LOCAL end radar 5023725 */
+/* APPLE LOCAL begin radar 4862848 */
+#undef OBJC_FLAG_OBJC_ABI
+#define OBJC_FLAG_OBJC_ABI \
+ do { if (flag_objc_abi > 2) \
+ { \
+ error ("Unknown objective-c abi flag"); \
+ flag_objc_abi = 1; /* recover */ \
+ } \
+ if (flag_objc_abi == -1) \
+ flag_objc_abi = (flag_next_runtime && TARGET_64BIT) ? 2 : 1; \
+ /* APPLE LOCAL begin ARM hybrid objc-2.0 */ \
+ if (flag_objc_legacy_dispatch == -1) \
+ flag_objc_legacy_dispatch = (flag_objc_abi < 2); \
+ /* APPLE LOCAL end ARM hybrid objc-2.0 */ \
+ /* APPLE LOCAL begin radar 2848255 */ \
+ /* APPLE LOCAL begin radar 5023725 */ \
+ if (flag_objc_abi == 2) \
+ flag_objc_zerocost_exceptions = 1; \
+ if (flag_objc_zerocost_exceptions) \
+ { \
+ flag_exceptions = 1; \
+ flag_objc_sjlj_exceptions = 0; \
+ } \
+ /* APPLE LOCAL end radar 5023725 */ \
+ if (flag_objc_zerocost_exceptions && flag_objc_abi != 2) \
+ { \
+ error ("zero-cost exception is available with new abi only");\
+ flag_objc_abi = 2; /* recover */ \
+ } \
+ /* APPLE LOCAL end radar 2848255 */ \
+ /* APPLE LOCAL begin 5660282 */ \
+ if (darwin_iphoneos_version_min && flag_objc_gc) \
+ { \
+ warning (0, "-fobjc-gc not supported for iPhone OS; ignoring.");\
+ flag_objc_gc = 0; \
+ } \
+ if (darwin_iphoneos_version_min && flag_objc_gc_only) \
+ { \
+ warning (0, "-fobjc-gc-only not supported for iPhone OS; ignoring.");\
+ flag_objc_gc_only = 0; \
+ } \
+ /* APPLE LOCAL end 5660282 */ \
+ } while (0)
+/* APPLE LOCAL end radar 4862848 */
+
+/* APPLE LOCAL begin radar 4531086 */
+#undef OBJC_WARN_OBJC2_FEATURES
+#define OBJC_WARN_OBJC2_FEATURES(MESSAGE) \
+ /* APPLE LOCAL begin ARM 5683689 */ \
+ do { \
+ if (darwin_macosx_version_min \
+ && strverscmp (darwin_macosx_version_min, "10.5") < 0) \
+ /* APPLE LOCAL end ARM 5683689 */ \
+ warning (0, "Mac OS X version 10.5 or later is needed for use of %s", \
+ MESSAGE); \
+ } while (0)
+/* APPLE LOCAL end radar 4531086 */
+
+/* APPLE LOCAL begin radar 6307941 */
+#undef OBJC2_ABI_DISPATCH
+#define OBJC2_ABI_DISPATCH \
+(darwin_macosx_version_min \
+ && strverscmp (darwin_macosx_version_min, "10.6") < 0)
+/* APPLE LOCAL end radar 6307941 */
+
+/* The RTTI data (e.g., __ti4name) is common and public (and static),
+ but it does need to be referenced via indirect PIC data pointers.
+ The machopic_define_symbol calls are telling the machopic subsystem
+ that the name *is* defined in this module, so it doesn't need to
+ make them indirect. */
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ const char *xname = NAME; \
+ if (GET_CODE (XEXP (DECL_RTL (DECL), 0)) != SYMBOL_REF) \
+ xname = IDENTIFIER_POINTER (DECL_NAME (DECL)); \
+ if (! DECL_WEAK (DECL) \
+ && ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL))) \
+ machopic_define_symbol (DECL_RTL (DECL)); \
+ if ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL)) \
+ (* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
+ ASM_OUTPUT_LABEL (FILE, xname); \
+ /* Darwin doesn't support zero-size objects, so give them a \
+ byte. */ \
+ if (tree_low_cst (DECL_SIZE_UNIT (DECL), 1) == 0) \
+ assemble_zeros (1); \
+ } while (0)
+
+/* APPLE LOCAL begin ARM darwin target */
+#ifndef SUBTARGET_ASM_DECLARE_FUNCTION_NAME
+#define SUBTARGET_ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL)
+#endif
+/* APPLE LOCAL end ARM darwin target */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ const char *xname = NAME; \
+ /* APPLE LOCAL ARM darwin target */ \
+ SUBTARGET_ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL); \
+ if (GET_CODE (XEXP (DECL_RTL (DECL), 0)) != SYMBOL_REF) \
+ xname = IDENTIFIER_POINTER (DECL_NAME (DECL)); \
+ if (! DECL_WEAK (DECL) \
+ && ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL))) \
+ machopic_define_symbol (DECL_RTL (DECL)); \
+ if ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL)) \
+ (* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
+ ASM_OUTPUT_LABEL (FILE, xname); \
+ } while (0)
+
+#define ASM_DECLARE_CONSTANT_NAME(FILE, NAME, EXP, SIZE) \
+ do { \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ /* Darwin doesn't support zero-size objects, so give them a \
+ byte. */ \
+ if ((SIZE) == 0) \
+ assemble_zeros (1); \
+ } while (0)
+
+/* Wrap new method names in quotes so the assembler doesn't gag.
+ Make Objective-C internal symbols local. */
+
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { \
+ const char *xname = (NAME); \
+ if (! strcmp (xname, "<pic base>")) \
+ machopic_output_function_base_name(FILE); \
+ else if (xname[0] == '&' || xname[0] == '*') \
+ { \
+ int len = strlen (xname); \
+ if (len > 6 && !strcmp ("$stub", xname + len - 5)) \
+ machopic_validate_stub_or_non_lazy_ptr (xname); \
+ else if (len > 7 && !strcmp ("$stub\"", xname + len - 6)) \
+ machopic_validate_stub_or_non_lazy_ptr (xname); \
+ else if (len > 14 && !strcmp ("$non_lazy_ptr", xname + len - 13)) \
+ machopic_validate_stub_or_non_lazy_ptr (xname); \
+ else if (len > 15 && !strcmp ("$non_lazy_ptr\"", xname + len - 14)) \
+ machopic_validate_stub_or_non_lazy_ptr (xname); \
+ if (xname[1] != '"' && name_needs_quotes (&xname[1])) \
+ fprintf (FILE, "\"%s\"", &xname[1]); \
+ else \
+ fputs (&xname[1], FILE); \
+ } \
+ else if (xname[0] == '+' || xname[0] == '-') \
+ fprintf (FILE, "\"%s\"", xname); \
+ /* APPLE LOCAL radar 5202926 */ \
+ else if (objc_anonymous_local_objc_name (xname)) \
+ fprintf (FILE, "L%s", xname); \
+ else if (!strncmp (xname, ".objc_class_name_", 17)) \
+ fprintf (FILE, "%s", xname); \
+ else if (xname[0] != '"' && name_needs_quotes (xname)) \
+ /* APPLE LOCAL 5782111 */ \
+ asm_fprintf (FILE, "\"%U%s\"", xname); \
+ else \
+ asm_fprintf (FILE, "%U%s", xname); \
+ } while (0)
+
+/* Output before executable code. */
+#undef TEXT_SECTION_ASM_OP
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+/* Output before writable data. */
+
+#undef DATA_SECTION_ASM_OP
+#define DATA_SECTION_ASM_OP "\t.data"
+
+#undef ALIGN_ASM_OP
+#define ALIGN_ASM_OP ".align"
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t%s %d\n", ALIGN_ASM_OP, (LOG))
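+
+/* E.g. ASM_OUTPUT_ALIGN (file, 4) emits "\t.align 4"; the Darwin
+   assembler treats the operand as a power of two, so this requests
+   16-byte alignment.  */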
+
+/* Ensure correct alignment of bss data. */
+
+#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ fputs (".lcomm ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", _new_size, \
+ floor_log2 ((ALIGN) / BITS_PER_UNIT)); \
+ if ((DECL) && ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL))) \
+ { \
+ (* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
+ machopic_define_symbol (DECL_RTL (DECL)); \
+ } \
+ } while (0)
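+
+/* For a 64-byte local BSS object _buf with 64-bit alignment this emits
+   ".lcomm _buf,64,3": SIZE is in bytes, ALIGN is in bits, and the last
+   field is floor_log2 of the byte alignment.  */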
+
+/* The maximum alignment which the object file format can support, in
+   bits.  For Mach-O, the limit is 2^15 bytes, i.e. 2^18 bits.  */
+
+#undef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT (0x8000 * 8)
+
+/* APPLE LOCAL begin AT&T-style stub 4164563 */
+#ifndef MACHOPIC_NL_SYMBOL_PTR_SECTION
+#define MACHOPIC_NL_SYMBOL_PTR_SECTION ".non_lazy_symbol_pointer"
+#endif
+/* APPLE LOCAL end AT&T-style stub 4164563 */
+
+/* Declare the section variables. */
+#ifndef USED_FOR_TARGET
+enum darwin_section_enum {
+#define DEF_SECTION(NAME, FLAGS, DIRECTIVE, OBJC) NAME,
+#include "darwin-sections.def"
+#undef DEF_SECTION
+ NUM_DARWIN_SECTIONS
+};
+extern GTY(()) section * darwin_sections[NUM_DARWIN_SECTIONS];
+#endif
+
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION machopic_select_section
+#define USE_SELECT_SECTION_FOR_FUNCTIONS
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION machopic_select_rtx_section
+#undef TARGET_ASM_UNIQUE_SECTION
+#define TARGET_ASM_UNIQUE_SECTION darwin_unique_section
+#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
+#undef TARGET_ASM_RELOC_RW_MASK
+#define TARGET_ASM_RELOC_RW_MASK machopic_reloc_rw_mask
+
+
+#define ASM_DECLARE_UNRESOLVED_REFERENCE(FILE,NAME) \
+ do { \
+ if (FILE) { \
+ if (MACHOPIC_INDIRECT) \
+ fprintf (FILE, "\t.lazy_reference "); \
+ else \
+ fprintf (FILE, "\t.reference "); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } \
+ } while (0)
+
+#define ASM_DECLARE_CLASS_REFERENCE(FILE,NAME) \
+ do { \
+ if (FILE) { \
+ fprintf (FILE, "\t"); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "=0\n"); \
+ (*targetm.asm_out.globalize_label) (FILE, NAME); \
+ } \
+ } while (0)
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP ".globl "
+#define TARGET_ASM_GLOBALIZE_LABEL darwin_globalize_label
+
+/* APPLE LOCAL begin weak definition */
+#define ASM_WEAK_DEFINITIONIZE_LABEL(FILE, NAME) \
+ do { const char* _x = (NAME); if (!!strncmp (_x, "_OBJC_", 6)) { \
+ fputs (".weak_definition ", FILE); assemble_name (FILE, _x); \
+ fputs ("\n", FILE); }} while (0)
+/* APPLE LOCAL end weak definition */
+
+/* Emit an assembler directive to set visibility for a symbol. Used
+ to support visibility attribute and Darwin's private extern
+ feature. */
+#undef TARGET_ASM_ASSEMBLE_VISIBILITY
+#define TARGET_ASM_ASSEMBLE_VISIBILITY darwin_assemble_visibility
+
+/* Extra attributes for Darwin. */
+#define SUBTARGET_ATTRIBUTE_TABLE \
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ \
+ { "apple_kext_compatibility", 0, 0, false, true, false, \
+ darwin_handle_kext_attribute }, \
+ /* APPLE LOCAL ObjC GC */ \
+ { "objc_gc", 1, 1, false, true, false, darwin_handle_objc_gc_attribute }, \
+ /* APPLE LOCAL radar 5595352 */ \
+ { "NSObject", 0, 0, false, true, false, darwin_handle_nsobject_attribute },\
+ { "weak_import", 0, 0, true, false, false, \
+ darwin_handle_weak_import_attribute }
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%ld", PREFIX, (long)(NUM))
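+
+/* E.g. ASM_GENERATE_INTERNAL_LABEL (buf, "LC", 42) yields "*LC42"; the
+   leading '*' is GCC's convention for "use this name verbatim, without
+   prepending USER_LABEL_PREFIX".  */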
+
+#undef TARGET_ASM_MARK_DECL_PRESERVED
+#define TARGET_ASM_MARK_DECL_PRESERVED darwin_mark_decl_preserved
+
+/* Set on a symbol with SYMBOL_FLAG_FUNCTION or
+ MACHO_SYMBOL_FLAG_VARIABLE to indicate that the function or
+ variable has been defined in this translation unit.
+ When porting Mach-O to new architectures you need to make
+ sure these aren't clobbered by the backend. */
+
+#define MACHO_SYMBOL_FLAG_VARIABLE (SYMBOL_FLAG_MACH_DEP)
+#define MACHO_SYMBOL_FLAG_DEFINED ((SYMBOL_FLAG_MACH_DEP) << 1)
+
+/* Set on a symbol to indicate when fix-and-continue style code
+ generation is being used and the symbol refers to a static symbol
+ that should be rebound from new instances of a translation unit to
+ the original instance of the data. */
+
+#define MACHO_SYMBOL_STATIC ((SYMBOL_FLAG_MACH_DEP) << 2)
+
+/* Symbolic names for various things we might know about a symbol. */
+
+enum machopic_addr_class {
+ MACHOPIC_UNDEFINED,
+ MACHOPIC_DEFINED_DATA,
+ MACHOPIC_UNDEFINED_DATA,
+ MACHOPIC_DEFINED_FUNCTION,
+ MACHOPIC_UNDEFINED_FUNCTION
+};
+
+/* Macros defining the various PIC cases. */
+
+/* APPLE LOCAL mdynamic-no-pic */
+#define MACHO_DYNAMIC_NO_PIC_P (TARGET_MACHO_DYNAMIC_NO_PIC)
+/* APPLE LOCAL begin mach-o cleanup */
+#undef MACHOPIC_INDIRECT
+#define MACHOPIC_INDIRECT (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+#define MACHOPIC_JUST_INDIRECT (MACHO_DYNAMIC_NO_PIC_P)
+#undef MACHOPIC_PURE
+#define MACHOPIC_PURE (flag_pic && ! MACHO_DYNAMIC_NO_PIC_P)
+/* APPLE LOCAL end mach-o cleanup */
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO darwin_encode_section_info
+#undef TARGET_STRIP_NAME_ENCODING
+#define TARGET_STRIP_NAME_ENCODING default_strip_name_encoding
+
+#define GEN_BINDER_NAME_FOR_STUB(BUF,STUB,STUB_LENGTH) \
+ do { \
+ const char *const stub_ = (STUB); \
+ char *buffer_ = (BUF); \
+ strcpy (buffer_, stub_); \
+ if (stub_[0] == '"') \
+ { \
+ strcpy (buffer_ + (STUB_LENGTH) - 1, "_binder\""); \
+ } \
+ else \
+ { \
+ strcpy (buffer_ + (STUB_LENGTH), "_binder"); \
+ } \
+ } while (0)
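+
+/* Sketch of the transformation: "_foo$stub" becomes "_foo$stub_binder",
+   while a quoted "\"_foo$stub\"" becomes "\"_foo$stub_binder\"", with
+   the closing quote re-applied after the suffix.  */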
+
+#define GEN_SYMBOL_NAME_FOR_SYMBOL(BUF,SYMBOL,SYMBOL_LENGTH) \
+ do { \
+ const char *const symbol_ = (SYMBOL); \
+ char *buffer_ = (BUF); \
+ if (name_needs_quotes (symbol_) && symbol_[0] != '"') \
+ { \
+ sprintf (buffer_, "\"%s\"", symbol_); \
+ } \
+ else \
+ { \
+ strcpy (buffer_, symbol_); \
+ } \
+ } while (0)
+
+/* APPLE LOCAL begin ARM 5603763 */
+/* Given a symbol name, remove quotes, prefix it with "L", suffix it
+ with SUFFIX, and re-apply quotes if needed. */
+
+#define GEN_SUFFIXED_NAME_FOR_SYMBOL(BUF,SYMBOL,SYMBOL_LENGTH,SUFFIX) \
+ do { \
+ const char *symbol_ = (SYMBOL); \
+ char *buffer_ = (BUF); \
+ if (symbol_[0] == '"') \
+ { \
+ strcpy (buffer_, "\"L"); \
+ strcpy (buffer_ + 2, symbol_ + 1); \
+ strcpy (buffer_ + (SYMBOL_LENGTH), SUFFIX "\""); \
+ } \
+ else if (name_needs_quotes (symbol_)) \
+ { \
+ strcpy (buffer_, "\"L"); \
+ strcpy (buffer_ + 2, symbol_); \
+ strcpy (buffer_ + (SYMBOL_LENGTH) + 2, SUFFIX "\""); \
+ } \
+ else \
+ { \
+ strcpy (buffer_, "L"); \
+ strcpy (buffer_ + 1, symbol_); \
+ strcpy (buffer_ + (SYMBOL_LENGTH) + 1, SUFFIX); \
+ } \
+ } while (0)
+
+/* Given a symbol name string, create the lazy pointer version
+ of the symbol name. */
+
+#define GEN_LAZY_PTR_NAME_FOR_SYMBOL(BUF,SYMBOL,SYMBOL_LENGTH) \
+ GEN_SUFFIXED_NAME_FOR_SYMBOL (BUF, SYMBOL, SYMBOL_LENGTH, "$lazy_ptr")
+/* APPLE LOCAL end ARM 5603763 */
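+
+/* A minimal usage sketch (kept under #if 0 since it is illustrative
+   only and assumes the compiler's name_needs_quotes is in scope):  */
+#if 0
+static void
+example (void)
+{
+  char buf[32];
+  const char *sym = "_foo";
+  /* "_foo" (length 4) becomes "L_foo$lazy_ptr" in BUF.  */
+  GEN_LAZY_PTR_NAME_FOR_SYMBOL (buf, sym, strlen (sym));
+}
+#endif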
+
+#define EH_FRAME_SECTION_NAME "__TEXT"
+#define EH_FRAME_SECTION_ATTR ",coalesced,no_toc+strip_static_syms+live_support"
+
+/* Java runtime class list. */
+#define JCR_SECTION_NAME "__DATA,jcr,regular,no_dead_strip"
+
+#undef ASM_PREFERRED_EH_DATA_FORMAT
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ /* APPLE LOCAL EH __TEXT __gcc_except_tab 5819051 */ \
+ ((((CODE) == 2 || (CODE) == 0) && (GLOBAL) == 1) \
+ ? (DW_EH_PE_pcrel | DW_EH_PE_indirect | DW_EH_PE_sdata4) : \
+ ((CODE) == 1 || (GLOBAL) == 0) ? DW_EH_PE_pcrel : DW_EH_PE_absptr)
+
+#define ASM_OUTPUT_DWARF_DELTA(FILE,SIZE,LABEL1,LABEL2) \
+ darwin_asm_output_dwarf_delta (FILE, SIZE, LABEL1, LABEL2)
+
+#define ASM_OUTPUT_DWARF_OFFSET(FILE,SIZE,LABEL,BASE) \
+ darwin_asm_output_dwarf_offset (FILE, SIZE, LABEL, BASE)
+
+/* APPLE LOCAL 64-bit eric */
+/* remove ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX */
+
+/* Experimentally, putting jump tables in text is faster on SPEC.
+   It is also needed for correctness with coalesced functions.  */
+
+#ifndef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+#endif
+
+/* APPLE LOCAL begin OS pragma hook */
+#define REGISTER_OS_PRAGMAS() \
+ do { \
+ /* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */ \
+ c_register_pragma (0, "pack", darwin_pragma_pack); \
+ /* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */ \
+ } while (0)
+/* APPLE LOCAL end OS pragma hook */
+
+#define TARGET_TERMINATE_DW2_EH_FRAME_INFO false
+
+#define TARGET_ASM_INIT_SECTIONS darwin_init_sections
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION darwin_asm_named_section
+
+/* Handle pragma weak and pragma pack. */
+#define HANDLE_SYSV_PRAGMA 1
+
+#define HANDLE_PRAGMA_PACK_PUSH_POP 1
+
+#define DARWIN_REGISTER_TARGET_PRAGMAS() \
+ do { \
+ /* APPLE LOCAL begin pragma mark 5614511 */ \
+ /* Removed mark. */ \
+ /* APPLE LOCAL end pragma mark 5614511 */ \
+ c_register_pragma (0, "options", darwin_pragma_options); \
+ c_register_pragma (0, "segment", darwin_pragma_ignore); \
+ /* APPLE LOCAL pragma fenv */ \
+ c_register_pragma ("GCC", "fenv", darwin_pragma_fenv); \
+ c_register_pragma (0, "unused", darwin_pragma_unused); \
+ c_register_pragma (0, "ms_struct", darwin_pragma_ms_struct); \
+ /* APPLE LOCAL begin pragma reverse_bitfields */ \
+ c_register_pragma (0, "reverse_bitfields", \
+ darwin_pragma_reverse_bitfields); \
+ /* APPLE LOCAL end pragma reverse_bitfields */ \
+ /* APPLE LOCAL begin optimization pragmas 3124235/3420242 */\
+ c_register_pragma (0, "optimization_level", \
+ darwin_pragma_opt_level); \
+ c_register_pragma (0, "optimize_for_size", \
+ darwin_pragma_opt_size); \
+ c_register_pragma ("GCC", "optimization_level", \
+ darwin_pragma_opt_level); \
+ c_register_pragma ("GCC", "optimize_for_size", \
+ darwin_pragma_opt_size); \
+ /* APPLE LOCAL end optimization pragmas 3124235/3420242 */ \
+ /* APPLE LOCAL begin too many changes confuse diff */ \
+ } while (0)
+/* APPLE LOCAL end too many changes confuse diff */
+
+/* APPLE LOCAL begin insert assembly ".abort" directive on fatal error */
+#define EXIT_FROM_FATAL_DIAGNOSTIC(status) abort_assembly_and_exit (status)
+extern void abort_assembly_and_exit (int status) ATTRIBUTE_NORETURN;
+/* APPLE LOCAL end insert assembly ".abort" directive on fatal error */
+
+/* APPLE LOCAL Macintosh alignment 2002-2-13 --ff */
+#define PEG_ALIGN_FOR_MAC68K(DESIRED) MIN ((DESIRED), 16)
+
+/* APPLE LOCAL begin KEXT double destructor */
+/* Need a mechanism to tell whether a C++ operator delete is empty so
+   we overload TREE_SIDE_EFFECTS here (it is unused for FUNCTION_DECLs).
+   Fromage, c'est moi!  ("Cheese, that's me!")  */
+#define CHECK_TRIVIAL_FUNCTION(DECL) \
+ do { \
+ const char *_name = IDENTIFIER_POINTER (DECL_NAME (DECL)); \
+ if (TARGET_KEXTABI && DECL_SAVED_TREE (DECL) \
+ && strstr (_name, "operator delete") \
+ && TREE_CODE (DECL_SAVED_TREE (DECL)) == COMPOUND_STMT \
+ && compound_body_is_empty_p ( \
+ COMPOUND_BODY (DECL_SAVED_TREE (DECL))))\
+ TREE_SIDE_EFFECTS (DECL) = 1; \
+ } while (0)
+
+/* gcc3 initialises the vptr field of all objects so that it points at the
+ first virtual function slot, NOT the base address of the vtable proper.
+ This is different from gcc2.95 which always initialised the vptr to
+ point at the base address of the vtable. The difference here is 8 bytes.
+ So, for 2.95 compatibility, we need to:
+
+ (1) subtract 8 from the vptr initialiser, and
+ (2) add 2 to every vfunc index. (2 * 4 == 8.)
+
+ This is getting ever cheesier. */
+
+#define VPTR_INITIALIZER_ADJUSTMENT 8
+#define ADJUST_VTABLE_INDEX(IDX, VTBL) \
+ do { \
+ if (TARGET_KEXTABI == 1) \
+ (IDX) = fold (build2 (PLUS_EXPR, TREE_TYPE (IDX), IDX, size_int (2))); \
+ } while (0)
+/* APPLE LOCAL end KEXT double destructor */
+
+/* APPLE LOCAL begin zerofill 20020218 --turly */
+/* This keeps uninitialized data from bloating the data when -fno-common.
+ Radar 2863107. */
+#define ASM_OUTPUT_ZEROFILL(FILE, NAME, SECT, SIZE, ALIGNMENT) \
+ do { \
+ section *darwin_sect = SECT; \
+ \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ if (_new_size == 0) _new_size = 1; \
+ fputs (".zerofill ", (FILE)); \
+ if (darwin_sect->common.flags & SECTION_NAMED) \
+ { \
+ fputs (darwin_sect->named.name, (FILE)); \
+ fputs (", ", (FILE)); \
+ } \
+ else \
+ fputs ("__DATA, __common, ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ", " HOST_WIDE_INT_PRINT_DEC, _new_size); \
+ fprintf ((FILE), ", " HOST_WIDE_INT_PRINT_DEC "\n", \
+ (HOST_WIDE_INT) (ALIGNMENT)); \
+ } while (0)
+/* APPLE LOCAL end zerofill 20020218 --turly */
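+
+/* For instance, a 4096-byte object _big in the default section comes
+   out as ".zerofill __DATA, __common, _big, 4096, 5", where the last
+   field is the log2 alignment that the Mach-O .zerofill directive
+   expects (here assumed to be passed by the caller).  */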
+
+/* APPLE LOCAL begin CW asm blocks */
+#define IASM_SPECIAL_LABEL(ID) darwin_iasm_special_label (ID)
+/* APPLE LOCAL end CW asm blocks */
+
+#undef ASM_APP_ON
+#define ASM_APP_ON ""
+#undef ASM_APP_OFF
+#define ASM_APP_OFF ""
+
+void darwin_register_frameworks (const char *, const char *, int);
+void darwin_register_objc_includes (const char *, const char *, int);
+#define TARGET_EXTRA_PRE_INCLUDES darwin_register_objc_includes
+#define TARGET_EXTRA_INCLUDES darwin_register_frameworks
+
+void add_framework_path (char *);
+#define TARGET_OPTF add_framework_path
+
+#define TARGET_POSIX_IO
+
+/* All new versions of Darwin have C99 functions. */
+
+#define TARGET_C99_FUNCTIONS 1
+
+#define WINT_TYPE "int"
+
+/* APPLE LOCAL begin iframework for 4.3 4094959 */
+#define TARGET_HANDLE_C_OPTION(CODE, ARG, VALUE) \
+ darwin_handle_c_option (CODE, ARG, VALUE)
+/* APPLE LOCAL end iframework for 4.3 4094959 */
+
+/* APPLE LOCAL begin isysroot 5083137 */
+/* Allow -sysroot to select a target system SDK. */
+#define GCC_DRIVER_HOST_INITIALIZATION1 \
+ do { \
+ int i; \
+ for (i = 0; i < argc; ++i) \
+ { \
+ if (strcmp (argv[i], "-isysroot") == 0) \
+ { \
+ if (argv[i][9]) \
+ target_system_root = &argv[i][9]; \
+ else if (i + 1 < argc) \
+ { \
+ target_system_root = argv[i+1]; \
+ ++i; \
+ } \
+ } \
+ } \
+ } while (0)
+
+#define SYSROOT_PRIORITY PREFIX_PRIORITY_FIRST
+/* APPLE LOCAL end isysroot 5083137 */
+
+/* Every program on Darwin links against libSystem, which contains the
+   pthread routines, so there is no need to link anything extra when
+   doing threaded work.  */
+
+#undef GOMP_SELF_SPECS
+#define GOMP_SELF_SPECS ""
+
+/* Darwin can't support anchors until we can cope with the adjustments
+   to size that ASM_DECLARE_OBJECT_NAME and ASM_DECLARE_CONSTANT_NAME
+   make when outputting members of an anchor block, and until the
+   linker can be taught to keep them together, or we find some other
+   suitable code-gen technique.  */
+
+#if 0
+#define TARGET_ASM_OUTPUT_ANCHOR darwin_asm_output_anchor
+#else
+#define TARGET_ASM_OUTPUT_ANCHOR NULL
+#endif
+
+/* Attempt to turn on execute permission for the stack.  This may be
+   used by INITIALIZE_TRAMPOLINE if the target needs it (that is, if
+   the target machine can change execute permissions on a page).
+
+ There is no way to query the execute permission of the stack, so
+ we always issue the mprotect() call.
+
+ Unfortunately it is not possible to make this namespace-clean.
+
+ Also note that no errors should be emitted by this code; it is
+ considered dangerous for library calls to send messages to
+ stdout/stderr. */
+
+#define ENABLE_EXECUTE_STACK \
+extern void __enable_execute_stack (void *); \
+void \
+__enable_execute_stack (void *addr) \
+{ \
+ extern int mprotect (void *, size_t, int); \
+ extern int getpagesize (void); \
+ static int size; \
+ static long mask; \
+ \
+ char *page, *end; \
+ \
+ if (size == 0) \
+ { \
+ size = getpagesize(); \
+ mask = ~((long) size - 1); \
+ } \
+ \
+ page = (char *) (((long) addr) & mask); \
+ end = (char *) ((((long) (addr + (TARGET_64BIT ? 48 : 40))) & mask) + size); \
+ \
+ /* 7 == PROT_READ | PROT_WRITE | PROT_EXEC */ \
+ (void) mprotect (page, end - page, 7); \
+}
+
+/* For Apple KEXTs, we make the constructors return this to match gcc
+ 2.95. */
+#define TARGET_CXX_CDTOR_RETURNS_THIS (darwin_kextabi_p)
+extern int flag_mkernel;
+extern int flag_apple_kext;
+#define TARGET_KEXTABI flag_apple_kext
+
+/* APPLE LOCAL begin radar 4985544 - radar 5096648 */
+#define CHECK_FORMAT_CFSTRING(ARG,NUM,ATTR) objc_check_format_cfstring (ARG,NUM,ATTR)
+#define CFSTRING_TYPE_NODE(T) darwin_cfstring_type_node (T)
+/* APPLE LOCAL end radar 4985544 - radar 5096648 */
+/* APPLE LOCAL radar 5195402 */
+#define CFSTRING_TYPE_CHECK(T) objc_check_cfstringref_type (T)
+
+/* APPLE LOCAL begin mainline 2007-06-14 5235474 */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+/* APPLE LOCAL begin ARM 5683689 */
+extern void darwin_default_min_version (int * argc, char *** argv,
+ enum darwin_version_type vers_type);
+#define GCC_DRIVER_HOST_INITIALIZATION \
+ /* APPLE LOCAL isysroot 5083137 */ \
+ GCC_DRIVER_HOST_INITIALIZATION1; \
+ darwin_default_min_version (&argc, &argv, DARWIN_DEFAULT_VERSION_TYPE)
+/* APPLE LOCAL end ARM 5683689 */
+#endif /* CROSS_DIRECTORY_STRUCTURE */
+/* APPLE LOCAL end mainline 2007-06-14 5235474 */
+
+#endif /* CONFIG_DARWIN_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin.opt b/gcc-4.2.1-5666.3/gcc/config/darwin.opt
new file mode 100644
index 000000000..f87abb558
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin.opt
@@ -0,0 +1,77 @@
+; Processor-independent options for Darwin.
+
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mfix-and-continue
+Target Report Var(darwin_fix_and_continue)
+Generate code suitable for fast turn-around debugging
+
+; APPLE LOCAL begin ARM 5683689
+mmacosx-version-min=
+Target Joined Report Var(darwin_macosx_version_min) Init(NULL)
+The earliest Mac OS X version on which this program will run
+
+miphoneos-version-min=
+Target Joined Report Var(darwin_iphoneos_version_min) Init(NULL)
+The earliest iPhone OS version on which this program will run
+; APPLE LOCAL end ARM 5683689
+
+mone-byte-bool
+Target RejectNegative Report Var(darwin_one_byte_bool)
+Set sizeof(bool) to 1
+
+; APPLE LOCAL begin AT&T-style stub 4164563
+matt-stubs
+Target Report Var(darwin_macho_att_stub) Init(1)
+Generate AT&T-style stubs for Mach-O
+; APPLE LOCAL end AT&T-style stub 4164563
+
+; APPLE LOCAL begin constant cfstrings
+mconstant-cfstrings
+Target Report Var(darwin_constant_cfstrings) Init(-1)
+Generate compile-time CFString objects
+
+mwarn-nonportable-cfstrings
+Target Report Var(darwin_warn_nonportable_cfstrings) Init(1)
+Warn if constant CFString objects contain non-portable characters
+
+mpascal-strings
+Target Report Var(darwin_pascal_strings)
+Allow use of Pascal strings
+; APPLE LOCAL end constant cfstrings
+
+; APPLE LOCAL begin mdynamic-no-pic
+mdynamic-no-pic
+Target Report Mask(MACHO_DYNAMIC_NO_PIC)
+Generate code suitable for executables (NOT shared libs)
+; APPLE LOCAL end mdynamic-no-pic
+
+fapple-kext
+Target Report Var(flag_apple_kext)
+Generate code for Darwin loadable kernel extensions
+
+mkernel
+Target Report Var(flag_mkernel)
+Generate code for the kernel or loadable kernel extensions
+
+; APPLE LOCAL begin 5971844
+mdarwin-rtl-pre-ignore-critical-edges
+Target Report Var(flag_darwin_rtl_pre_ignore_critical_edges)
+; APPLE LOCAL end 5971844
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin10.h b/gcc-4.2.1-5666.3/gcc/config/darwin10.h
new file mode 100644
index 000000000..426071eb2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin10.h
@@ -0,0 +1,2 @@
+/* APPLE LOCAL .file/.loc 6349436 */
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
diff --git a/gcc-4.2.1-5666.3/gcc/config/darwin9.h b/gcc-4.2.1-5666.3/gcc/config/darwin9.h
new file mode 100644
index 000000000..2965f1915
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/darwin9.h
@@ -0,0 +1,13 @@
+/* The linker can generate branch islands. */
+#define DARWIN_LINKER_GENERATES_ISLANDS 1
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = (SIZE); \
+ fprintf ((FILE), ".comm "); \
+ assemble_name ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ _new_size, floor_log2 ((ALIGN) / BITS_PER_UNIT)); \
+ } while (0)
diff --git a/gcc-4.2.1-5666.3/gcc/config/dbx.h b/gcc-4.2.1-5666.3/gcc/config/dbx.h
new file mode 100644
index 000000000..3c8153428
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/dbx.h
@@ -0,0 +1,28 @@
+/* Prefer DBX (stabs) debugging information.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This file causes gcc to prefer using DBX (stabs) debugging
+ information. The configure script will add a #include of this file
+ to tm.h when --with-stabs is used for certain targets. */
+
+#define DBX_DEBUGGING_INFO 1
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/gcc-4.2.1-5666.3/gcc/config/dbxcoff.h b/gcc-4.2.1-5666.3/gcc/config/dbxcoff.h
new file mode 100644
index 000000000..b6eaec921
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/dbxcoff.h
@@ -0,0 +1,63 @@
+/* Definitions needed when using stabs embedded in COFF sections.
+ Copyright (C) 1996, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This file may be included by any COFF target which wishes to
+ support -gstabs generating stabs in sections, as produced by gas
+ and understood by gdb. */
+
+/* Output DBX (stabs) debugging information if doing -gstabs. */
+
+#define DBX_DEBUGGING_INFO 1
+
+/* Generate SDB debugging information by default. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
+#endif
+
+/* Be function-relative for block and source line stab directives. */
+
+#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+
+/* but, to make this work, functions must appear prior to line info. */
+
+#define DBX_FUNCTION_FIRST
+
+/* Generate a blank trailing N_SO to mark the end of the .o file, since
+ we can't depend upon the linker to mark .o file boundaries with
+ embedded stabs. */
+
+#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+
+/* Like block addresses, stabs line numbers are relative to the
+ current function. */
+
+#define DBX_LINES_FUNCTION_RELATIVE 1
+
+/* When generating stabs debugging, use N_BINCL entries. */
+
+#undef DBX_USE_BINCL
+#define DBX_USE_BINCL
+
+/* There is no limit to the length of stabs strings. */
+
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/dbxelf.h b/gcc-4.2.1-5666.3/gcc/config/dbxelf.h
new file mode 100644
index 000000000..a3176868c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/dbxelf.h
@@ -0,0 +1,64 @@
+/* Definitions needed when using stabs embedded in ELF sections.
+ Copyright (C) 1999, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This file may be included by any ELF target which wishes to
+ support -gstabs generating stabs in sections, as produced by gas
+ and understood by gdb. */
+
+#ifndef GCC_DBX_ELF_H
+#define GCC_DBX_ELF_H
+
+/* Output DBX (stabs) debugging information if doing -gstabs. */
+
+#define DBX_DEBUGGING_INFO 1
+
+/* Make LBRAC and RBRAC addresses relative to the start of the
+ function. The native Solaris stabs debugging format works this
+ way, gdb expects it, and it reduces the number of relocation
+ entries... */
+
+#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+
+/* ... but, to make this work, functions must appear prior to line info. */
+
+#define DBX_FUNCTION_FIRST
+
+/* When generating stabs debugging, use N_BINCL entries. */
+
+#define DBX_USE_BINCL
+
+/* There is no limit to the length of stabs strings. */
+
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
+
+/* Like block addresses, stabs line numbers are relative to the
+ current function. */
+
+#define DBX_LINES_FUNCTION_RELATIVE 1
+
+/* Generate a blank trailing N_SO to mark the end of the .o file, since
+ we can't depend upon the linker to mark .o file boundaries with
+ embedded stabs. */
+
+#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+
+#endif /* ! GCC_DBX_ELF_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/dfp-bit.c b/gcc-4.2.1-5666.3/gcc/config/dfp-bit.c
new file mode 100644
index 000000000..c9374c51f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/dfp-bit.c
@@ -0,0 +1,541 @@
+/* This is a software decimal floating point library.
+ Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* This implements IEEE 754R decimal floating point arithmetic, but
+ does not provide a mechanism for setting the rounding mode, or for
+ generating or handling exceptions. Conversions between decimal
+ floating point types and other types depend on C library functions.
+
+ Contributed by Ben Elliston <bje@au.ibm.com>. */
+
+/* The intended way to use this file is to make two copies, add `#define '
+ to one copy, then compile both copies and add them to libgcc.a. */
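+
+/* For example (a plausible build line; the real libgcc makefile
+   fragment may differ): compiling this file with -DWIDTH=64
+   -DL_addsub_dd selects the _Decimal64 add/subtract entry points
+   guarded below, while -DWIDTH=32 -DL_conv_sd builds only the
+   host/IEEE conversion helpers for _Decimal32.  */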
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#include "config/dfp-bit.h"
+
+/* Forward declarations. */
+#if WIDTH == 32 || WIDTH_TO == 32
+void __host_to_ieee_32 (_Decimal32 in, decimal32 *out);
+void __ieee_to_host_32 (decimal32 in, _Decimal32 *out);
+#endif
+#if WIDTH == 64 || WIDTH_TO == 64
+void __host_to_ieee_64 (_Decimal64 in, decimal64 *out);
+void __ieee_to_host_64 (decimal64 in, _Decimal64 *out);
+#endif
+#if WIDTH == 128 || WIDTH_TO == 128
+void __host_to_ieee_128 (_Decimal128 in, decimal128 *out);
+void __ieee_to_host_128 (decimal128 in, _Decimal128 *out);
+#endif
+
+/* A pointer to a unary decNumber operation. */
+typedef decNumber* (*dfp_unary_func)
+ (decNumber *, decNumber *, decContext *);
+
+/* A pointer to a binary decNumber operation. */
+typedef decNumber* (*dfp_binary_func)
+ (decNumber *, decNumber *, decNumber *, decContext *);
+
+extern unsigned long __dec_byte_swap (unsigned long);
+
+/* Unary operations. */
+
+static inline DFP_C_TYPE
+dfp_unary_op (dfp_unary_func op, DFP_C_TYPE arg)
+{
+ DFP_C_TYPE result;
+ decContext context;
+ decNumber arg1, res;
+ IEEE_TYPE a, encoded_result;
+
+ HOST_TO_IEEE (arg, &a);
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ TO_INTERNAL (&a, &arg1);
+
+ /* Perform the operation. */
+ op (&res, &arg1, &context);
+
+ if (CONTEXT_TRAPS && CONTEXT_ERRORS (context))
+ DFP_RAISE (0);
+
+ TO_ENCODED (&encoded_result, &res, &context);
+ IEEE_TO_HOST (encoded_result, &result);
+ return result;
+}
+
+/* Binary operations. */
+
+static inline DFP_C_TYPE
+dfp_binary_op (dfp_binary_func op, DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ DFP_C_TYPE result;
+ decContext context;
+ decNumber arg1, arg2, res;
+ IEEE_TYPE a, b, encoded_result;
+
+ HOST_TO_IEEE (arg_a, &a);
+ HOST_TO_IEEE (arg_b, &b);
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ TO_INTERNAL (&a, &arg1);
+ TO_INTERNAL (&b, &arg2);
+
+ /* Perform the operation. */
+ op (&res, &arg1, &arg2, &context);
+
+ if (CONTEXT_TRAPS && CONTEXT_ERRORS (context))
+ DFP_RAISE (0);
+
+ TO_ENCODED (&encoded_result, &res, &context);
+ IEEE_TO_HOST (encoded_result, &result);
+ return result;
+}
+
+/* Comparison operations. */
+
+static inline int
+dfp_compare_op (dfp_binary_func op, DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ IEEE_TYPE a, b;
+ decContext context;
+ decNumber arg1, arg2, res;
+ int result;
+
+ HOST_TO_IEEE (arg_a, &a);
+ HOST_TO_IEEE (arg_b, &b);
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ TO_INTERNAL (&a, &arg1);
+ TO_INTERNAL (&b, &arg2);
+
+ /* Perform the comparison. */
+ op (&res, &arg1, &arg2, &context);
+
+ if (CONTEXT_TRAPS && CONTEXT_ERRORS (context))
+ DFP_RAISE (0);
+
+ if (decNumberIsNegative (&res))
+ result = -1;
+ else if (decNumberIsZero (&res))
+ result = 0;
+ else
+ result = 1;
+
+ return result;
+}
+
+
+#if defined(L_conv_sd)
+void
+__host_to_ieee_32 (_Decimal32 in, decimal32 *out)
+{
+ uint32_t t;
+
+ if (!LIBGCC2_FLOAT_WORDS_BIG_ENDIAN)
+ {
+ memcpy (&t, &in, 4);
+ t = __dec_byte_swap (t);
+ memcpy (out, &t, 4);
+ }
+ else
+ memcpy (out, &in, 4);
+}
+
+void
+__ieee_to_host_32 (decimal32 in, _Decimal32 *out)
+{
+ uint32_t t;
+
+ if (!LIBGCC2_FLOAT_WORDS_BIG_ENDIAN)
+ {
+ memcpy (&t, &in, 4);
+ t = __dec_byte_swap (t);
+ memcpy (out, &t, 4);
+ }
+ else
+ memcpy (out, &in, 4);
+}
+#endif /* L_conv_sd */
+
+#if defined(L_conv_dd)
+static void
+__swap64 (char *src, char *dst)
+{
+ uint32_t t1, t2;
+
+ if (!LIBGCC2_FLOAT_WORDS_BIG_ENDIAN)
+ {
+ memcpy (&t1, src, 4);
+ memcpy (&t2, src + 4, 4);
+ t1 = __dec_byte_swap (t1);
+ t2 = __dec_byte_swap (t2);
+ memcpy (dst, &t2, 4);
+ memcpy (dst + 4, &t1, 4);
+ }
+ else
+ memcpy (dst, src, 8);
+}
+
+void
+__host_to_ieee_64 (_Decimal64 in, decimal64 *out)
+{
+ __swap64 ((char *) &in, (char *) out);
+}
+
+void
+__ieee_to_host_64 (decimal64 in, _Decimal64 *out)
+{
+ __swap64 ((char *) &in, (char *) out);
+}
+#endif /* L_conv_dd */
+
+#if defined(L_conv_td)
+static void
+__swap128 (char *src, char *dst)
+{
+ uint32_t t1, t2, t3, t4;
+
+ if (!LIBGCC2_FLOAT_WORDS_BIG_ENDIAN)
+ {
+ memcpy (&t1, src, 4);
+ memcpy (&t2, src + 4, 4);
+ memcpy (&t3, src + 8, 4);
+ memcpy (&t4, src + 12, 4);
+ t1 = __dec_byte_swap (t1);
+ t2 = __dec_byte_swap (t2);
+ t3 = __dec_byte_swap (t3);
+ t4 = __dec_byte_swap (t4);
+ memcpy (dst, &t4, 4);
+ memcpy (dst + 4, &t3, 4);
+ memcpy (dst + 8, &t2, 4);
+ memcpy (dst + 12, &t1, 4);
+ }
+ else
+ memcpy (dst, src, 16);
+}
+
+void
+__host_to_ieee_128 (_Decimal128 in, decimal128 *out)
+{
+ __swap128 ((char *) &in, (char *) out);
+}
+
+void
+__ieee_to_host_128 (decimal128 in, _Decimal128 *out)
+{
+ __swap128 ((char *) &in, (char *) out);
+}
+#endif /* L_conv_td */
+
+#if defined(L_addsub_sd) || defined(L_addsub_dd) || defined(L_addsub_td)
+DFP_C_TYPE
+DFP_ADD (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ return dfp_binary_op (decNumberAdd, arg_a, arg_b);
+}
+
+DFP_C_TYPE
+DFP_SUB (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ return dfp_binary_op (decNumberSubtract, arg_a, arg_b);
+}
+#endif /* L_addsub */
+
+#if defined(L_mul_sd) || defined(L_mul_dd) || defined(L_mul_td)
+DFP_C_TYPE
+DFP_MULTIPLY (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ return dfp_binary_op (decNumberMultiply, arg_a, arg_b);
+}
+#endif /* L_mul */
+
+#if defined(L_div_sd) || defined(L_div_dd) || defined(L_div_td)
+DFP_C_TYPE
+DFP_DIVIDE (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ return dfp_binary_op (decNumberDivide, arg_a, arg_b);
+}
+#endif /* L_div */
+
+#if defined (L_eq_sd) || defined (L_eq_dd) || defined (L_eq_td)
+CMPtype
+DFP_EQ (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For EQ return zero for true, nonzero for false. */
+ return stat != 0;
+}
+#endif /* L_eq */
+
+#if defined (L_ne_sd) || defined (L_ne_dd) || defined (L_ne_td)
+CMPtype
+DFP_NE (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For NE return nonzero for true, zero for false. */
+ return stat != 0;
+}
+#endif /* L_ne */
+
+#if defined (L_lt_sd) || defined (L_lt_dd) || defined (L_lt_td)
+CMPtype
+DFP_LT (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For LT return -1 (<0) for true, 1 for false. */
+ return (stat == -1) ? -1 : 1;
+}
+#endif /* L_lt */
+
+#if defined (L_gt_sd) || defined (L_gt_dd) || defined (L_gt_td)
+CMPtype
+DFP_GT (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For GT return 1 (>0) for true, -1 for false. */
+ return (stat == 1) ? 1 : -1;
+}
+#endif
+
+#if defined (L_le_sd) || defined (L_le_dd) || defined (L_le_td)
+CMPtype
+DFP_LE (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For LE return 0 (<= 0) for true, 1 for false. */
+ return stat == 1;
+}
+#endif /* L_le */
+
+#if defined (L_ge_sd) || defined (L_ge_dd) || defined (L_ge_td)
+CMPtype
+DFP_GE (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ int stat;
+ stat = dfp_compare_op (decNumberCompare, arg_a, arg_b);
+ /* For GE return 1 (>=0) for true, -1 for false. */
+ return (stat != -1) ? 1 : -1;
+}
+#endif /* L_ge */
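+
+/* Editorial sketch: how a compiler is expected to consume the return
+ conventions documented above.  A source-level `a < b' on _Decimal32
+ lowers to a libcall whose result is compared against zero; the exact
+ lowering below is an assumption for illustration only, so the block
+ is guarded out. */
+#if 0
+extern CMPtype __ltsd2 (_Decimal32, _Decimal32);
+int
+example_lt (_Decimal32 a, _Decimal32 b)
+{
+  /* __ltsd2 returns a value less than zero only when a < b. */
+  return __ltsd2 (a, b) < 0;
+}
+#endif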
+
+#define BUFMAX 128
+
+#if defined (L_sd_to_dd) || defined (L_sd_to_td) || defined (L_dd_to_sd) \
+ || defined (L_dd_to_td) || defined (L_td_to_sd) || defined (L_td_to_dd)
+DFP_C_TYPE_TO
+DFP_TO_DFP (DFP_C_TYPE f_from)
+{
+ DFP_C_TYPE_TO f_to;
+ IEEE_TYPE s_from;
+ IEEE_TYPE_TO s_to;
+ decNumber d;
+ decContext context;
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ HOST_TO_IEEE (f_from, &s_from);
+ TO_INTERNAL (&s_from, &d);
+ TO_ENCODED_TO (&s_to, &d, &context);
+ if (CONTEXT_TRAPS && (context.status & DEC_Inexact) != 0)
+ DFP_RAISE (DEC_Inexact);
+
+ IEEE_TO_HOST_TO (s_to, &f_to);
+ return f_to;
+}
+#endif
+
+#if defined (L_sd_to_si) || defined (L_dd_to_si) || defined (L_td_to_si) \
+ || defined (L_sd_to_di) || defined (L_dd_to_di) || defined (L_td_to_di) \
+ || defined (L_sd_to_usi) || defined (L_dd_to_usi) || defined (L_td_to_usi) \
+ || defined (L_sd_to_udi) || defined (L_dd_to_udi) || defined (L_td_to_udi)
+INT_TYPE
+DFP_TO_INT (DFP_C_TYPE x)
+{
+ /* decNumber's decimal* types have the same format as C's _Decimal*
+ types, but they have different calling conventions. */
+
+ IEEE_TYPE s;
+ char buf[BUFMAX];
+ char *pos;
+ decNumber qval, n1, n2;
+ decContext context;
+
+ decContextDefault (&context, CONTEXT_INIT);
+ /* Need non-default rounding mode here. */
+ context.round = DEC_ROUND_DOWN;
+
+ HOST_TO_IEEE (x, &s);
+ TO_INTERNAL (&s, &n1);
+ /* Rescale if the exponent is less than zero. */
+ decNumberToIntegralValue (&n2, &n1, &context);
+ /* Get a value to use for the quantize call. */
+ decNumberFromString (&qval, (char *) "1.0", &context);
+ /* Force the exponent to zero. */
+ decNumberQuantize (&n1, &n2, &qval, &context);
+ /* This is based on text in N1107 section 5.1; it might turn out to be
+ undefined behavior instead. */
+ if (context.status & DEC_Invalid_operation)
+ {
+#if defined (L_sd_to_si) || defined (L_dd_to_si) || defined (L_td_to_si)
+ if (decNumberIsNegative(&n2))
+ return INT_MIN;
+ else
+ return INT_MAX;
+#elif defined (L_sd_to_di) || defined (L_dd_to_di) || defined (L_td_to_di)
+ if (decNumberIsNegative(&n2))
+ /* Find a defined constant that will work here. */
+ return (-9223372036854775807LL - 1LL);
+ else
+ /* Find a defined constant that will work here. */
+ return 9223372036854775807LL;
+#elif defined (L_sd_to_usi) || defined (L_dd_to_usi) || defined (L_td_to_usi)
+ return UINT_MAX;
+#elif defined (L_sd_to_udi) || defined (L_dd_to_udi) || defined (L_td_to_udi)
+ /* Find a defined constant that will work here. */
+ return 18446744073709551615ULL;
+#endif
+ }
+ /* Get a string, which at this point will not include an exponent. */
+ decNumberToString (&n1, buf);
+ /* Ignore the fractional part. */
+ pos = strchr (buf, '.');
+ if (pos)
+ *pos = 0;
+ /* Use a C library function to convert to the integral type. */
+ return STR_TO_INT (buf, NULL, 10);
+}
+#endif
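+
+/* Editorial note, a worked trace of the string-based path above,
+ assuming WIDTH == 64 and INT_KIND == 1 (so this function is __fixddsi
+ and STR_TO_INT is strtol): for x = 9.75 (as _Decimal64),
+ decNumberToIntegralValue with DEC_ROUND_DOWN yields 9, the quantize
+ against "1.0" pins the exponent so no exponent notation is printed,
+ decNumberToString produces "9.0", the '.' is overwritten with NUL,
+ and strtol ("9", NULL, 10) returns 9. */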
+
+#if defined (L_si_to_sd) || defined (L_si_to_dd) || defined (L_si_to_td) \
+ || defined (L_di_to_sd) || defined (L_di_to_dd) || defined (L_di_to_td) \
+ || defined (L_usi_to_sd) || defined (L_usi_to_dd) || defined (L_usi_to_td) \
+ || defined (L_udi_to_sd) || defined (L_udi_to_dd) || defined (L_udi_to_td)
+DFP_C_TYPE
+INT_TO_DFP (INT_TYPE i)
+{
+ DFP_C_TYPE f;
+ IEEE_TYPE s;
+ char buf[BUFMAX];
+ decContext context;
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ /* Use a C library function to get a floating point string. */
+ sprintf (buf, INT_FMT ".0", CAST_FOR_FMT(i));
+ /* Convert from the floating point string to a decimal* type. */
+ FROM_STRING (&s, buf, &context);
+ IEEE_TO_HOST (s, &f);
+ if (CONTEXT_TRAPS && (context.status & DEC_Inexact) != 0)
+ DFP_RAISE (DEC_Inexact);
+ return f;
+}
+#endif
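+
+/* Editorial note: the inverse path above is symmetric.  With
+ INT_KIND == 1, INT_TO_DFP (42) runs sprintf (buf, "%d.0", 42) to get
+ "42.0", and FROM_STRING turns that text into the encoded decimal
+ type; a value with more digits than the target format holds (e.g. a
+ large int into _Decimal32's 7 digits) is rounded and reported as
+ inexact through the context status. */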
+
+#if defined (L_sd_to_sf) || defined (L_dd_to_sf) || defined (L_td_to_sf) \
+ || defined (L_sd_to_df) || defined (L_dd_to_df) || defined (L_td_to_df) \
+ || ((defined (L_sd_to_xf) || defined (L_dd_to_xf) || defined (L_td_to_xf)) \
+ && LIBGCC2_HAS_XF_MODE)
+BFP_TYPE
+DFP_TO_BFP (DFP_C_TYPE f)
+{
+ IEEE_TYPE s;
+ char buf[BUFMAX];
+
+ HOST_TO_IEEE (f, &s);
+ /* Write the value to a string. */
+ TO_STRING (&s, buf);
+ /* Read it as the binary floating point type and return that. */
+ return STR_TO_BFP (buf, NULL);
+}
+#endif
+
+#if defined (L_sf_to_sd) || defined (L_sf_to_dd) || defined (L_sf_to_td) \
+ || defined (L_df_to_sd) || defined (L_df_to_dd) || defined (L_df_to_td) \
+ || ((defined (L_xf_to_sd) || defined (L_xf_to_dd) || defined (L_xf_to_td)) \
+ && LIBGCC2_HAS_XF_MODE)
+DFP_C_TYPE
+BFP_TO_DFP (BFP_TYPE x)
+{
+ DFP_C_TYPE f;
+ IEEE_TYPE s;
+ char buf[BUFMAX];
+ decContext context;
+
+ decContextDefault (&context, CONTEXT_INIT);
+ context.round = CONTEXT_ROUND;
+
+ /* Use a C library function to write the floating point value to a string. */
+#ifdef BFP_VIA_TYPE
+ /* FIXME: Is there a better way to output an XFmode variable in C? */
+ sprintf (buf, BFP_FMT, (BFP_VIA_TYPE) x);
+#else
+ sprintf (buf, BFP_FMT, x);
+#endif
+
+ /* Convert from the floating point string to a decimal* type. */
+ FROM_STRING (&s, buf, &context);
+ IEEE_TO_HOST (s, &f);
+ if (CONTEXT_TRAPS && (context.status & DEC_Inexact) != 0)
+ DFP_RAISE (DEC_Inexact);
+ return f;
+}
+#endif
+
+#if defined (L_unord_sd) || defined (L_unord_dd) || defined (L_unord_td)
+CMPtype
+DFP_UNORD (DFP_C_TYPE arg_a, DFP_C_TYPE arg_b)
+{
+ decNumber arg1, arg2;
+ IEEE_TYPE a, b;
+
+ HOST_TO_IEEE (arg_a, &a);
+ HOST_TO_IEEE (arg_b, &b);
+ TO_INTERNAL (&a, &arg1);
+ TO_INTERNAL (&b, &arg2);
+ return (decNumberIsNaN (&arg1) || decNumberIsNaN (&arg2));
+}
+#endif /* L_unord_sd || L_unord_dd || L_unord_td */
diff --git a/gcc-4.2.1-5666.3/gcc/config/dfp-bit.h b/gcc-4.2.1-5666.3/gcc/config/dfp-bit.h
new file mode 100644
index 000000000..a6c779898
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/dfp-bit.h
@@ -0,0 +1,513 @@
+/* Header file for dfp-bit.c.
+ Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#ifndef _DFPBIT_H
+#define _DFPBIT_H
+
+#include "tconfig.h"
+#include "coretypes.h"
+#include "tm.h"
+
+#ifndef LIBGCC2_WORDS_BIG_ENDIAN
+#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+#ifndef LIBGCC2_FLOAT_WORDS_BIG_ENDIAN
+#define LIBGCC2_FLOAT_WORDS_BIG_ENDIAN LIBGCC2_WORDS_BIG_ENDIAN
+#endif
+
+#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
+#endif
+
+#ifndef LIBGCC2_HAS_XF_MODE
+#define LIBGCC2_HAS_XF_MODE \
+ (BITS_PER_UNIT == 8 && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 80)
+#endif
+
+/* Depending on WIDTH, define a number of macros:
+
+ DFP_C_TYPE: type of the arguments to the libgcc functions;
+ (eg _Decimal32)
+
+ IEEE_TYPE: the corresponding (encoded) IEEE754R type;
+ (eg decimal32)
+
+ TO_INTERNAL: the name of the decNumber function to convert an
+ encoded value into the decNumber internal representation;
+
+ TO_ENCODED: the name of the decNumber function to convert an
+ internally represented decNumber into the encoded
+ representation.
+
+ FROM_STRING: the name of the decNumber function to read an
+ encoded value from a string.
+
+ TO_STRING: the name of the decNumber function to write an
+ encoded value to a string. */
+
+#if WIDTH == 32
+#define DFP_C_TYPE _Decimal32
+#define IEEE_TYPE decimal32
+#define HOST_TO_IEEE __host_to_ieee_32
+#define IEEE_TO_HOST __ieee_to_host_32
+#define TO_INTERNAL __decimal32ToNumber
+#define TO_ENCODED __decimal32FromNumber
+#define FROM_STRING __decimal32FromString
+#define TO_STRING __decimal32ToString
+#elif WIDTH == 64
+#define DFP_C_TYPE _Decimal64
+#define IEEE_TYPE decimal64
+#define HOST_TO_IEEE __host_to_ieee_64
+#define IEEE_TO_HOST __ieee_to_host_64
+#define TO_INTERNAL __decimal64ToNumber
+#define TO_ENCODED __decimal64FromNumber
+#define FROM_STRING __decimal64FromString
+#define TO_STRING __decimal64ToString
+#elif WIDTH == 128
+#define DFP_C_TYPE _Decimal128
+#define IEEE_TYPE decimal128
+#define HOST_TO_IEEE __host_to_ieee_128
+#define IEEE_TO_HOST __ieee_to_host_128
+#define TO_INTERNAL __decimal128ToNumber
+#define TO_ENCODED __decimal128FromNumber
+#define FROM_STRING __decimal128FromString
+#define TO_STRING __decimal128ToString
+#else
+#error invalid decimal float word width
+#endif
+
+/* We define __DEC_EVAL_METHOD__ to 2, saying that we evaluate all
+ operations and constants to the range and precision of the _Decimal128
+ type. Make it so. */
+#if WIDTH == 32
+#define CONTEXT_INIT DEC_INIT_DECIMAL32
+#elif WIDTH == 64
+#define CONTEXT_INIT DEC_INIT_DECIMAL64
+#elif WIDTH == 128
+#define CONTEXT_INIT DEC_INIT_DECIMAL128
+#endif
+
+/* Define CONTEXT_ROUND to obtain the current decNumber rounding mode. */
+extern enum rounding __decGetRound (void);
+#define CONTEXT_ROUND __decGetRound ()
+
+extern int __dfp_traps;
+#define CONTEXT_TRAPS __dfp_traps
+#define CONTEXT_ERRORS(context) ((context).status & DEC_Errors)
+extern void __dfp_raise (int);
+#define DFP_RAISE(A) __dfp_raise(A)
+
+/* Conversions between different decimal float types use WIDTH_TO to
+ determine additional macros to define. */
+
+#if defined (L_dd_to_sd) || defined (L_td_to_sd)
+#define WIDTH_TO 32
+#elif defined (L_sd_to_dd) || defined (L_td_to_dd)
+#define WIDTH_TO 64
+#elif defined (L_sd_to_td) || defined (L_dd_to_td)
+#define WIDTH_TO 128
+#endif
+
+/* If WIDTH_TO is defined, define additional macros:
+
+ DFP_C_TYPE_TO: type of the result of dfp to dfp conversion.
+
+ IEEE_TYPE_TO: the corresponding (encoded) IEEE754R type.
+
+ TO_ENCODED_TO: the name of the decNumber function to convert an
+ internally represented decNumber into the encoded representation
+ for the destination. */
+
+#if WIDTH_TO == 32
+#define DFP_C_TYPE_TO _Decimal32
+#define IEEE_TYPE_TO decimal32
+#define TO_ENCODED_TO __decimal32FromNumber
+#define IEEE_TO_HOST_TO __ieee_to_host_32
+#elif WIDTH_TO == 64
+#define DFP_C_TYPE_TO _Decimal64
+#define IEEE_TYPE_TO decimal64
+#define TO_ENCODED_TO __decimal64FromNumber
+#define IEEE_TO_HOST_TO __ieee_to_host_64
+#elif WIDTH_TO == 128
+#define DFP_C_TYPE_TO _Decimal128
+#define IEEE_TYPE_TO decimal128
+#define TO_ENCODED_TO __decimal128FromNumber
+#define IEEE_TO_HOST_TO __ieee_to_host_128
+#endif
+
+/* Conversions between decimal float types and integral types use INT_KIND
+ to determine the data type and C functions to use. */
+
+#if defined (L_sd_to_si) || defined (L_dd_to_si) || defined (L_td_to_si) \
+ || defined (L_si_to_sd) || defined (L_si_to_dd) || defined (L_si_to_td)
+#define INT_KIND 1
+#elif defined (L_sd_to_di) || defined (L_dd_to_di) || defined (L_td_to_di) \
+ || defined (L_di_to_sd) || defined (L_di_to_dd) || defined (L_di_to_td)
+#define INT_KIND 2
+#elif defined (L_sd_to_usi) || defined (L_dd_to_usi) || defined (L_td_to_usi) \
+ || defined (L_usi_to_sd) || defined (L_usi_to_dd) || defined (L_usi_to_td)
+#define INT_KIND 3
+#elif defined (L_sd_to_udi) || defined (L_dd_to_udi) || defined (L_td_to_udi) \
+ || defined (L_udi_to_sd) || defined (L_udi_to_dd) || defined (L_udi_to_td)
+#define INT_KIND 4
+#endif
+
+/* If INT_KIND is defined, define additional macros:
+
+ INT_TYPE: The integer data type.
+
+ INT_FMT: The format string for writing the integer to a string.
+
+ CAST_FOR_FMT: Cast variable of INT_KIND to C type for sprintf.
+ This works for ILP32 and LP64, but won't for other type-size systems.
+
+ STR_TO_INT: The function to read the integer from a string. */
+
+#if INT_KIND == 1
+#define INT_TYPE SItype
+#define INT_FMT "%d"
+#define CAST_FOR_FMT(A) (int)A
+#define STR_TO_INT strtol
+#elif INT_KIND == 2
+#define INT_TYPE DItype
+#define INT_FMT "%lld"
+#define CAST_FOR_FMT(A) (long long)A
+#define STR_TO_INT strtoll
+#elif INT_KIND == 3
+#define INT_TYPE USItype
+#define INT_FMT "%u"
+#define CAST_FOR_FMT(A) (unsigned int)A
+#define STR_TO_INT strtoul
+#elif INT_KIND == 4
+#define INT_TYPE UDItype
+#define INT_FMT "%llu"
+#define CAST_FOR_FMT(A) (unsigned long long)A
+#define STR_TO_INT strtoull
+#endif
+
+/* Conversions between decimal float types and binary float types use
+ BFP_KIND to determine the data type and C functions to use. */
+
+#if defined (L_sd_to_sf) || defined (L_dd_to_sf) || defined (L_td_to_sf) \
+ || defined (L_sf_to_sd) || defined (L_sf_to_dd) || defined (L_sf_to_td)
+#define BFP_KIND 1
+#elif defined (L_sd_to_df) || defined (L_dd_to_df ) || defined (L_td_to_df) \
+ || defined (L_df_to_sd) || defined (L_df_to_dd) || defined (L_df_to_td)
+#define BFP_KIND 2
+#elif defined (L_sd_to_xf) || defined (L_dd_to_xf ) || defined (L_td_to_xf) \
+ || defined (L_xf_to_sd) || defined (L_xf_to_dd) || defined (L_xf_to_td)
+#define BFP_KIND 3
+#endif
+
+/* If BFP_KIND is defined, define additional macros:
+
+ BFP_TYPE: The binary floating point data type.
+
+ BFP_FMT: The format string for writing the value to a string.
+
+ STR_TO_BFP: The function to read the value from a string. */
+
+#if BFP_KIND == 1
+/* strtof is declared in <stdlib.h> only for C99. */
+extern float strtof (const char *, char **);
+#define BFP_TYPE SFtype
+#define BFP_FMT "%e"
+#define STR_TO_BFP strtof
+
+#elif BFP_KIND == 2
+#define BFP_TYPE DFtype
+#define BFP_FMT "%e"
+#define STR_TO_BFP strtod
+
+#elif BFP_KIND == 3
+#if LIBGCC2_HAS_XF_MODE
+/* These aren't used if XF mode is not supported. */
+#define BFP_TYPE XFtype
+#define BFP_FMT "%e"
+#define BFP_VIA_TYPE double
+#define STR_TO_BFP strtod
+#endif
+
+#endif /* BFP_KIND */
+
+#if WIDTH == 128 || WIDTH_TO == 128
+#include "decimal128.h"
+#endif
+#if WIDTH == 64 || WIDTH_TO == 64
+#include "decimal64.h"
+#endif
+#if WIDTH == 32 || WIDTH_TO == 32
+#include "decimal32.h"
+#endif
+#include "decNumber.h"
+
+/* Names of arithmetic functions. */
+
+#if WIDTH == 32
+#define DFP_ADD __addsd3
+#define DFP_SUB __subsd3
+#define DFP_MULTIPLY __mulsd3
+#define DFP_DIVIDE __divsd3
+#define DFP_EQ __eqsd2
+#define DFP_NE __nesd2
+#define DFP_LT __ltsd2
+#define DFP_GT __gtsd2
+#define DFP_LE __lesd2
+#define DFP_GE __gesd2
+#define DFP_UNORD __unordsd2
+#elif WIDTH == 64
+#define DFP_ADD __adddd3
+#define DFP_SUB __subdd3
+#define DFP_MULTIPLY __muldd3
+#define DFP_DIVIDE __divdd3
+#define DFP_EQ __eqdd2
+#define DFP_NE __nedd2
+#define DFP_LT __ltdd2
+#define DFP_GT __gtdd2
+#define DFP_LE __ledd2
+#define DFP_GE __gedd2
+#define DFP_UNORD __unorddd2
+#elif WIDTH == 128
+#define DFP_ADD __addtd3
+#define DFP_SUB __subtd3
+#define DFP_MULTIPLY __multd3
+#define DFP_DIVIDE __divtd3
+#define DFP_EQ __eqtd2
+#define DFP_NE __netd2
+#define DFP_LT __lttd2
+#define DFP_GT __gttd2
+#define DFP_LE __letd2
+#define DFP_GE __getd2
+#define DFP_UNORD __unordtd2
+#endif
+
+/* Names of functions to convert between different decimal float types. */
+
+#if WIDTH == 32
+#if WIDTH_TO == 64
+#define DFP_TO_DFP __extendsddd2
+#elif WIDTH_TO == 128
+#define DFP_TO_DFP __extendsdtd2
+#endif
+#elif WIDTH == 64
+#if WIDTH_TO == 32
+#define DFP_TO_DFP __truncddsd2
+#elif WIDTH_TO == 128
+#define DFP_TO_DFP __extendddtd2
+#endif
+#elif WIDTH == 128
+#if WIDTH_TO == 32
+#define DFP_TO_DFP __trunctdsd2
+#elif WIDTH_TO == 64
+#define DFP_TO_DFP __trunctddd2
+#endif
+#endif
+
+/* Names of functions to convert between decimal float and integers. */
+
+#if WIDTH == 32
+#if INT_KIND == 1
+#define INT_TO_DFP __floatsisd
+#define DFP_TO_INT __fixsdsi
+#elif INT_KIND == 2
+#define INT_TO_DFP __floatdisd
+#define DFP_TO_INT __fixsddi
+#elif INT_KIND == 3
+#define INT_TO_DFP __floatunssisd
+#define DFP_TO_INT __fixunssdsi
+#elif INT_KIND == 4
+#define INT_TO_DFP __floatunsdisd
+#define DFP_TO_INT __fixunssddi
+#endif
+#elif WIDTH == 64
+#if INT_KIND == 1
+#define INT_TO_DFP __floatsidd
+#define DFP_TO_INT __fixddsi
+#elif INT_KIND == 2
+#define INT_TO_DFP __floatdidd
+#define DFP_TO_INT __fixdddi
+#elif INT_KIND == 3
+#define INT_TO_DFP __floatunssidd
+#define DFP_TO_INT __fixunsddsi
+#elif INT_KIND == 4
+#define INT_TO_DFP __floatunsdidd
+#define DFP_TO_INT __fixunsdddi
+#endif
+#elif WIDTH == 128
+#if INT_KIND == 1
+#define INT_TO_DFP __floatsitd
+#define DFP_TO_INT __fixtdsi
+#elif INT_KIND == 2
+#define INT_TO_DFP __floatditd
+#define DFP_TO_INT __fixtddi
+#elif INT_KIND == 3
+#define INT_TO_DFP __floatunssitd
+#define DFP_TO_INT __fixunstdsi
+#elif INT_KIND == 4
+#define INT_TO_DFP __floatunsditd
+#define DFP_TO_INT __fixunstddi
+#endif
+#endif
+
+/* Names of functions to convert between decimal float and binary float. */
+
+#if WIDTH == 32
+#if BFP_KIND == 1
+#define BFP_TO_DFP __extendsfsd
+#define DFP_TO_BFP __truncsdsf
+#elif BFP_KIND == 2
+#define BFP_TO_DFP __truncdfsd
+#define DFP_TO_BFP __extendsddf
+#elif BFP_KIND == 3
+#define BFP_TO_DFP __truncxfsd
+#define DFP_TO_BFP __extendsdxf
+#endif /* BFP_KIND */
+
+#elif WIDTH == 64
+#if BFP_KIND == 1
+#define BFP_TO_DFP __extendsfdd
+#define DFP_TO_BFP __truncddsf
+#elif BFP_KIND == 2
+#define BFP_TO_DFP __extenddfdd
+#define DFP_TO_BFP __truncdddf
+#elif BFP_KIND == 3
+#define BFP_TO_DFP __truncxfdd
+#define DFP_TO_BFP __extendddxf
+#endif /* BFP_KIND */
+
+#elif WIDTH == 128
+#if BFP_KIND == 1
+#define BFP_TO_DFP __extendsftd
+#define DFP_TO_BFP __trunctdsf
+#elif BFP_KIND == 2
+#define BFP_TO_DFP __extenddftd
+#define DFP_TO_BFP __trunctddf
+#elif BFP_KIND == 3
+#define BFP_TO_DFP __extendxftd
+#define DFP_TO_BFP __trunctdxf
+#endif /* BFP_KIND */
+
+#endif /* WIDTH */
+
+/* Some handy typedefs. */
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+#if LIBGCC2_HAS_XF_MODE
+typedef float XFtype __attribute__ ((mode (XF)));
+#endif /* LIBGCC2_HAS_XF_MODE */
+
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+
+/* The type of the result of a decimal float comparison. This must
+ match `word_mode' in GCC for the target. */
+
+typedef int CMPtype __attribute__ ((mode (word)));
+
+/* Prototypes. */
+
+#if defined (L_mul_sd) || defined (L_mul_dd) || defined (L_mul_td)
+extern DFP_C_TYPE DFP_MULTIPLY (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_div_sd) || defined (L_div_dd) || defined (L_div_td)
+extern DFP_C_TYPE DFP_DIVIDE (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_addsub_sd) || defined (L_addsub_dd) || defined (L_addsub_td)
+extern DFP_C_TYPE DFP_ADD (DFP_C_TYPE, DFP_C_TYPE);
+extern DFP_C_TYPE DFP_SUB (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_eq_sd) || defined (L_eq_dd) || defined (L_eq_td)
+extern CMPtype DFP_EQ (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_ne_sd) || defined (L_ne_dd) || defined (L_ne_td)
+extern CMPtype DFP_NE (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_lt_sd) || defined (L_lt_dd) || defined (L_lt_td)
+extern CMPtype DFP_LT (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_gt_sd) || defined (L_gt_dd) || defined (L_gt_td)
+extern CMPtype DFP_GT (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_le_sd) || defined (L_le_dd) || defined (L_le_td)
+extern CMPtype DFP_LE (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_ge_sd) || defined (L_ge_dd) || defined (L_ge_td)
+extern CMPtype DFP_GE (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_unord_sd) || defined (L_unord_dd) || defined (L_unord_td)
+extern CMPtype DFP_UNORD (DFP_C_TYPE, DFP_C_TYPE);
+#endif
+
+#if defined (L_sd_to_dd) || defined (L_sd_to_td) || defined (L_dd_to_sd) \
+ || defined (L_dd_to_td) || defined (L_td_to_sd) || defined (L_td_to_dd)
+extern DFP_C_TYPE_TO DFP_TO_DFP (DFP_C_TYPE);
+#endif
+
+#if defined (L_sd_to_si) || defined (L_dd_to_si) || defined (L_td_to_si) \
+ || defined (L_sd_to_di) || defined (L_dd_to_di) || defined (L_td_to_di) \
+ || defined (L_sd_to_usi) || defined (L_dd_to_usi) || defined (L_td_to_usi) \
+ || defined (L_sd_to_udi) || defined (L_dd_to_udi) || defined (L_td_to_udi)
+extern INT_TYPE DFP_TO_INT (DFP_C_TYPE);
+#endif
+
+#if defined (L_si_to_sd) || defined (L_si_to_dd) || defined (L_si_to_td) \
+ || defined (L_di_to_sd) || defined (L_di_to_dd) || defined (L_di_to_td) \
+ || defined (L_usi_to_sd) || defined (L_usi_to_dd) || defined (L_usi_to_td) \
+ || defined (L_udi_to_sd) || defined (L_udi_to_dd) || defined (L_udi_to_td)
+extern DFP_C_TYPE INT_TO_DFP (INT_TYPE);
+#endif
+
+#if defined (L_sd_to_sf) || defined (L_dd_to_sf) || defined (L_td_to_sf) \
+ || defined (L_sd_to_df) || defined (L_dd_to_df) || defined (L_td_to_df) \
+ || ((defined (L_sd_to_xf) || defined (L_dd_to_xf) || defined (L_td_to_xf)) \
+ && LIBGCC2_HAS_XF_MODE)
+extern BFP_TYPE DFP_TO_BFP (DFP_C_TYPE);
+#endif
+
+#if defined (L_sf_to_sd) || defined (L_sf_to_dd) || defined (L_sf_to_td) \
+ || defined (L_df_to_sd) || defined (L_df_to_dd) || defined (L_df_to_td) \
+ || ((defined (L_xf_to_sd) || defined (L_xf_to_dd) || defined (L_xf_to_td)) \
+ && LIBGCC2_HAS_XF_MODE)
+extern DFP_C_TYPE BFP_TO_DFP (BFP_TYPE);
+#endif
+
+#endif /* _DFPBIT_H */
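+
+/* Editorial note: dfp-bit.c is compiled once per L_* section, with the
+ libgcc makefile supplying both the section macro and WIDTH.  The exact
+ command below is an assumption for illustration, not taken from this
+ diff:
+
+ gcc -DWIDTH=32 -DL_addsub_sd -c dfp-bit.c -o _addsub_sd.o
+
+ With those defines this header selects _Decimal32 for DFP_C_TYPE and
+ the source file emits only __addsd3 and __subsd3. */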
diff --git a/gcc-4.2.1-5666.3/gcc/config/divmod.c b/gcc-4.2.1-5666.3/gcc/config/divmod.c
new file mode 100644
index 000000000..6faa09102
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/divmod.c
@@ -0,0 +1,50 @@
+long udivmodsi4 ();
+
+long
+__divsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = !neg;
+ }
+
+ if (b < 0)
+ {
+ b = -b;
+ neg = !neg;
+ }
+
+ res = udivmodsi4 (a, b, 0);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
+
+long
+__modsi3 (long a, long b)
+{
+ int neg = 0;
+ long res;
+
+ if (a < 0)
+ {
+ a = -a;
+ neg = 1;
+ }
+
+ if (b < 0)
+ b = -b;
+
+ res = udivmodsi4 (a, b, 1);
+
+ if (neg)
+ res = -res;
+
+ return res;
+}
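+
+/* Editorial sketch: udivmodsi4 is only declared above and lives in a
+ separate libgcc file.  A minimal shift-and-subtract version matching
+ the assumed convention (nonzero third argument means "return the
+ remainder") is shown for illustration; the types are inferred from
+ the call sites, so the block is guarded out. */
+#if 0
+unsigned long
+udivmodsi4 (unsigned long num, unsigned long den, int modwanted)
+{
+  unsigned long bit = 1;
+  unsigned long res = 0;
+
+  /* Align the divisor with the dividend's leading bit. */
+  while (den < num && bit && !(den & (1UL << 31)))
+    {
+      den <<= 1;
+      bit <<= 1;
+    }
+  /* Subtract out each bit of the quotient in turn. */
+  while (bit)
+    {
+      if (num >= den)
+        {
+          num -= den;
+          res |= bit;
+        }
+      bit >>= 1;
+      den >>= 1;
+    }
+  return modwanted ? num : res;
+}
+#endif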
diff --git a/gcc-4.2.1-5666.3/gcc/config/elfos.h b/gcc-4.2.1-5666.3/gcc/config/elfos.h
new file mode 100644
index 000000000..a2bd49f90
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/elfos.h
@@ -0,0 +1,498 @@
+/* elfos.h -- operating system specific defines to be used when
+ targeting GCC for some generic ELF system
+ Copyright (C) 1991, 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+ Based on svr4.h contributed by Ron Guilmette (rfg@netcom.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define TARGET_OBJFMT_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__ELF__"); \
+ } \
+ while (0)
+
+/* Define a symbol indicating that we are using elfos.h.
+ Some CPU specific configuration files use this. */
+#define USING_ELFOS_H
+
+/* The prefix to add to user-visible assembler symbols.
+
+ For ELF systems the convention is *not* to prepend a leading
+ underscore onto user-level symbol names. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* Biggest alignment supported by the object file format of this
+ machine. Use this macro to limit the alignment which can be
+ specified using the `__attribute__ ((aligned (N)))' construct. If
+ not defined, the default value is `BIGGEST_ALIGNMENT'. */
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT (32768 * 8)
+#endif
+
+/* Use periods rather than dollar signs in special g++ assembler names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* Writing `int' for a bit-field forces int alignment for the structure. */
+
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS 1
+#endif
+
+/* Handle #pragma weak and #pragma pack. */
+
+#define HANDLE_SYSV_PRAGMA 1
+
+/* All ELF targets can support DWARF-2. */
+
+#define DWARF2_DEBUGGING_INFO 1
+
+/* The GNU tools operate better with dwarf2, and it is required by some
+ psABI's. Since we don't have any native tools to be compatible with,
+ default to dwarf2. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+#endif
+
+/* All SVR4 targets use the ELF object file format. */
+#define OBJECT_FORMAT_ELF
+
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "%s\"%s\"\n", IDENT_ASM_OP, NAME);
+
+#define IDENT_ASM_OP "\t.ident\t"
+
+#undef SET_ASM_OP
+#define SET_ASM_OP "\t.set\t"
+
+/* Most svr4 assemblers want a .file directive at the beginning of
+ their input file. */
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+/* This is how to allocate empty space in some section. The .zero
+ pseudo-op is used for this on most svr4 assemblers. */
+
+#define SKIP_ASM_OP "\t.zero\t"
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE, SIZE) \
+ fprintf ((FILE), "%s"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ SKIP_ASM_OP, (SIZE))
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'.
+
+ For most svr4 systems, the convention is that any symbol which begins
+ with a period is not put into the linker symbol table by the assembler. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ do \
+ { \
+ sprintf (LABEL, "*.%s%u", PREFIX, (unsigned) (NUM)); \
+ } \
+ while (0)
+
+/* Output the label which precedes a jumptable. Note that for all svr4
+ systems where we actually generate jumptables (which is to say every
+ svr4 target except i386, where we use casesi instead) we put the
+ jumptables into the .rodata section and, since other stuff could have
+ been put into the .rodata section prior to any given jumptable, we
+ have to make sure that the location counter for the .rodata section
+ gets properly re-aligned prior to the actual beginning of the jump
+ table. */
+
+#undef ALIGN_ASM_OP
+#define ALIGN_ASM_OP "\t.align\t"
+
+#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), 2);
+#endif
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+ do \
+ { \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
+ } \
+ while (0)
+
+/* The standard SVR4 assembler seems to require that certain builtin
+ library routines (e.g. .udiv) be explicitly declared as .globl
+ in each assembly file where they are referenced. */
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0))
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define COMMON_ASM_OP "\t.comm\t"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP "\t.local\t"
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ fprintf ((FILE), "%s", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+ } \
+ while (0)
+
+/* This is the pseudo-op used to generate a contiguous sequence of byte
+ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
+ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP "\t.ascii\t"
+
+/* Support a read-only data section. */
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+#define FINI_SECTION_ASM_OP "\t.section\t.fini"
+
+/* Output assembly directive to move to the beginning of current section. */
+#ifdef HAVE_GAS_SUBSECTION_ORDERING
+# define ASM_SECTION_START_OP "\t.subsection\t-1"
+# define ASM_OUTPUT_SECTION_START(FILE) \
+ fprintf ((FILE), "%s\n", ASM_SECTION_START_OP)
+#endif
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION default_elf_select_rtx_section
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION default_elf_select_section
+#undef TARGET_HAVE_SWITCHABLE_BSS_SECTIONS
+#define TARGET_HAVE_SWITCHABLE_BSS_SECTIONS true
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#define TYPE_ASM_OP "\t.type\t"
+#define SIZE_ASM_OP "\t.size\t"
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
+#endif
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ HOST_WIDE_INT size; \
+ \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive \
+ && (DECL) && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
+ } \
+ \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END)\
+ do \
+ { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ \
+ if (!flag_inhibit_size_directive \
+ && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#ifndef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
+#endif
+
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+ ASM_OUTPUT_LIMITED_STRING macros.  Each byte in the table
+ corresponds to a particular byte value [0..255].  For any
+ given byte value, if the value in the corresponding table
+ position is zero, the given character can be output directly.
+ If the table value is 1, the byte must be output as a \ooo
+ octal escape.  If the table value is anything else, then the
+ byte value should be output as a \ followed by the value
+ in the table.  Note that we can use standard UN*X escape
+ sequences for many control characters, but we don't use
+ \a to represent BEL because some svr4 assemblers (e.g. on
+ the i386) don't know about that.  Also, we don't use \v
+ since some versions of gas, such as 2.2, did not accept it. */
+
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
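+
+/* Editorial note, a worked reading of the table above: ESCAPES is
+ indexed by byte value.  ESCAPES['A'] is 0, so 'A' is emitted
+ directly; ESCAPES['\n'] is 'n', so a newline becomes the two
+ characters "\n"; ESCAPES['"'] and ESCAPES['\\'] hold those characters
+ themselves, so both come out backslash-escaped; and bytes whose table
+ value is 1 (most controls, VT, BEL and the high half) are emitted as
+ three-digit octal escapes such as "\377". */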
+
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero.
+*/
+
+#define STRING_LIMIT ((unsigned) 256)
+
+#define STRING_ASM_OP "\t.string\t"
+
+/* The routine used to output NUL terminated strings. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable, especially for targets like the i386
+ (where the only alternative is to output character sequences as
+ comma separated lists of numbers). */
+
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ do \
+ { \
+ register const unsigned char *_limited_str = \
+ (const unsigned char *) (STR); \
+ register unsigned ch; \
+ \
+ fprintf ((FILE), "%s\"", STRING_ASM_OP); \
+ \
+ for (; (ch = *_limited_str); _limited_str++) \
+ { \
+ register int escape; \
+ \
+ switch (escape = ESCAPES[ch]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ break; \
+ } \
+ } \
+ \
+ fprintf ((FILE), "\"\n"); \
+ } \
+ while (0)
+
+/* The routine used to output sequences of byte values. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable. Note that if we find subparts of the
+ character sequence which end with NUL (and which are shorter than
+ STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
+ do \
+ { \
+ const unsigned char *_ascii_bytes = \
+ (const unsigned char *) (STR); \
+ const unsigned char *limit = _ascii_bytes + (LENGTH); \
+ const unsigned char *last_null = NULL; \
+ unsigned bytes_in_chunk = 0; \
+ \
+ for (; _ascii_bytes < limit; _ascii_bytes++) \
+ { \
+ const unsigned char *p; \
+ \
+ if (bytes_in_chunk >= 60) \
+ { \
+ fprintf ((FILE), "\"\n"); \
+ bytes_in_chunk = 0; \
+ } \
+ \
+ if (_ascii_bytes > last_null) \
+ { \
+ for (p = _ascii_bytes; p < limit && *p != '\0'; p++) \
+ continue; \
+ last_null = p; \
+ } \
+ else \
+ p = last_null; \
+ \
+ if (p < limit && (p - _ascii_bytes) <= (long)STRING_LIMIT) \
+ { \
+ if (bytes_in_chunk > 0) \
+ { \
+ fprintf ((FILE), "\"\n"); \
+ bytes_in_chunk = 0; \
+ } \
+ \
+ ASM_OUTPUT_LIMITED_STRING ((FILE), _ascii_bytes); \
+ _ascii_bytes = p; \
+ } \
+ else \
+ { \
+ register int escape; \
+ register unsigned ch; \
+ \
+ if (bytes_in_chunk == 0) \
+ fprintf ((FILE), "%s\"", ASCII_DATA_ASM_OP); \
+ \
+ switch (escape = ESCAPES[ch = *_ascii_bytes]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ bytes_in_chunk++; \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ bytes_in_chunk += 4; \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ bytes_in_chunk += 2; \
+ break; \
+ } \
+ } \
+ } \
+ \
+ if (bytes_in_chunk > 0) \
+ fprintf ((FILE), "\"\n"); \
+ } \
+ while (0)
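+
+/* Editorial note, a worked example of the chunking above: for the byte
+ sequence { 'o', 'k', 0, 0x07 } the scan finds a NUL within
+ STRING_LIMIT, so the leading "ok" goes out via
+ ASM_OUTPUT_LIMITED_STRING as
+
+ .string "ok"
+
+ (the terminating NUL is implied), and the trailing BEL then opens an
+ .ascii chunk and is emitted as the octal escape \007. */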
diff --git a/gcc-4.2.1-5666.3/gcc/config/floatunsidf.c b/gcc-4.2.1-5666.3/gcc/config/floatunsidf.c
new file mode 100644
index 000000000..ff2811250
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/floatunsidf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+DFtype
+__floatunsidf (USItype u)
+{
+ SItype s = (SItype) u;
+ DFtype r = (DFtype) s;
+ if (s < 0)
+ r += (DFtype)2.0 * (DFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
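+
+/* Editorial note, a worked trace of the wrap-around correction above:
+ for u = 0x80000000, s is -2147483648 and r starts at -2^31; the
+ correction adds 2.0 * 2^31 = 2^32, leaving r = 2^31, the intended
+ unsigned value.  DFmode carries 53 significand bits, so every 32-bit
+ input converts exactly and no sticky-bit fix-up is needed here. */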
diff --git a/gcc-4.2.1-5666.3/gcc/config/floatunsisf.c b/gcc-4.2.1-5666.3/gcc/config/floatunsisf.c
new file mode 100644
index 000000000..11d4aa78c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/floatunsisf.c
@@ -0,0 +1,18 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float SFtype __attribute__ ((mode (SF)));
+
+SFtype
+__floatunsisf (USItype u)
+{
+ SItype s = (SItype) u;
+ if (s < 0)
+ {
+ /* As in expand_float, compute (u & 1) | (u >> 1) to ensure
+ correct rounding if a nonzero bit is shifted out. */
+ return (SFtype) 2.0 * (SFtype) (SItype) ((u & 1) | (u >> 1));
+ }
+ else
+ return (SFtype) s;
+}
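+
+/* Editorial note on the (u & 1) | (u >> 1) trick above: SFmode keeps
+ only 24 significand bits, so the halved value is itself rounded, and
+ the low bit of u would otherwise be lost before that rounding.
+ Worked tie case: for u = 0x80000081, plain u >> 1 = 0x40000040 is an
+ exact half-ulp tie and rounds down to even, doubling to 2^31 -- off
+ by one ulp.  OR-ing the lost bit in gives 0x40000041, which rounds
+ up, and doubling yields the correctly rounded 2^31 + 256. */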
diff --git a/gcc-4.2.1-5666.3/gcc/config/floatunsitf.c b/gcc-4.2.1-5666.3/gcc/config/floatunsitf.c
new file mode 100644
index 000000000..955d67666
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/floatunsitf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float TFtype __attribute__ ((mode (TF)));
+
+TFtype
+__floatunsitf (USItype u)
+{
+ SItype s = (SItype) u;
+ TFtype r = (TFtype) s;
+ if (s < 0)
+ r += (TFtype)2.0 * (TFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/floatunsixf.c b/gcc-4.2.1-5666.3/gcc/config/floatunsixf.c
new file mode 100644
index 000000000..52511688d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/floatunsixf.c
@@ -0,0 +1,15 @@
+/* Public domain. */
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef float XFtype __attribute__ ((mode (XF)));
+
+XFtype
+__floatunsixf (USItype u)
+{
+ SItype s = (SItype) u;
+ XFtype r = (XFtype) s;
+ if (s < 0)
+ r += (XFtype)2.0 * (XFtype) ((USItype) 1
+ << (sizeof (USItype) * __CHAR_BIT__ - 1));
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/fp-bit.c b/gcc-4.2.1-5666.3/gcc/config/fp-bit.c
new file mode 100644
index 000000000..bdf04ffd3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/fp-bit.c
@@ -0,0 +1,1678 @@
+/* This is a software floating point library which can be used
+ for targets without hardware floating point.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003,
+ 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* This implements IEEE 754 format arithmetic, but does not provide a
+ mechanism for setting the rounding mode, or for generating or handling
+ exceptions.
+
+ The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
+ Wilson, all of Cygnus Support. */
+
+/* The intended way to use this file is to make two copies, add `#define FLOAT'
+ to one copy, then compile both copies and add them to libgcc.a. */
+
+#include "tconfig.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "config/fp-bit.h"
+
+/* The following macros can be defined to change the behavior of this file:
+ FLOAT: Implement a `float', aka SFmode, fp library. If this is not
+ defined, then this file implements a `double', aka DFmode, fp library.
+ FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e.
+ don't include float->double conversion which requires the double library.
+ This is useful only for machines which can't support doubles, e.g. some
+ 8-bit processors.
+ CMPtype: Specify the type that floating point compares should return.
+ This defaults to SItype, aka int.
+ US_SOFTWARE_GOFAST: This makes all entry points use the same names as the
+ US Software goFast library.
+ _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding
+ two integers to the FLO_union_type.
+ NO_DENORMALS: Disable handling of denormals.
+ NO_NANS: Disable NaN and infinity handling.
+ SMALL_MACHINE: Useful when operations on QIs and HIs are faster
+ than on an SI. */
+
+/* We don't currently support extended floats (long doubles) on machines
+ without hardware to deal with them.
+
+ These stubs are just to keep the linker from complaining about unresolved
+ references which can be pulled in from libio & libstdc++, even if the
+ user isn't using long doubles. However, they may generate an unresolved
+ external to abort if abort is not used by the function, and the stubs
+ are referenced from within libc, since libgcc goes before and after the
+ system library. */
+
+#ifdef DECLARE_LIBRARY_RENAMES
+ DECLARE_LIBRARY_RENAMES
+#endif
+
+#ifdef EXTENDED_FLOAT_STUBS
+extern void abort (void);
+void __extendsfxf2 (void) { abort(); }
+void __extenddfxf2 (void) { abort(); }
+void __truncxfdf2 (void) { abort(); }
+void __truncxfsf2 (void) { abort(); }
+void __fixxfsi (void) { abort(); }
+void __floatsixf (void) { abort(); }
+void __addxf3 (void) { abort(); }
+void __subxf3 (void) { abort(); }
+void __mulxf3 (void) { abort(); }
+void __divxf3 (void) { abort(); }
+void __negxf2 (void) { abort(); }
+void __eqxf2 (void) { abort(); }
+void __nexf2 (void) { abort(); }
+void __gtxf2 (void) { abort(); }
+void __gexf2 (void) { abort(); }
+void __lexf2 (void) { abort(); }
+void __ltxf2 (void) { abort(); }
+
+void __extendsftf2 (void) { abort(); }
+void __extenddftf2 (void) { abort(); }
+void __trunctfdf2 (void) { abort(); }
+void __trunctfsf2 (void) { abort(); }
+void __fixtfsi (void) { abort(); }
+void __floatsitf (void) { abort(); }
+void __addtf3 (void) { abort(); }
+void __subtf3 (void) { abort(); }
+void __multf3 (void) { abort(); }
+void __divtf3 (void) { abort(); }
+void __negtf2 (void) { abort(); }
+void __eqtf2 (void) { abort(); }
+void __netf2 (void) { abort(); }
+void __gttf2 (void) { abort(); }
+void __getf2 (void) { abort(); }
+void __letf2 (void) { abort(); }
+void __lttf2 (void) { abort(); }
+#else /* !EXTENDED_FLOAT_STUBS, rest of file */
+
+/* IEEE "special" number predicates */
+
+#ifdef NO_NANS
+
+#define nan() 0
+#define isnan(x) 0
+#define isinf(x) 0
+#else
+
+#if defined L_thenan_sf
+const fp_number_type __thenan_sf = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined L_thenan_df
+const fp_number_type __thenan_df = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined L_thenan_tf
+const fp_number_type __thenan_tf = { CLASS_SNAN, 0, 0, {(fractype) 0} };
+#elif defined TFLOAT
+extern const fp_number_type __thenan_tf;
+#elif defined FLOAT
+extern const fp_number_type __thenan_sf;
+#else
+extern const fp_number_type __thenan_df;
+#endif
+
+INLINE
+static fp_number_type *
+nan (void)
+{
+ /* Discard the const qualifier... */
+#ifdef TFLOAT
+ return (fp_number_type *) (& __thenan_tf);
+#elif defined FLOAT
+ return (fp_number_type *) (& __thenan_sf);
+#else
+ return (fp_number_type *) (& __thenan_df);
+#endif
+}
+
+INLINE
+static int
+isnan ( fp_number_type * x)
+{
+ return __builtin_expect (x->class == CLASS_SNAN || x->class == CLASS_QNAN,
+ 0);
+}
+
+INLINE
+static int
+isinf ( fp_number_type * x)
+{
+ return __builtin_expect (x->class == CLASS_INFINITY, 0);
+}
+
+#endif /* NO_NANS */
+
+INLINE
+static int
+iszero ( fp_number_type * x)
+{
+ return x->class == CLASS_ZERO;
+}
+
+INLINE
+static void
+flip_sign ( fp_number_type * x)
+{
+ x->sign = !x->sign;
+}
+
+/* Count leading zeroes in N. */
+INLINE
+static int
+clzusi (USItype n)
+{
+ extern int __clzsi2 (USItype);
+ if (sizeof (USItype) == sizeof (unsigned int))
+ return __builtin_clz (n);
+ else if (sizeof (USItype) == sizeof (unsigned long))
+ return __builtin_clzl (n);
+ else if (sizeof (USItype) == sizeof (unsigned long long))
+ return __builtin_clzll (n);
+ else
+ return __clzsi2 (n);
+}
+
+extern FLO_type pack_d ( fp_number_type * );
+
+#if defined(L_pack_df) || defined(L_pack_sf) || defined(L_pack_tf)
+FLO_type
+pack_d ( fp_number_type * src)
+{
+ FLO_union_type dst;
+ fractype fraction = src->fraction.ll; /* wasn't unsigned before? */
+ int sign = src->sign;
+ int exp = 0;
+
+ if (LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS) && (isnan (src) || isinf (src)))
+ {
+ /* We can't represent these values accurately. By using the
+ largest possible magnitude, we guarantee that the conversion
+ of infinity is at least as big as any finite number. */
+ exp = EXPMAX;
+ fraction = ((fractype) 1 << FRACBITS) - 1;
+ }
+ else if (isnan (src))
+ {
+ exp = EXPMAX;
+ if (src->class == CLASS_QNAN || 1)
+ {
+#ifdef QUIET_NAN_NEGATED
+ fraction |= QUIET_NAN - 1;
+#else
+ fraction |= QUIET_NAN;
+#endif
+ }
+ }
+ else if (isinf (src))
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else if (iszero (src))
+ {
+ exp = 0;
+ fraction = 0;
+ }
+ else if (fraction == 0)
+ {
+ exp = 0;
+ }
+ else
+ {
+ if (__builtin_expect (src->normal_exp < NORMAL_EXPMIN, 0))
+ {
+#ifdef NO_DENORMALS
+ /* Go straight to a zero representation if denormals are not
+ supported. The denormal handling would be harmless but
+ isn't necessary. */
+ exp = 0;
+ fraction = 0;
+#else /* NO_DENORMALS */
+ /* This number's exponent is too low to fit into the bits
+ available in the number, so we'll store 0 in the exponent and
+ shift the fraction to the right to make up for it. */
+
+ int shift = NORMAL_EXPMIN - src->normal_exp;
+
+ exp = 0;
+
+ if (shift > FRAC_NBITS - NGARDS)
+ {
+ /* No point in shifting, since it's more than 64 bits out. */
+ fraction = 0;
+ }
+ else
+ {
+ int lowbit = (fraction & (((fractype)1 << shift) - 1)) ? 1 : 0;
+ fraction = (fraction >> shift) | lowbit;
+ }
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if ((fraction & (1 << NGARDS)))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+ /* Add to the guards to round up. */
+ fraction += GARDROUND;
+ }
+ /* Perhaps the rounding means we now need to change the
+ exponent, because the fraction is no longer denormal. */
+ if (fraction >= IMPLICIT_1)
+ {
+ exp += 1;
+ }
+ fraction >>= NGARDS;
+#endif /* NO_DENORMALS */
+ }
+ else if (!LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS)
+ && __builtin_expect (src->normal_exp > EXPBIAS, 0))
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else
+ {
+ exp = src->normal_exp + EXPBIAS;
+ if (!ROUND_TOWARDS_ZERO)
+ {
+ /* If the guard bits are all zero except the first, then we're
+ halfway between two numbers; choose the one which makes the
+ lsb of the answer 0. */
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if (fraction & (1 << NGARDS))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+ /* Add a one to the guards to round up. */
+ fraction += GARDROUND;
+ }
+ if (fraction >= IMPLICIT_2)
+ {
+ fraction >>= 1;
+ exp += 1;
+ }
+ }
+ fraction >>= NGARDS;
+
+ if (LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS) && exp > EXPMAX)
+ {
+ /* Saturate on overflow. */
+ exp = EXPMAX;
+ fraction = ((fractype) 1 << FRACBITS) - 1;
+ }
+ }
+ }
+
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ dst.bits.fraction = fraction;
+ dst.bits.exp = exp;
+ dst.bits.sign = sign;
+#else
+# if defined TFLOAT && defined HALFFRACBITS
+ {
+ halffractype high, low, unity;
+ int lowsign, lowexp;
+
+ unity = (halffractype) 1 << HALFFRACBITS;
+
+ /* Set HIGH to the high double's significand, masking out the implicit 1.
+ Set LOW to the low double's full significand. */
+ high = (fraction >> (FRACBITS - HALFFRACBITS)) & (unity - 1);
+ low = fraction & (unity * 2 - 1);
+
+ /* Get the initial sign and exponent of the low double. */
+ lowexp = exp - HALFFRACBITS - 1;
+ lowsign = sign;
+
+ /* HIGH should be rounded like a normal double, making |LOW| <=
+ 0.5 ULP of HIGH. Assume round-to-nearest. */
+ if (exp < EXPMAX)
+ if (low > unity || (low == unity && (high & 1) == 1))
+ {
+ /* Round HIGH up and adjust LOW to match. */
+ high++;
+ if (high == unity)
+ {
+ /* May make it infinite, but that's OK. */
+ high = 0;
+ exp++;
+ }
+ low = unity * 2 - low;
+ lowsign ^= 1;
+ }
+
+ high |= (halffractype) exp << HALFFRACBITS;
+ high |= (halffractype) sign << (HALFFRACBITS + EXPBITS);
+
+ if (exp == EXPMAX || exp == 0 || low == 0)
+ low = 0;
+ else
+ {
+ while (lowexp > 0 && low < unity)
+ {
+ low <<= 1;
+ lowexp--;
+ }
+
+ if (lowexp <= 0)
+ {
+ halffractype roundmsb, round;
+ int shift;
+
+ shift = 1 - lowexp;
+ roundmsb = (1 << (shift - 1));
+ round = low & ((roundmsb << 1) - 1);
+
+ low >>= shift;
+ lowexp = 0;
+
+ if (round > roundmsb || (round == roundmsb && (low & 1) == 1))
+ {
+ low++;
+ if (low == unity)
+ /* LOW rounds up to the smallest normal number. */
+ lowexp++;
+ }
+ }
+
+ low &= unity - 1;
+ low |= (halffractype) lowexp << HALFFRACBITS;
+ low |= (halffractype) lowsign << (HALFFRACBITS + EXPBITS);
+ }
+ dst.value_raw = ((fractype) high << HALFSHIFT) | low;
+ }
+# else
+ dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1);
+ dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS;
+ dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS | EXPBITS);
+# endif
+#endif
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+#ifdef TFLOAT
+ {
+ qrtrfractype tmp1 = dst.words[0];
+ qrtrfractype tmp2 = dst.words[1];
+ dst.words[0] = dst.words[3];
+ dst.words[1] = dst.words[2];
+ dst.words[2] = tmp2;
+ dst.words[3] = tmp1;
+ }
+#else
+ {
+ halffractype tmp = dst.words[0];
+ dst.words[0] = dst.words[1];
+ dst.words[1] = tmp;
+ }
+#endif
+#endif
+
+ return dst.value;
+}
+#endif
+
+#if defined(L_unpack_df) || defined(L_unpack_sf) || defined(L_unpack_tf)
+void
+unpack_d (FLO_union_type * src, fp_number_type * dst)
+{
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+ fractype fraction;
+ int exp;
+ int sign;
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ FLO_union_type swapped;
+
+#ifdef TFLOAT
+ swapped.words[0] = src->words[3];
+ swapped.words[1] = src->words[2];
+ swapped.words[2] = src->words[1];
+ swapped.words[3] = src->words[0];
+#else
+ swapped.words[0] = src->words[1];
+ swapped.words[1] = src->words[0];
+#endif
+ src = &swapped;
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ fraction = src->bits.fraction;
+ exp = src->bits.exp;
+ sign = src->bits.sign;
+#else
+# if defined TFLOAT && defined HALFFRACBITS
+ {
+ halffractype high, low;
+
+ high = src->value_raw >> HALFSHIFT;
+ low = src->value_raw & (((fractype)1 << HALFSHIFT) - 1);
+
+ fraction = high & ((((fractype)1) << HALFFRACBITS) - 1);
+ fraction <<= FRACBITS - HALFFRACBITS;
+ exp = ((int)(high >> HALFFRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(high >> (((HALFFRACBITS + EXPBITS))))) & 1;
+
+ if (exp != EXPMAX && exp != 0 && low != 0)
+ {
+ int lowexp = ((int)(low >> HALFFRACBITS)) & ((1 << EXPBITS) - 1);
+ int lowsign = ((int)(low >> (((HALFFRACBITS + EXPBITS))))) & 1;
+ int shift;
+ fractype xlow;
+
+ xlow = low & ((((fractype)1) << HALFFRACBITS) - 1);
+ if (lowexp)
+ xlow |= (((halffractype)1) << HALFFRACBITS);
+ else
+ lowexp = 1;
+ shift = (FRACBITS - HALFFRACBITS) - (exp - lowexp);
+ if (shift > 0)
+ xlow <<= shift;
+ else if (shift < 0)
+ xlow >>= -shift;
+ if (sign == lowsign)
+ fraction += xlow;
+ else if (fraction >= xlow)
+ fraction -= xlow;
+ else
+ {
+ /* The high part is a power of two but the full number is lower.
+ This code will leave the implicit 1 in FRACTION, but we'd
+ have added that below anyway. */
+ fraction = (((fractype) 1 << FRACBITS) - xlow) << 1;
+ exp--;
+ }
+ }
+ }
+# else
+ fraction = src->value_raw & ((((fractype)1) << FRACBITS) - 1);
+ exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1;
+# endif
+#endif
+
+ dst->sign = sign;
+ if (exp == 0)
+ {
+ /* Zero exponent: the value is zero or denormalized. */
+ if (fraction == 0
+#ifdef NO_DENORMALS
+ || 1
+#endif
+ )
+ {
+ /* Zero fraction (or denormals disabled), so it is a true zero. */
+ dst->class = CLASS_ZERO;
+ }
+ else
+ {
+ /* Zero exponent with nonzero fraction - it's denormalized,
+ so there isn't a leading implicit one - we'll shift it so
+ it gets one. */
+ dst->normal_exp = exp - EXPBIAS + 1;
+ fraction <<= NGARDS;
+
+ dst->class = CLASS_NUMBER;
+#if 1
+ while (fraction < IMPLICIT_1)
+ {
+ fraction <<= 1;
+ dst->normal_exp--;
+ }
+#endif
+ dst->fraction.ll = fraction;
+ }
+ }
+ else if (!LARGEST_EXPONENT_IS_NORMAL (FRAC_NBITS)
+ && __builtin_expect (exp == EXPMAX, 0))
+ {
+ /* Huge exponent: infinity or NaN. */
+ if (fraction == 0)
+ {
+ /* Attached to a zero fraction - means infinity */
+ dst->class = CLASS_INFINITY;
+ }
+ else
+ {
+ /* Nonzero fraction means NaN. */
+#ifdef QUIET_NAN_NEGATED
+ if ((fraction & QUIET_NAN) == 0)
+#else
+ if (fraction & QUIET_NAN)
+#endif
+ {
+ dst->class = CLASS_QNAN;
+ }
+ else
+ {
+ dst->class = CLASS_SNAN;
+ }
+ /* Keep the fraction part as the NaN payload. */
+ dst->fraction.ll = fraction;
+ }
+ }
+ else
+ {
+ /* Nothing strange about this number */
+ dst->normal_exp = exp - EXPBIAS;
+ dst->class = CLASS_NUMBER;
+ dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1;
+ }
+}
+#endif /* L_unpack_df || L_unpack_sf */
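+
+/* Illustrative example (not part of the library): unpacking the IEEE
+ single 0x3f800000 (1.0f) yields sign 0, exp 127 and a zero fraction
+ field, so the ordinary-number branch above produces
+ normal_exp = 127 - 127 = 0 and
+ fraction.ll = (0 << NGARDS) | IMPLICIT_1 = 1 << 30.  */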
+
+#if defined(L_addsub_sf) || defined(L_addsub_df) || defined(L_addsub_tf)
+static fp_number_type *
+_fpadd_parts (fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ intfrac tfraction;
+
+ /* Put commonly used fields in local variables. */
+ int a_normal_exp;
+ int b_normal_exp;
+ fractype a_fraction;
+ fractype b_fraction;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+ if (isinf (a))
+ {
+ /* Adding infinities with opposite signs yields a NaN. */
+ if (isinf (b) && a->sign != b->sign)
+ return nan ();
+ return a;
+ }
+ if (isinf (b))
+ {
+ return b;
+ }
+ if (iszero (b))
+ {
+ if (iszero (a))
+ {
+ *tmp = *a;
+ tmp->sign = a->sign & b->sign;
+ return tmp;
+ }
+ return a;
+ }
+ if (iszero (a))
+ {
+ return b;
+ }
+
+ /* We have two ordinary numbers; shift the one with the smaller exponent
+ right and bump its exponent until the exponents match. */
+ {
+ int diff;
+ int sdiff;
+
+ a_normal_exp = a->normal_exp;
+ b_normal_exp = b->normal_exp;
+ a_fraction = a->fraction.ll;
+ b_fraction = b->fraction.ll;
+
+ diff = a_normal_exp - b_normal_exp;
+ sdiff = diff;
+
+ if (diff < 0)
+ diff = -diff;
+ if (diff < FRAC_NBITS)
+ {
+ if (sdiff > 0)
+ {
+ b_normal_exp += diff;
+ LSHIFT (b_fraction, diff);
+ }
+ else if (sdiff < 0)
+ {
+ a_normal_exp += diff;
+ LSHIFT (a_fraction, diff);
+ }
+ }
+ else
+ {
+ /* The exponents differ by more than the fraction width; keep the
+ larger operand and treat the smaller as zero. */
+ if (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp = a_normal_exp;
+ b_fraction = 0;
+ }
+ else
+ {
+ a_normal_exp = b_normal_exp;
+ a_fraction = 0;
+ }
+ }
+ }
+
+ if (a->sign != b->sign)
+ {
+ if (a->sign)
+ {
+ tfraction = -a_fraction + b_fraction;
+ }
+ else
+ {
+ tfraction = a_fraction - b_fraction;
+ }
+ if (tfraction >= 0)
+ {
+ tmp->sign = 0;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = tfraction;
+ }
+ else
+ {
+ tmp->sign = 1;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = -tfraction;
+ }
+ /* and renormalize it */
+
+ while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll)
+ {
+ tmp->fraction.ll <<= 1;
+ tmp->normal_exp--;
+ }
+ }
+ else
+ {
+ tmp->sign = a->sign;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = a_fraction + b_fraction;
+ }
+ tmp->class = CLASS_NUMBER;
+ /* Now that the fractions have been added, we may have to shift down to
+ renormalize the number. */
+
+ if (tmp->fraction.ll >= IMPLICIT_2)
+ {
+ LSHIFT (tmp->fraction.ll, 1);
+ tmp->normal_exp++;
+ }
+ return tmp;
+
+}
+
+FLO_type
+add (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+
+FLO_type
+sub (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ b.sign ^= 1;
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif /* L_addsub_sf || L_addsub_df */
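+
+/* Note on naming: with the default (non-GOFAST) macros in fp-bit.h,
+ these entry points are emitted as __addsf3/__subsf3 (FLOAT) or
+ __adddf3/__subdf3, the names the compiler calls for soft-float
+ SFmode/DFmode `+' and `-'.  Subtraction simply flips B's sign bit
+ and reuses the addition path above.  */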
+
+#if defined(L_mul_sf) || defined(L_mul_df) || defined(L_mul_tf)
+static inline __attribute__ ((__always_inline__)) fp_number_type *
+_fpmul_parts ( fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ fractype low = 0;
+ fractype high = 0;
+
+ if (isnan (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isnan (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (isinf (a))
+ {
+ if (iszero (b))
+ return nan ();
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isinf (b))
+ {
+ if (iszero (a))
+ {
+ return nan ();
+ }
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (iszero (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (iszero (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+
+ /* Calculate the mantissa by multiplying both numbers to get a
+ twice-as-wide number. */
+ {
+#if defined(NO_DI_MODE) || defined(TFLOAT)
+ {
+ fractype x = a->fraction.ll;
+ fractype ylow = b->fraction.ll;
+ fractype yhigh = 0;
+ int bit;
+
+ /* ??? This multiplies one bit at a time. Optimize. */
+ for (bit = 0; bit < FRAC_NBITS; bit++)
+ {
+ int carry;
+
+ if (x & 1)
+ {
+ carry = (low += ylow) < ylow;
+ high += yhigh + carry;
+ }
+ yhigh <<= 1;
+ if (ylow & FRACHIGH)
+ {
+ yhigh |= 1;
+ }
+ ylow <<= 1;
+ x >>= 1;
+ }
+ }
+#elif defined(FLOAT)
+ /* Multiplying two USIs to get a UDI, we're safe. */
+ {
+ UDItype answer = (UDItype)a->fraction.ll * (UDItype)b->fraction.ll;
+
+ high = answer >> BITS_PER_SI;
+ low = answer;
+ }
+#else
+ /* fractype is DImode, but we need the result to be twice as wide.
+ Assuming a widening multiply from DImode to TImode is not
+ available, build one by hand. */
+ {
+ USItype nl = a->fraction.ll;
+ USItype nh = a->fraction.ll >> BITS_PER_SI;
+ USItype ml = b->fraction.ll;
+ USItype mh = b->fraction.ll >> BITS_PER_SI;
+ UDItype pp_ll = (UDItype) ml * nl;
+ UDItype pp_hl = (UDItype) mh * nl;
+ UDItype pp_lh = (UDItype) ml * nh;
+ UDItype pp_hh = (UDItype) mh * nh;
+ UDItype res2 = 0;
+ UDItype res0 = 0;
+ UDItype ps_hh__ = pp_hl + pp_lh;
+ if (ps_hh__ < pp_hl)
+ res2 += (UDItype)1 << BITS_PER_SI;
+ pp_hl = (UDItype)(USItype)ps_hh__ << BITS_PER_SI;
+ res0 = pp_ll + pp_hl;
+ if (res0 < pp_ll)
+ res2++;
+ res2 += (ps_hh__ >> BITS_PER_SI) + pp_hh;
+ high = res2;
+ low = res0;
+ }
+#endif
+ }
+
+ tmp->normal_exp = a->normal_exp + b->normal_exp
+ + FRAC_NBITS - (FRACBITS + NGARDS);
+ tmp->sign = a->sign != b->sign;
+ while (high >= IMPLICIT_2)
+ {
+ tmp->normal_exp++;
+ if (high & 1)
+ {
+ low >>= 1;
+ low |= FRACHIGH;
+ }
+ high >>= 1;
+ }
+ while (high < IMPLICIT_1)
+ {
+ tmp->normal_exp--;
+
+ high <<= 1;
+ if (low & FRACHIGH)
+ high |= 1;
+ low <<= 1;
+ }
+
+ if (!ROUND_TOWARDS_ZERO && (high & GARDMASK) == GARDMSB)
+ {
+ if (high & (1 << NGARDS))
+ {
+ /* Because we're half way, we would round to even by adding
+ GARDROUND + 1, except that's also done in the packing
+ function, and rounding twice will lose precision and cause
+ the result to be too far off. Example: 32-bit floats with
+ bit patterns 0xfff * 0x3f800400 ~= 0xfff (less than 0.5ulp
+ off), not 0x1000 (more than 0.5ulp off). */
+ }
+ else if (low)
+ {
+ /* We're more than halfway by a small amount corresponding
+ to the bits set in "low". Knowing that, we round here and
+ not in pack_d, because "low" is no longer available
+ there. */
+ high += GARDROUND + 1;
+
+ /* Avoid further rounding in pack_d. */
+ high &= ~(fractype) GARDMASK;
+ }
+ }
+ tmp->fraction.ll = high;
+ tmp->class = CLASS_NUMBER;
+ return tmp;
+}
+
+FLO_type
+multiply (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpmul_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif /* L_mul_sf || L_mul_df || L_mul_tf */
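+
+/* Scaled-down illustration (not part of the library) of the widening
+ multiply built by hand in _fpmul_parts, using 4-bit halves instead
+ of BITS_PER_SI: for n = 0x12 and m = 0x34,
+
+ pp_ll = 4*2 = 8,  pp_hl = 3*2 = 6,  pp_lh = 4*1 = 4,  pp_hh = 3*1 = 3
+ n*m = pp_hh*16^2 + (pp_hl + pp_lh)*16 + pp_ll
+     = 768 + 160 + 8 = 936 = 0x3a8
+
+ The res2 adjustments account for carries out of the middle sum.  */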
+
+#if defined(L_div_sf) || defined(L_div_df) || defined(L_div_tf)
+static inline __attribute__ ((__always_inline__)) fp_number_type *
+_fpdiv_parts (fp_number_type * a,
+ fp_number_type * b)
+{
+ fractype bit;
+ fractype numerator;
+ fractype denominator;
+ fractype quotient;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+
+ a->sign = a->sign ^ b->sign;
+
+ if (isinf (a) || iszero (a))
+ {
+ if (a->class == b->class)
+ return nan ();
+ return a;
+ }
+
+ if (isinf (b))
+ {
+ a->fraction.ll = 0;
+ a->normal_exp = 0;
+ return a;
+ }
+ if (iszero (b))
+ {
+ a->class = CLASS_INFINITY;
+ return a;
+ }
+
+ /* Calculate the quotient mantissa by long division, producing one
+ result bit per iteration. */
+ {
+ /* quotient =
+ ( numerator / denominator) * 2^(numerator exponent - denominator exponent)
+ */
+
+ a->normal_exp = a->normal_exp - b->normal_exp;
+ numerator = a->fraction.ll;
+ denominator = b->fraction.ll;
+
+ if (numerator < denominator)
+ {
+ /* Fraction will be less than 1.0 */
+ numerator *= 2;
+ a->normal_exp--;
+ }
+ bit = IMPLICIT_1;
+ quotient = 0;
+ /* ??? This divides one bit at a time. Optimize. */
+ while (bit)
+ {
+ if (numerator >= denominator)
+ {
+ quotient |= bit;
+ numerator -= denominator;
+ }
+ bit >>= 1;
+ numerator *= 2;
+ }
+
+ if (!ROUND_TOWARDS_ZERO && (quotient & GARDMASK) == GARDMSB)
+ {
+ if (quotient & (1 << NGARDS))
+ {
+ /* Because we're half way, we would round to even by adding
+ GARDROUND + 1, except that's also done in the packing
+ function, and rounding twice will lose precision and cause
+ the result to be too far off. */
+ }
+ else if (numerator)
+ {
+ /* We're more than halfway by the small amount
+ corresponding to the bits set in "numerator". Knowing
+ that, we round here and not in pack_d, because there we
+ don't have "numerator" available anymore. */
+ quotient += GARDROUND + 1;
+
+ /* Avoid further rounding in pack_d. */
+ quotient &= ~(fractype) GARDMASK;
+ }
+ }
+
+ a->fraction.ll = quotient;
+ return (a);
+ }
+}
+
+FLO_type
+divide (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type *res;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ res = _fpdiv_parts (&a, &b);
+
+ return pack_d (res);
+}
+#endif /* L_div_sf || L_div_df */
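+
+/* Tiny worked example (not part of the library) of the restoring
+ division loop, with IMPLICIT_1 scaled down to 0b100: for 1.5 / 1.0
+ the fractions are numerator = 0b110 and denominator = 0b100.
+ Iteration 1: 6 >= 4, so quotient |= 0b100, numerator = (6-4)*2 = 4.
+ Iteration 2: 4 >= 4, so quotient |= 0b010, numerator = 0.
+ Iteration 3: nothing left to subtract.  quotient = 0b110, i.e. 1.5
+ times the scaled IMPLICIT_1, as expected.  */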
+
+#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df) \
+ || defined(L_fpcmp_parts_tf)
+/* According to the demo, fpcmp returns a three-way comparison with 0; thus
+ a<b -> -1
+ a==b -> 0
+ a>b -> +1
+ */
+
+int
+__fpcmp_parts (fp_number_type * a, fp_number_type * b)
+{
+#if 0
+ /* either nan -> unordered. Must be checked outside of this routine. */
+ if (isnan (a) && isnan (b))
+ {
+ return 1; /* still unordered! */
+ }
+#endif
+
+ if (isnan (a) || isnan (b))
+ {
+ return 1; /* how to indicate unordered compare? */
+ }
+ if (isinf (a) && isinf (b))
+ {
+ /* +inf > -inf, and +inf == +inf */
+ /* b \a| +inf(0)| -inf(1)
+ ______\+--------+--------
+ +inf(0)| a==b(0)| a<b(-1)
+ -------+--------+--------
+ -inf(1)| a>b(1) | a==b(0)
+ -------+--------+--------
+ So since unordered must be nonzero, just line up the columns...
+ */
+ return b->sign - a->sign;
+ }
+ /* but not both... */
+ if (isinf (a))
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (isinf (b))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (a) && iszero (b))
+ {
+ return 0;
+ }
+ if (iszero (a))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (b))
+ {
+ return a->sign ? -1 : 1;
+ }
+ /* now both are "normal". */
+ if (a->sign != b->sign)
+ {
+ /* opposite signs */
+ return a->sign ? -1 : 1;
+ }
+ /* same sign; exponents? */
+ if (a->normal_exp > b->normal_exp)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->normal_exp < b->normal_exp)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* same exponents; check size. */
+ if (a->fraction.ll > b->fraction.ll)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->fraction.ll < b->fraction.ll)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* after all that, they're equal. */
+ return 0;
+}
+#endif
+
+#if defined(L_compare_sf) || defined(L_compare_df) || defined(L_compare_tf)
+CMPtype
+compare (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif /* L_compare_sf || L_compare_df */
+
+#ifndef US_SOFTWARE_GOFAST
+
+/* These should be optimized for their specific tasks someday. */
+
+#if defined(L_eq_sf) || defined(L_eq_df) || defined(L_eq_tf)
+CMPtype
+_eq_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth == 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif /* L_eq_sf || L_eq_df */
+
+#if defined(L_ne_sf) || defined(L_ne_df) || defined(L_ne_tf)
+CMPtype
+_ne_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* true, truth != 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif /* L_ne_sf || L_ne_df */
+
+#if defined(L_gt_sf) || defined(L_gt_df) || defined(L_gt_tf)
+CMPtype
+_gt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth > 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif /* L_gt_sf || L_gt_df */
+
+#if defined(L_ge_sf) || defined(L_ge_df) || defined(L_ge_tf)
+CMPtype
+_ge_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth >= 0 */
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif /* L_ge_sf || L_ge_df */
+
+#if defined(L_lt_sf) || defined(L_lt_df) || defined(L_lt_tf)
+CMPtype
+_lt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth < 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif /* L_lt_sf || L_lt_df */
+
+#if defined(L_le_sf) || defined(L_le_df) || defined(L_le_tf)
+CMPtype
+_le_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth <= 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif /* L_le_sf || L_le_df */
+
+#endif /* ! US_SOFTWARE_GOFAST */
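+
+/* Summary of the conventions above (illustrative): __fpcmp_parts
+ returns -1/0/+1 for a<b / a==b / a>b.  Each wrapper only has to
+ return something whose comparison against 0, in the sense of its own
+ operator, gives the right truth value.  So for NaN operands
+ _eq_f2, _lt_f2 and _le_f2 return 1 and _gt_f2/_ge_f2 return -1,
+ making those comparisons false, while _ne_f2's 1 correctly reads
+ as true.  */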
+
+#if defined(L_unord_sf) || defined(L_unord_df) || defined(L_unord_tf)
+CMPtype
+_unord_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ FLO_union_type au, bu;
+
+ au.value = arg_a;
+ bu.value = arg_b;
+
+ unpack_d (&au, &a);
+ unpack_d (&bu, &b);
+
+ return (isnan (&a) || isnan (&b));
+}
+#endif /* L_unord_sf || L_unord_df */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df) || defined(L_si_to_tf)
+FLO_type
+si_to_float (SItype arg_a)
+{
+ fp_number_type in;
+
+ in.class = CLASS_NUMBER;
+ in.sign = arg_a < 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ USItype uarg;
+ int shift;
+ in.normal_exp = FRACBITS + NGARDS;
+ if (in.sign)
+ {
+ /* Special case for INT_MIN (minint), since it has no positive
+ integer counterpart; the constant conversion below is folded at
+ compile time, so it does not recurse into this function. */
+ if (arg_a == (- MAX_SI_INT - 1))
+ {
+ return (FLO_type)(- MAX_SI_INT - 1);
+ }
+ uarg = (-arg_a);
+ }
+ else
+ uarg = arg_a;
+
+ in.fraction.ll = uarg;
+ shift = clzusi (uarg) - (BITS_PER_SI - 1 - FRACBITS - NGARDS);
+ if (shift > 0)
+ {
+ in.fraction.ll <<= shift;
+ in.normal_exp -= shift;
+ }
+ }
+ return pack_d (&in);
+}
+#endif /* L_si_to_sf || L_si_to_df */
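+
+/* Worked example (not part of the library), for FLOAT with
+ BITS_PER_SI 32, FRACBITS 23, NGARDS 7: converting 5 gives
+ clzusi (5) = 29, shift = 29 - (31 - 23 - 7) = 28, so
+ fraction.ll = 5 << 28 = 1.25 * 2^30 (i.e. 1.25 * IMPLICIT_1) and
+ normal_exp = 30 - 28 = 2; pack_d then yields 1.25 * 2^2 = 5.0.  */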
+
+#if defined(L_usi_to_sf) || defined(L_usi_to_df) || defined(L_usi_to_tf)
+FLO_type
+usi_to_float (USItype arg_a)
+{
+ fp_number_type in;
+
+ in.sign = 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ int shift;
+ in.class = CLASS_NUMBER;
+ in.normal_exp = FRACBITS + NGARDS;
+ in.fraction.ll = arg_a;
+
+ shift = clzusi (arg_a) - (BITS_PER_SI - 1 - FRACBITS - NGARDS);
+ if (shift < 0)
+ {
+ fractype guard = in.fraction.ll & (((fractype)1 << -shift) - 1);
+ in.fraction.ll >>= -shift;
+ in.fraction.ll |= (guard != 0);
+ in.normal_exp -= shift;
+ }
+ else if (shift > 0)
+ {
+ in.fraction.ll <<= shift;
+ in.normal_exp -= shift;
+ }
+ }
+ return pack_d (&in);
+}
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si) || defined(L_tf_to_si)
+SItype
+float_to_si (FLO_type arg_a)
+{
+ fp_number_type a;
+ SItype tmp;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* get reasonable MAX_SI_INT... */
+ if (isinf (&a))
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ /* It is a nonzero number, but too small in magnitude: truncate to 0. */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > BITS_PER_SI - 2)
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+ return a.sign ? (-tmp) : (tmp);
+}
+#endif /* L_sf_to_si || L_df_to_si */
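+
+/* Worked example (not part of the library): for 5.0f unpacked as
+ normal_exp = 2, fraction.ll = 1.25 * 2^30, the shift is
+ (23 + 7) - 2 = 28, so tmp = (1.25 * 2^30) >> 28 = 5.  The shift
+ discards the sub-integer bits, so the conversion truncates toward
+ zero, e.g. 5.9f -> 5.  */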
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi) || defined(L_tf_to_usi)
+#if defined US_SOFTWARE_GOFAST || defined(L_tf_to_usi)
+/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines,
+ we also define them for GOFAST because the ones in libgcc2.c have the
+ wrong names and I'd rather define these here and keep GOFAST CYG-LOC's
+ out of libgcc2.c. We can't define these here if not GOFAST because then
+ there'd be duplicate copies. */
+
+USItype
+float_to_usi (FLO_type arg_a)
+{
+ fp_number_type a;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* it is a negative number */
+ if (a.sign)
+ return 0;
+ /* get reasonable MAX_USI_INT... */
+ if (isinf (&a))
+ return MAX_USI_INT;
+ /* It is a nonzero number, but too small in magnitude: truncate to 0. */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > BITS_PER_SI - 1)
+ return MAX_USI_INT;
+ else if (a.normal_exp > (FRACBITS + NGARDS))
+ return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS));
+ else
+ return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+}
+#endif /* US_SOFTWARE_GOFAST */
+#endif /* L_sf_to_usi || L_df_to_usi */
+
+#if defined(L_negate_sf) || defined(L_negate_df) || defined(L_negate_tf)
+FLO_type
+negate (FLO_type arg_a)
+{
+ fp_number_type a;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &a);
+
+ flip_sign (&a);
+ return pack_d (&a);
+}
+#endif /* L_negate_sf || L_negate_df */
+
+#ifdef FLOAT
+
+#if defined(L_make_sf)
+SFtype
+__make_fp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ USItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_sf */
+
+#ifndef FLOAT_ONLY
+
+/* This enables one to build an fp library that supports float but not
+ double. Otherwise, we would get an undefined reference to __make_dp.
+ This is needed for some 8-bit ports that cannot handle 8-byte values
+ well, so we just don't support double for them at all. */
+
+#if defined(L_sf_to_df)
+DFtype
+sf_to_df (SFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_dp (in.class, in.sign, in.normal_exp,
+ ((UDItype) in.fraction.ll) << F_D_BITOFF);
+}
+#endif /* L_sf_to_df */
+
+#if defined(L_sf_to_tf) && defined(TMODES)
+TFtype
+sf_to_tf (SFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_tp (in.class, in.sign, in.normal_exp,
+ ((UTItype) in.fraction.ll) << F_T_BITOFF);
+}
+#endif /* L_sf_to_tf */
+
+#endif /* ! FLOAT_ONLY */
+#endif /* FLOAT */
+
+#ifndef FLOAT
+
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+
+#if defined(L_make_df)
+DFtype
+__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_df */
+
+#if defined(L_df_to_sf)
+SFtype
+df_to_sf (DFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> F_D_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any nonzero
+ bits. */
+ if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+#endif /* L_df_to_sf */
+
+#if defined(L_df_to_tf) && defined(TMODES) \
+ && !defined(FLOAT) && !defined(TFLOAT)
+TFtype
+df_to_tf (DFtype arg_a)
+{
+ fp_number_type in;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ return __make_tp (in.class, in.sign, in.normal_exp,
+ ((UTItype) in.fraction.ll) << D_T_BITOFF);
+}
+#endif /* L_df_to_tf */
+
+#ifdef TFLOAT
+#if defined(L_make_tf)
+TFtype
+__make_tp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ UTItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif /* L_make_tf */
+
+#if defined(L_tf_to_df)
+DFtype
+tf_to_df (TFtype arg_a)
+{
+ fp_number_type in;
+ UDItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> D_T_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any nonzero
+ bits. */
+ if ((in.fraction.ll & (((UTItype) 1 << D_T_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_dp (in.class, in.sign, in.normal_exp, sffrac);
+}
+#endif /* L_tf_to_df */
+
+#if defined(L_tf_to_sf)
+SFtype
+tf_to_sf (TFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+ FLO_union_type au;
+
+ au.value = arg_a;
+ unpack_d (&au, &in);
+
+ sffrac = in.fraction.ll >> F_T_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any nonzero
+ bits. */
+ if ((in.fraction.ll & (((UTItype) 1 << F_T_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+#endif /* L_tf_to_sf */
+#endif /* TFLOAT */
+
+#endif /* ! FLOAT */
+#endif /* !EXTENDED_FLOAT_STUBS */
diff --git a/gcc-4.2.1-5666.3/gcc/config/fp-bit.h b/gcc-4.2.1-5666.3/gcc/config/fp-bit.h
new file mode 100644
index 000000000..0f03f87ee
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/fp-bit.h
@@ -0,0 +1,543 @@
+/* Header file for fp-bit.c. */
+/* Copyright (C) 2000, 2002, 2003, 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#ifndef GCC_FP_BIT_H
+#define GCC_FP_BIT_H
+
+/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines
+ from this file are compiled via additional -D options.
+
+ This avoids the need to pull in the entire fp emulation library
+ when only a small number of functions are needed.
+
+ If FINE_GRAINED_LIBRARIES is not defined, then compile every
+ suitable routine. */
+#ifndef FINE_GRAINED_LIBRARIES
+#define L_pack_df
+#define L_unpack_df
+#define L_pack_sf
+#define L_unpack_sf
+#define L_addsub_sf
+#define L_addsub_df
+#define L_mul_sf
+#define L_mul_df
+#define L_div_sf
+#define L_div_df
+#define L_fpcmp_parts_sf
+#define L_fpcmp_parts_df
+#define L_compare_sf
+#define L_compare_df
+#define L_eq_sf
+#define L_eq_df
+#define L_ne_sf
+#define L_ne_df
+#define L_gt_sf
+#define L_gt_df
+#define L_ge_sf
+#define L_ge_df
+#define L_lt_sf
+#define L_lt_df
+#define L_le_sf
+#define L_le_df
+#define L_unord_sf
+#define L_unord_df
+#define L_usi_to_sf
+#define L_usi_to_df
+#define L_si_to_sf
+#define L_si_to_df
+#define L_sf_to_si
+#define L_df_to_si
+#define L_sf_to_usi
+#define L_df_to_usi
+#define L_negate_sf
+#define L_negate_df
+#define L_make_sf
+#define L_make_df
+#define L_sf_to_df
+#define L_df_to_sf
+#ifdef FLOAT
+#define L_thenan_sf
+#else
+#define L_thenan_df
+#endif
+#endif /* ! FINE_GRAINED_LIBRARIES */
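+
+/* For example (command line illustrative only), a libgcc makefile can
+ build one routine per object file:
+
+ gcc -c fp-bit.c -DFINE_GRAINED_LIBRARIES -DFLOAT -DL_addsub_sf \
+     -o _addsub_sf.o
+
+ so that the linker pulls in only the functions actually used.  */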
+
+#if __LDBL_MANT_DIG__ == 113 || __LDBL_MANT_DIG__ == 106
+# if defined(TFLOAT) || defined(L_sf_to_tf) || defined(L_df_to_tf)
+# define TMODES
+# endif
+#endif
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+#ifdef TMODES
+typedef float TFtype __attribute__ ((mode (TF)));
+#endif
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+#ifdef TMODES
+typedef int TItype __attribute__ ((mode (TI)));
+#endif
+
+/* The type of the result of a floating point comparison. This must
+ match `word_mode' in GCC for the target. */
+#ifndef CMPtype
+typedef int CMPtype __attribute__ ((mode (word)));
+#endif
+
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#ifdef TMODES
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#endif
+
+#define MAX_USI_INT (~(USItype)0)
+#define MAX_SI_INT ((SItype) (MAX_USI_INT >> 1))
+#define BITS_PER_SI (4 * BITS_PER_UNIT)
+#ifdef TMODES
+#define MAX_UDI_INT (~(UDItype)0)
+#define MAX_DI_INT ((DItype) (MAX_UDI_INT >> 1))
+#define BITS_PER_DI (8 * BITS_PER_UNIT)
+#endif
+
+#ifdef FLOAT_ONLY
+#define NO_DI_MODE
+#endif
+
+#ifdef TFLOAT
+# ifndef TMODES
+# error "TFLOAT requires long double to have 113 bits of mantissa"
+# endif
+
+# define PREFIXFPDP tp
+# define PREFIXSFDF tf
+# define NGARDS 10L /* Is this right? */
+# define GARDROUND 0x1ff
+# define GARDMASK 0x3ff
+# define GARDMSB 0x200
+# define FRAC_NBITS 128
+
+# if __LDBL_MANT_DIG__ == 113 /* IEEE quad */
+# define EXPBITS 15
+# define EXPBIAS 16383
+# define EXPMAX (0x7fff)
+# define QUIET_NAN ((TItype)0x8 << 108)
+# define FRACHIGH ((TItype)0x8 << 124)
+# define FRACHIGH2 ((TItype)0xc << 124)
+# define FRACBITS 112
+# endif
+
+# if __LDBL_MANT_DIG__ == 106 /* IBM extended (double+double) */
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define EXPMAX (0x7ff)
+# define QUIET_NAN ((TItype)0x8 << (48 + 64))
+# define FRACHIGH ((TItype)0x8 << 124)
+# define FRACHIGH2 ((TItype)0xc << 124)
+# define FRACBITS 105
+# define HALFFRACBITS 52
+# define HALFSHIFT 64
+# endif
+
+# define pack_d __pack_t
+# define unpack_d __unpack_t
+# define __fpcmp_parts __fpcmp_parts_t
+ typedef UTItype fractype;
+ typedef UDItype halffractype;
+ typedef USItype qrtrfractype;
+#define qrtrfractype qrtrfractype
+ typedef TFtype FLO_type;
+ typedef TItype intfrac;
+#elif defined FLOAT
+# define NGARDS 7L
+# define GARDROUND 0x3f
+# define GARDMASK 0x7f
+# define GARDMSB 0x40
+# define EXPBITS 8
+# define EXPBIAS 127
+# define FRACBITS 23
+# define EXPMAX (0xff)
+# define QUIET_NAN 0x100000L
+# define FRAC_NBITS 32
+# define FRACHIGH 0x80000000L
+# define FRACHIGH2 0xc0000000L
+# define pack_d __pack_f
+# define unpack_d __unpack_f
+# define __fpcmp_parts __fpcmp_parts_f
+ typedef USItype fractype;
+ typedef UHItype halffractype;
+ typedef SFtype FLO_type;
+ typedef SItype intfrac;
+
+#else
+# define PREFIXFPDP dp
+# define PREFIXSFDF df
+# define NGARDS 8L
+# define GARDROUND 0x7f
+# define GARDMASK 0xff
+# define GARDMSB 0x80
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define FRACBITS 52
+# define EXPMAX (0x7ff)
+# define QUIET_NAN 0x8000000000000LL
+# define FRAC_NBITS 64
+# define FRACHIGH 0x8000000000000000LL
+# define FRACHIGH2 0xc000000000000000LL
+# define pack_d __pack_d
+# define unpack_d __unpack_d
+# define __fpcmp_parts __fpcmp_parts_d
+ typedef UDItype fractype;
+ typedef USItype halffractype;
+ typedef DFtype FLO_type;
+ typedef DItype intfrac;
+#endif /* FLOAT */
+
+#ifdef US_SOFTWARE_GOFAST
+# ifdef TFLOAT
+# error "GOFAST TFmode not supported"
+# elif defined FLOAT
+# define add fpadd
+# define sub fpsub
+# define multiply fpmul
+# define divide fpdiv
+# define compare fpcmp
+# define _unord_f2 __unordsf2
+# define usi_to_float __floatunsisf
+# define si_to_float sitofp
+# define float_to_si fptosi
+# define float_to_usi fptoui
+# define negate __negsf2
+# define sf_to_df fptodp
+# define sf_to_tf __extendsftf2
+# else
+# define add dpadd
+# define sub dpsub
+# define multiply dpmul
+# define divide dpdiv
+# define compare dpcmp
+# define _unord_f2 __unorddf2
+# define usi_to_float __floatunsidf
+# define si_to_float litodp
+# define float_to_si dptoli
+# define float_to_usi dptoul
+# define negate __negdf2
+# define df_to_sf dptofp
+# define df_to_tf __extenddftf2
+# endif /* FLOAT */
+#else
+# ifdef TFLOAT
+# define add __addtf3
+# define sub __subtf3
+# define multiply __multf3
+# define divide __divtf3
+# define compare __cmptf2
+# define _eq_f2 __eqtf2
+# define _ne_f2 __netf2
+# define _gt_f2 __gttf2
+# define _ge_f2 __getf2
+# define _lt_f2 __lttf2
+# define _le_f2 __letf2
+# define _unord_f2 __unordtf2
+# define usi_to_float __floatunsitf
+# define si_to_float __floatsitf
+# define float_to_si __fixtfsi
+# define float_to_usi __fixunstfsi
+# define negate __negtf2
+# define tf_to_sf __trunctfsf2
+# define tf_to_df __trunctfdf2
+# elif defined FLOAT
+# define add __addsf3
+# define sub __subsf3
+# define multiply __mulsf3
+# define divide __divsf3
+# define compare __cmpsf2
+# define _eq_f2 __eqsf2
+# define _ne_f2 __nesf2
+# define _gt_f2 __gtsf2
+# define _ge_f2 __gesf2
+# define _lt_f2 __ltsf2
+# define _le_f2 __lesf2
+# define _unord_f2 __unordsf2
+# define usi_to_float __floatunsisf
+# define si_to_float __floatsisf
+# define float_to_si __fixsfsi
+# define float_to_usi __fixunssfsi
+# define negate __negsf2
+# define sf_to_df __extendsfdf2
+# define sf_to_tf __extendsftf2
+# else
+# define add __adddf3
+# define sub __subdf3
+# define multiply __muldf3
+# define divide __divdf3
+# define compare __cmpdf2
+# define _eq_f2 __eqdf2
+# define _ne_f2 __nedf2
+# define _gt_f2 __gtdf2
+# define _ge_f2 __gedf2
+# define _lt_f2 __ltdf2
+# define _le_f2 __ledf2
+# define _unord_f2 __unorddf2
+# define usi_to_float __floatunsidf
+# define si_to_float __floatsidf
+# define float_to_si __fixdfsi
+# define float_to_usi __fixunsdfsi
+# define negate __negdf2
+# define df_to_sf __truncdfsf2
+# define df_to_tf __extenddftf2
+# endif /* FLOAT */
+#endif /* US_SOFTWARE_GOFAST */
+
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+
+/* Preserve the sticky-bit when shifting fractions to the right. */
+#define LSHIFT(a, s) { a = (a >> s) | !!(a & (((fractype) 1 << s) - 1)); }
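+
+/* Despite its name, LSHIFT shifts right; the OR folds any bits shifted
+ out back into the low ("sticky") bit so rounding information is not
+ lost.  For example (illustrative): a = 0b1011, s = 2 gives
+ (0b10) | !!(0b11) = 0b11 rather than the plain 0b10.  */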
+
+/* numeric parameters */
+/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa
+ of a float and of a double. Assumes there are only two float types.
+ (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS+float::NGARDS))
+ */
+#define F_D_BITOFF (52+8-(23+7))
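+/* With the IEEE parameters above, this is (52+8) - (23+7) = 30.  */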
+
+#ifdef TMODES
+# define F_T_BITOFF (__LDBL_MANT_DIG__-1+10-(23+7))
+# define D_T_BITOFF (__LDBL_MANT_DIG__-1+10-(52+8))
+#endif
+
+
+#define NORMAL_EXPMIN (-(EXPBIAS)+1)
+#define IMPLICIT_1 ((fractype)1<<(FRACBITS+NGARDS))
+#define IMPLICIT_2 ((fractype)1<<(FRACBITS+1+NGARDS))
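+
+/* For the single-precision case (FRACBITS 23, NGARDS 7) these are
+ 2^30 and 2^31; a normalized internal fraction f always satisfies
+ IMPLICIT_1 <= f < IMPLICIT_2, which is what the renormalization
+ loops in _fpadd_parts and _fpmul_parts maintain.  */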
+
+/* common types */
+
+typedef enum
+{
+ CLASS_SNAN,
+ CLASS_QNAN,
+ CLASS_ZERO,
+ CLASS_NUMBER,
+ CLASS_INFINITY
+} fp_class_type;
+
+typedef struct
+{
+#ifdef SMALL_MACHINE
+ char class;
+ unsigned char sign;
+ short normal_exp;
+#else
+ fp_class_type class;
+ unsigned int sign;
+ int normal_exp;
+#endif
+
+ union
+ {
+ fractype ll;
+ halffractype l[2];
+ } fraction;
+} fp_number_type;
+
+typedef union
+{
+ FLO_type value;
+ fractype value_raw;
+
+#ifndef FLOAT
+# ifdef qrtrfractype
+ qrtrfractype qwords[4];
+# else
+ halffractype words[2];
+# endif
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits;
+#endif
+
+#ifdef _DEBUG_BITFLOAT
+ struct
+ {
+ unsigned int sign:1 __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ }
+ bits_big_endian;
+
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits_little_endian;
+#endif
+}
+FLO_union_type;
+
+/* Prototypes */
+
+#if defined(L_pack_df) || defined(L_pack_sf) || defined(L_pack_tf)
+extern FLO_type pack_d (fp_number_type *);
+#endif
+
+extern void unpack_d (FLO_union_type *, fp_number_type *);
+
+#if defined(L_addsub_sf) || defined(L_addsub_df) || defined(L_addsub_tf)
+extern FLO_type add (FLO_type, FLO_type);
+extern FLO_type sub (FLO_type, FLO_type);
+#endif
+
+#if defined(L_mul_sf) || defined(L_mul_df) || defined(L_mul_tf)
+extern FLO_type multiply (FLO_type, FLO_type);
+#endif
+
+#if defined(L_div_sf) || defined(L_div_df) || defined(L_div_tf)
+extern FLO_type divide (FLO_type, FLO_type);
+#endif
+
+extern int __fpcmp_parts (fp_number_type *, fp_number_type *);
+
+#if defined(L_compare_sf) || defined(L_compare_df) || defined(L_compare_tf)
+extern CMPtype compare (FLO_type, FLO_type);
+#endif
+
+#ifndef US_SOFTWARE_GOFAST
+
+#if defined(L_eq_sf) || defined(L_eq_df) || defined(L_eq_tf)
+extern CMPtype _eq_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_ne_sf) || defined(L_ne_df) || defined(L_ne_tf)
+extern CMPtype _ne_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_gt_sf) || defined(L_gt_df) || defined(L_gt_tf)
+extern CMPtype _gt_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_ge_sf) || defined(L_ge_df) || defined(L_ge_tf)
+extern CMPtype _ge_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_lt_sf) || defined(L_lt_df) || defined(L_lt_tf)
+extern CMPtype _lt_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_le_sf) || defined(L_le_df) || defined(L_le_tf)
+extern CMPtype _le_f2 (FLO_type, FLO_type);
+#endif
+
+#if defined(L_unord_sf) || defined(L_unord_df) || defined(L_unord_tf)
+extern CMPtype _unord_f2 (FLO_type, FLO_type);
+#endif
+
+#endif /* ! US_SOFTWARE_GOFAST */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df) || defined(L_si_to_tf)
+extern FLO_type si_to_float (SItype);
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si) || defined(L_tf_to_si)
+extern SItype float_to_si (FLO_type);
+#endif
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi) || defined(L_tf_to_usi)
+#if defined(US_SOFTWARE_GOFAST) || defined(L_tf_to_usi)
+extern USItype float_to_usi (FLO_type);
+#endif
+#endif
+
+#if defined(L_usi_to_sf) || defined(L_usi_to_df) || defined(L_usi_to_tf)
+extern FLO_type usi_to_float (USItype);
+#endif
+
+#if defined(L_negate_sf) || defined(L_negate_df) || defined(L_negate_tf)
+extern FLO_type negate (FLO_type);
+#endif
+
+#ifdef FLOAT
+#if defined(L_make_sf)
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+#endif
+#ifndef FLOAT_ONLY
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+#if defined(L_sf_to_df)
+extern DFtype sf_to_df (SFtype);
+#endif
+#if defined(L_sf_to_tf) && defined(TMODES)
+extern TFtype sf_to_tf (SFtype);
+#endif
+#endif /* ! FLOAT_ONLY */
+#endif /* FLOAT */
+
+#ifndef FLOAT
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+#if defined(L_make_df)
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+#endif
+#if defined(L_df_to_sf)
+extern SFtype df_to_sf (DFtype);
+#endif
+#if defined(L_df_to_tf) && defined(TMODES)
+extern TFtype df_to_tf (DFtype);
+#endif
+#endif /* ! FLOAT */
+
+#ifdef TMODES
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype);
+extern TFtype __make_tp (fp_class_type, unsigned int, int, UTItype);
+#ifdef TFLOAT
+#if defined(L_tf_to_sf)
+extern SFtype tf_to_sf (TFtype);
+#endif
+#if defined(L_tf_to_df)
+extern DFtype tf_to_df (TFtype);
+#endif
+#if defined(L_di_to_tf)
+extern TFtype di_to_tf (DItype);
+#endif
+#endif /* TFLOAT */
+#endif /* TMODES */
+
+#endif /* ! GCC_FP_BIT_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/freebsd-nthr.h b/gcc-4.2.1-5666.3/gcc/config/freebsd-nthr.h
new file mode 100644
index 000000000..711ce0e65
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/freebsd-nthr.h
@@ -0,0 +1,22 @@
+/* FreeBSD configuration setting for FreeBSD systems.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Loren J. Rittle <ljrittle@acm.org>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define FBSD_NO_THREADS
diff --git a/gcc-4.2.1-5666.3/gcc/config/freebsd-spec.h b/gcc-4.2.1-5666.3/gcc/config/freebsd-spec.h
new file mode 100644
index 000000000..b3f18e20b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/freebsd-spec.h
@@ -0,0 +1,146 @@
+/* Base configuration file for all FreeBSD targets.
+ Copyright (C) 1999, 2000, 2001, 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Common FreeBSD configuration.
+ All FreeBSD architectures should include this file, which will specify
+ their commonalities.
+ Adapted from gcc/config/freebsd.h by
+ David O'Brien <obrien@FreeBSD.org>
+ Loren J. Rittle <ljrittle@acm.org>. */
+
+
+/* In case we need to know. */
+#define USING_CONFIG_FREEBSD_SPEC 1
+
+/* This defines which switch letters take arguments. On FreeBSD, most of
+ the normal cases (defined in gcc.c) apply, and we also have -h* and
+ -z* options (for the linker) (coming from SVR4).
+ We also have -R (alias --rpath), no -z, --soname (-h), --assert etc. */
+
+#define FBSD_SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z' /* ignored by ld */ \
+ || (CHAR) == 'R')
+
+/* This defines which multi-letter switches take arguments. */
+
+#define FBSD_WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp ((STR), "rpath") || !strcmp ((STR), "rpath-link") \
+ || !strcmp ((STR), "soname") || !strcmp ((STR), "defsym") \
+ || !strcmp ((STR), "assert") || !strcmp ((STR), "dynamic-linker"))
+
+#define FBSD_TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_with_int_value ("__FreeBSD__", FBSD_MAJOR); \
+ builtin_define_std ("unix"); \
+ builtin_define ("__KPRINTF_ATTRIBUTE__"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=bsd"); \
+ builtin_assert ("system=FreeBSD"); \
+ FBSD_TARGET_CPU_CPP_BUILTINS(); \
+ } \
+ while (0)
+
+/* Define the default FreeBSD-specific per-CPU hook code. */
+#define FBSD_TARGET_CPU_CPP_BUILTINS() do {} while (0)
+
+/* Provide a CPP_SPEC appropriate for FreeBSD. We just deal with the GCC
+ option `-posix', and PIC issues. */
+
+#define FBSD_CPP_SPEC " \
+ %(cpp_cpu) \
+ %(cpp_arch) \
+ %{posix:-D_POSIX_SOURCE}"
+
+/* Provide a STARTFILE_SPEC appropriate for FreeBSD. Here we add
+ the magical crtbegin.o file (see crtstuff.c) which provides part
+ of the support for getting C++ file-scope static object constructed
+ before entering `main'. */
+
+#define FBSD_STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+ %{!p:%{profile:gcrt1.o%s} \
+ %{!profile:crt1.o%s}}}} \
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide a ENDFILE_SPEC appropriate for FreeBSD. Here we tack on
+ the magical crtend.o file (see crtstuff.c) which provides part of
+ the support for getting C++ file-scope static object constructed
+ before entering `main', followed by a normal "finalizer" file,
+ `crtn.o'. */
+
+#define FBSD_ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+/* Provide a LIB_SPEC appropriate for FreeBSD as configured and as
+ required by the user-land thread model. Before __FreeBSD_version
+ 500016, select the appropriate libc, depending on whether we're
+ doing profiling or need threads support. At __FreeBSD_version
+ 500016 and later, when threads support is requested include both
+ -lc and the threading lib instead of only -lc_r. To make matters
+ interesting, we can't actually use __FreeBSD_version provided by
+ <osreldate.h> directly since it breaks cross-compiling. As a final
+ twist, make it a hard error if -pthread is provided on the command
+ line and gcc was configured with --disable-threads (this will help
+ avoid bug reports from users complaining about threading when they
+ misconfigured the gcc bootstrap but are later consulting FreeBSD
+ manual pages that refer to the mythical -pthread option). */
+
+/* Provide a LIB_SPEC appropriate for FreeBSD. Just select the appropriate
+ libc, depending on whether we're doing profiling or need threads support.
+ (similar to the default, except no -lg, and no -p). */
+
+#ifdef FBSD_NO_THREADS
+#define FBSD_LIB_SPEC " \
+ %{pthread: %eThe -pthread option is only supported on FreeBSD when gcc \
+is built with the --enable-threads configure-time option.} \
+ %{!shared: \
+ %{!pg: -lc} \
+ %{pg: -lc_p} \
+ }"
+#else
+#if FBSD_MAJOR < 5
+#define FBSD_LIB_SPEC " \
+ %{!shared: \
+ %{!pg: \
+ %{!pthread:-lc} \
+ %{pthread:-lc_r}} \
+ %{pg: \
+ %{!pthread:-lc_p} \
+ %{pthread:-lc_r_p}} \
+ }"
+#else
+#define FBSD_LIB_SPEC " \
+ %{!shared: \
+ %{!pg: %{pthread:-lpthread} -lc} \
+ %{pg: %{pthread:-lpthread_p} -lc_p} \
+ }"
+#endif
+#endif
+
+#if FBSD_MAJOR < 6
+#define FBSD_DYNAMIC_LINKER "/usr/libexec/ld-elf.so.1"
+#else
+#define FBSD_DYNAMIC_LINKER "/libexec/ld-elf.so.1"
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/freebsd.h b/gcc-4.2.1-5666.3/gcc/config/freebsd.h
new file mode 100644
index 000000000..21dc81e60
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/freebsd.h
@@ -0,0 +1,87 @@
+/* Base configuration file for all FreeBSD targets.
+ Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Common FreeBSD configuration.
+ All FreeBSD architectures should include this file, which will specify
+ their commonalities.
+ Adapted from gcc/config/i386/freebsd-elf.h by
+ David O'Brien <obrien@FreeBSD.org>.
+ Further work by David O'Brien <obrien@FreeBSD.org> and
+ Loren J. Rittle <ljrittle@acm.org>. */
+
+
+/* In case we need to know. */
+#define USING_CONFIG_FREEBSD 1
+
+/* This defines which switch letters take arguments. On FreeBSD, most of
+ the normal cases (defined in gcc.c) apply, and we also have -h* and
+ -z* options (for the linker) (coming from SVR4).
+ We also have -R (alias --rpath), no -z, --soname (-h), --assert etc. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) (FBSD_SWITCH_TAKES_ARG(CHAR))
+
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) (FBSD_WORD_SWITCH_TAKES_ARG(STR))
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() FBSD_TARGET_OS_CPP_BUILTINS()
+
+#undef CPP_SPEC
+#define CPP_SPEC FBSD_CPP_SPEC
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC FBSD_STARTFILE_SPEC
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC FBSD_ENDFILE_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC FBSD_LIB_SPEC
+
+
+/************************[ Target stuff ]***********************************/
+
+/* All FreeBSD Architectures support the ELF object file format. */
+#undef OBJECT_FORMAT_ELF
+#define OBJECT_FORMAT_ELF
+
+/* Don't assume anything about the header files. */
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C 1
+
+/* Make gcc agree with FreeBSD's standard headers (<machine/ansi.h>, etc...) */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#define MATH_LIBRARY_PROFILE "-lm_p"
+
+/* Code generation parameters. */
+
+/* Use periods rather than dollar signs in special g++ assembler names.
+ This ensures the configuration knows our system correctly so we can link
+ with libraries compiled with the native cc. */
+#undef NO_DOLLAR_IN_LABEL
+
+/* Used by libgcc2.c. We support file locking with fcntl / F_SETLKW.
+ This enables the test coverage code to use file locking when exiting a
+ program, which avoids race conditions if the program has forked. */
+#define TARGET_POSIX_IO
diff --git a/gcc-4.2.1-5666.3/gcc/config/gnu.h b/gcc-4.2.1-5666.3/gcc/config/gnu.h
new file mode 100644
index 000000000..79c64c77b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/gnu.h
@@ -0,0 +1,28 @@
+/* Configuration common to all targets running the GNU system. */
+
+/* Provide GCC options for standard feature-test macros. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{bsd:-D_BSD_SOURCE}"
+
+/* Default C library spec. Use -lbsd-compat for gcc -bsd. */
+#undef LIB_SPEC
+#define LIB_SPEC "%{bsd:-lbsd-compat} %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
+
+/* Standard include directory. In GNU, "/usr" is a four-letter word. */
+#undef STANDARD_INCLUDE_DIR
+#define STANDARD_INCLUDE_DIR "/include"
+
+/* The system headers under GNU are C++-aware. */
+#define NO_IMPLICIT_EXTERN_C
+
+#define HURD_TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__gnu_hurd__"); \
+ builtin_define ("__GNU__"); \
+ builtin_define_std ("unix"); \
+ builtin_define_std ("MACH"); \
+ builtin_assert ("system=gnu"); \
+ builtin_assert ("system=mach"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } while (0)
diff --git a/gcc-4.2.1-5666.3/gcc/config/gofast.h b/gcc-4.2.1-5666.3/gcc/config/gofast.h
new file mode 100644
index 000000000..67b5c489f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/gofast.h
@@ -0,0 +1,81 @@
+/* US Software GOFAST floating point library support.
+ Copyright (C) 1994, 1998, 1999, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* The US Software GOFAST library requires special optabs support.
+ This file is intended to be included by config/ARCH/ARCH.c. It
+ defines one function, gofast_maybe_init_libfuncs, which should be
+ called from the TARGET_INIT_LIBFUNCS hook. When tm.h has defined
+ US_SOFTWARE_GOFAST, this function will adjust all the optabs and
+ libfuncs appropriately. Otherwise it will do nothing. */
+
+static void
+gofast_maybe_init_libfuncs (void)
+{
+#ifdef US_SOFTWARE_GOFAST
+ int mode;
+
+ set_optab_libfunc (add_optab, SFmode, "fpadd");
+ set_optab_libfunc (add_optab, DFmode, "dpadd");
+ set_optab_libfunc (sub_optab, SFmode, "fpsub");
+ set_optab_libfunc (sub_optab, DFmode, "dpsub");
+ set_optab_libfunc (smul_optab, SFmode, "fpmul");
+ set_optab_libfunc (smul_optab, DFmode, "dpmul");
+ set_optab_libfunc (sdiv_optab, SFmode, "fpdiv");
+ set_optab_libfunc (sdiv_optab, DFmode, "dpdiv");
+ set_optab_libfunc (cmp_optab, SFmode, "fpcmp");
+ set_optab_libfunc (cmp_optab, DFmode, "dpcmp");
+
+ /* GOFAST does not provide libfuncs for negation, so we use the
+ standard names. */
+
+ /* GCC does not use fpcmp/dpcmp for gt or ge because its own
+ FP-emulation library returns +1 for both > and unord. So we
+ leave gt and ge unset, such that, instead of fpcmp(a,b) >[=], we
+ generate fpcmp(b,a) <[=] 0, which is unambiguous. For unord
+ libfuncs, we use our own functions, since GOFAST doesn't supply
+ them. */
+
+ set_optab_libfunc (eq_optab, SFmode, "fpcmp");
+ set_optab_libfunc (ne_optab, SFmode, "fpcmp");
+ set_optab_libfunc (gt_optab, SFmode, 0);
+ set_optab_libfunc (ge_optab, SFmode, 0);
+ set_optab_libfunc (lt_optab, SFmode, "fpcmp");
+ set_optab_libfunc (le_optab, SFmode, "fpcmp");
+
+ set_optab_libfunc (eq_optab, DFmode, "dpcmp");
+ set_optab_libfunc (ne_optab, DFmode, "dpcmp");
+ set_optab_libfunc (gt_optab, DFmode, 0);
+ set_optab_libfunc (ge_optab, DFmode, 0);
+ set_optab_libfunc (lt_optab, DFmode, "dpcmp");
+ set_optab_libfunc (le_optab, DFmode, "dpcmp");
+
+ set_conv_libfunc (sext_optab, DFmode, SFmode, "fptodp");
+ set_conv_libfunc (trunc_optab, SFmode, DFmode, "dptofp");
+
+ set_conv_libfunc (sfix_optab, SImode, SFmode, "fptosi");
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "dptoli");
+ set_conv_libfunc (ufix_optab, SImode, SFmode, "fptoui");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "dptoul");
+
+ set_conv_libfunc (sfloat_optab, SFmode, SImode, "sitofp");
+ set_conv_libfunc (sfloat_optab, DFmode, SImode, "litodp");
+#endif
+}
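+
+/* A port would wire this in roughly as follows (sketch only; the
+ function name is illustrative):
+
+ static void
+ arch_init_libfuncs (void)
+ {
+   gofast_maybe_init_libfuncs ();
+ }
+
+ #undef TARGET_INIT_LIBFUNCS
+ #define TARGET_INIT_LIBFUNCS arch_init_libfuncs
+*/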
diff --git a/gcc-4.2.1-5666.3/gcc/config/host-darwin.c b/gcc-4.2.1-5666.3/gcc/config/host-darwin.c
new file mode 100644
index 000000000..24f39cd1d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/host-darwin.c
@@ -0,0 +1,82 @@
+/* Darwin host-specific hook definitions.
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <sys/mman.h>
+#include "toplev.h"
+#include "config/host-darwin.h"
+
+/* Yes, this is really supposed to work. */
+/* APPLE LOCAL begin ARM native compiler support */
+/* Not all Darwins are created equal. */
+static char pch_address_space[DARWIN_PCH_ADDR_SPACE_SIZE] __attribute__((aligned (4096)));
+/* APPLE LOCAL end ARM native compiler support */
+
+/* Return the address of the PCH address space, if the PCH will fit in it. */
+
+void *
+darwin_gt_pch_get_address (size_t sz, int fd ATTRIBUTE_UNUSED)
+{
+ if (sz <= sizeof (pch_address_space))
+ return pch_address_space;
+ else
+ return NULL;
+}
+
+/* Check ADDR and SZ for validity, and deallocate (using munmap) that part of
+ pch_address_space beyond SZ. */
+
+int
+darwin_gt_pch_use_address (void *addr, size_t sz, int fd, size_t off)
+{
+ const size_t pagesize = getpagesize();
+ void *mmap_result;
+ int ret;
+
+ gcc_assert ((size_t)pch_address_space % pagesize == 0
+ && sizeof (pch_address_space) % pagesize == 0);
+
+ ret = (addr == pch_address_space && sz <= sizeof (pch_address_space));
+ if (! ret)
+ sz = 0;
+
+ /* Round the size to a whole page size. Normally this is a no-op. */
+ sz = (sz + pagesize - 1) / pagesize * pagesize;
+
+ if (munmap (pch_address_space + sz, sizeof (pch_address_space) - sz) != 0)
+ fatal_error ("couldn't unmap pch_address_space: %m");
+
+ if (ret)
+ {
+ mmap_result = mmap (addr, sz,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
+ fd, off);
+
+ /* The file might not be mmap-able. */
+ ret = mmap_result != (void *) MAP_FAILED;
+
+ /* Sanity check for broken MAP_FIXED. */
+ gcc_assert (!ret || mmap_result == addr);
+ }
+
+ return ret;
+}
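+
+/* These two hooks cooperate: when a PCH is written, GCC calls
+ darwin_gt_pch_get_address to reserve the fixed buffer above, and
+ when the PCH is later loaded, darwin_gt_pch_use_address maps the
+ file back over the same address, so pointers serialized in the PCH
+ remain valid.  */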
diff --git a/gcc-4.2.1-5666.3/gcc/config/host-darwin.h b/gcc-4.2.1-5666.3/gcc/config/host-darwin.h
new file mode 100644
index 000000000..98b2a3fc3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/host-darwin.h
@@ -0,0 +1,28 @@
+/* Darwin host-specific hook definitions.
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+extern void * darwin_gt_pch_get_address (size_t sz, int fd);
+extern int darwin_gt_pch_use_address (void *addr, size_t sz, int fd,
+ size_t off);
+
+#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS darwin_gt_pch_get_address
+#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS darwin_gt_pch_use_address
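+
+/* A sketch of the intended use (mirroring the pattern of the other
+ host-*.c files in this directory): an architecture-specific Darwin
+ host file includes this header after hosthooks-def.h, so the
+ overrides above are picked up when HOST_HOOKS_INITIALIZER is
+ expanded:
+
+ #include "hosthooks.h"
+ #include "hosthooks-def.h"
+ #include "config/host-darwin.h"
+
+ const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
+*/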
diff --git a/gcc-4.2.1-5666.3/gcc/config/host-hpux.c b/gcc-4.2.1-5666.3/gcc/config/host-hpux.c
new file mode 100644
index 000000000..2ca5ba9bc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/host-hpux.c
@@ -0,0 +1,136 @@
+/* HP-UX host-specific hook definitions.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <sys/mman.h>
+#include <unistd.h>
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+
+#ifndef MAP_FAILED
+#define MAP_FAILED (void *)-1L
+#endif
+
+static void *hpux_gt_pch_get_address (size_t, int);
+static int hpux_gt_pch_use_address (void *, size_t, int, size_t);
+
+#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS hpux_gt_pch_get_address
+#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS hpux_gt_pch_use_address
+
+/* For various ports, try to guess a fixed spot in the vm space
+ that's probably free. */
+#if (defined(__hppa__) || defined(__ia64__)) && defined(__LP64__)
+# define TRY_EMPTY_VM_SPACE 0x8000000000000000
+#elif defined(__hppa__) || defined(__ia64__)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#else
+# define TRY_EMPTY_VM_SPACE 0
+#endif
+
+/* Determine a location where we might be able to reliably allocate
+ SIZE bytes. FD is the PCH file, though we should return with the
+ file unmapped. */
+
+static void *
+hpux_gt_pch_get_address (size_t size, int fd)
+{
+ void *addr;
+
+ addr = mmap ((void *)TRY_EMPTY_VM_SPACE, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, fd, 0);
+
+ /* If we failed the map, that means there's *no* free space. */
+ if (addr == (void *) MAP_FAILED)
+ return NULL;
+ /* Unmap the area before returning. */
+ munmap (addr, size);
+
+ return addr;
+}
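+
+/* The probe-then-unmap idiom above, reduced to a standalone sketch
+ (illustrative only: it probes with anonymous memory rather than the
+ PCH file, and error handling is trimmed). Without MAP_FIXED, mmap
+ treats its first argument as a hint, so we ask, note where the
+ kernel actually placed the mapping, and release it immediately;
+ ADDR is then a plausible target for a later fixed mapping:
+
+ void *hint = (void *) TRY_EMPTY_VM_SPACE;
+ size_t size = 1 << 20;
+ void *addr = mmap (hint, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+*/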
+
+/* Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
+ mapping the data at BASE, -1 if we couldn't.
+
+ It's not possible to reliably mmap a file using MAP_PRIVATE at
+ a specific START address on either hpux or linux. First we see
+ if mmap with MAP_PRIVATE works. If it does, we are off to the
+ races. If it doesn't, we try an anonymous private mmap since the
+ kernel is more likely to honor the BASE address in anonymous maps.
+ We then copy the data to the anonymous private map. This assumes
+ of course that we don't need to change the data in the PCH file
+ after it is created.
+
+ This approach obviously causes a performance penalty but there is
+ little else we can do given the current PCH implementation. */
+
+static int
+hpux_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
+{
+ void *addr;
+
+ /* We're called with size == 0 if we're not planning to load a PCH
+ file at all. This allows the hook to free any static space that
+ we might have allocated at link time. */
+ if (size == 0)
+ return -1;
+
+ /* Try to map the file with MAP_PRIVATE. */
+ addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, offset);
+
+ if (addr == base)
+ return 1;
+
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+
+ /* Try to make an anonymous private mmap at the desired location. */
+ addr = mmap (base, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (addr != base)
+ {
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+ return -1;
+ }
+
+ if (lseek (fd, offset, SEEK_SET) == (off_t)-1)
+ return -1;
+
+ while (size)
+ {
+ ssize_t nbytes;
+
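+ /* read cannot portably transfer more than SSIZE_MAX bytes in one
+ call, so copy the file into the mapping in bounded chunks. */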
+ nbytes = read (fd, base, MIN (size, SSIZE_MAX));
+ if (nbytes <= 0)
+ return -1;
+ base = (char *) base + nbytes;
+ size -= nbytes;
+ }
+
+ return 1;
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/host-linux.c b/gcc-4.2.1-5666.3/gcc/config/host-linux.c
new file mode 100644
index 000000000..795bb5522
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/host-linux.c
@@ -0,0 +1,219 @@
+/* Linux host-specific hook definitions.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <sys/mman.h>
+#include <limits.h>
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+
+
+/* Linux has a feature called exec-shield-randomize that perturbs the
+ address of non-fixed mapped segments by a (relatively) small amount.
+ The feature is intended to make it harder to attack the system with
+ buffer overflow attacks, since every invocation of a program will
+ have its libraries and data segments at slightly different addresses.
+
+ This feature causes us problems with PCH because it makes it that
+ much harder to acquire a stable location at which to map our PCH
+ data file.
+
+ [ The feature causes other points of non-determinism within the
+ compiler as well, so we'd *really* like to be able to have the
+ driver disable exec-shield-randomize for the process group, but
+ that isn't possible at present. ]
+
+ We're going to try several things:
+
+ * Select an architecture specific address as "likely" and see
+ if that's free. For our 64-bit hosts, we can easily choose
+ an address in Never Never Land.
+
+ * If exec-shield-randomize is disabled, then just use the
+ address chosen by mmap in step one.
+
+ * If exec-shield-randomize is enabled, then temporarily allocate
+ 32M of memory as a buffer, then allocate PCH memory, then
+ free the buffer. The theory here is that the perturbation is
+ no more than 16M, and so by allocating our buffer larger than
+ that we make it considerably more likely that the address will
+ be free when we want to load the data back.
+*/
+
+#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS linux_gt_pch_get_address
+
+#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS linux_gt_pch_use_address
+
+/* For various ports, try to guess a fixed spot in the vm space
+ that's probably free. */
+#if defined(__alpha)
+# define TRY_EMPTY_VM_SPACE 0x10000000000
+#elif defined(__ia64)
+# define TRY_EMPTY_VM_SPACE 0x2000000100000000
+#elif defined(__x86_64)
+# define TRY_EMPTY_VM_SPACE 0x1000000000
+#elif defined(__i386)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#elif defined(__powerpc__)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#elif defined(__s390x__)
+# define TRY_EMPTY_VM_SPACE 0x8000000000
+#elif defined(__s390__)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#elif defined(__sparc__) && defined(__LP64__)
+# define TRY_EMPTY_VM_SPACE 0x8000000000
+#elif defined(__sparc__)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#else
+# define TRY_EMPTY_VM_SPACE 0
+#endif
+
+/* Determine a location where we might be able to reliably allocate SIZE
+ bytes. FD is the PCH file, though we should return with the file
+ unmapped. */
+
+static void *
+linux_gt_pch_get_address (size_t size, int fd)
+{
+ size_t buffer_size = 32 * 1024 * 1024;
+ void *addr, *buffer;
+ FILE *f;
+ bool randomize_on;
+
+ addr = mmap ((void *)TRY_EMPTY_VM_SPACE, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, fd, 0);
+
+ /* If we failed the map, that means there's *no* free space. */
+ if (addr == (void *) MAP_FAILED)
+ return NULL;
+ /* Unmap the area before returning. */
+ munmap (addr, size);
+
+ /* If we got the exact area we requested, then that's great. */
+ if (TRY_EMPTY_VM_SPACE && addr == (void *) TRY_EMPTY_VM_SPACE)
+ return addr;
+
+ /* If we didn't, then we need to look to see if virtual address
+ randomization is on. That is recorded in
+ kernel.randomize_va_space. An older implementation used
+ kernel.exec-shield-randomize. */
+ f = fopen ("/proc/sys/kernel/randomize_va_space", "r");
+ if (f == NULL)
+ f = fopen ("/proc/sys/kernel/exec-shield-randomize", "r");
+ randomize_on = false;
+ if (f != NULL)
+ {
+ char buf[100];
+ size_t c;
+
+ c = fread (buf, 1, sizeof buf - 1, f);
+ if (c > 0)
+ {
+ buf[c] = '\0';
+ randomize_on = (atoi (buf) > 0);
+ }
+ fclose (f);
+ }
+
+ /* If it isn't, then accept the address that mmap selected as fine. */
+ if (!randomize_on)
+ return addr;
+
+ /* Otherwise, we need to try again with buffer space. */
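+ /* The buffer is mapped first and freed afterwards so that the
+ address chosen for the PCH data sits beyond a 32M hole; per the
+ note above, a later perturbation of up to 16M then stays inside
+ the hole instead of landing on the PCH address. */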
+ buffer = mmap (0, buffer_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ addr = mmap (0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+ if (buffer != (void *) MAP_FAILED)
+ munmap (buffer, buffer_size);
+ if (addr == (void *) MAP_FAILED)
+ return NULL;
+ munmap (addr, size);
+
+ return addr;
+}
+
+/* Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
+ mapping the data at BASE, -1 if we couldn't.
+
+ It's not possible to reliably mmap a file using MAP_PRIVATE at
+ a specific START address on either hpux or linux. First we see
+ if mmap with MAP_PRIVATE works. If it does, we are off to the
+ races. If it doesn't, we try an anonymous private mmap since the
+ kernel is more likely to honor the BASE address in anonymous maps.
+ We then copy the data to the anonymous private map. This assumes
+ of course that we don't need to change the data in the PCH file
+ after it is created.
+
+ This approach obviously causes a performance penalty but there is
+ little else we can do given the current PCH implementation. */
+
+static int
+linux_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
+{
+ void *addr;
+
+ /* We're called with size == 0 if we're not planning to load a PCH
+ file at all. This allows the hook to free any static space that
+ we might have allocated at link time. */
+ if (size == 0)
+ return -1;
+
+ /* Try to map the file with MAP_PRIVATE. */
+ addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, offset);
+
+ if (addr == base)
+ return 1;
+
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+
+ /* Try to make an anonymous private mmap at the desired location. */
+ addr = mmap (base, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (addr != base)
+ {
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+ return -1;
+ }
+
+ if (lseek (fd, offset, SEEK_SET) == (off_t)-1)
+ return -1;
+
+ while (size)
+ {
+ ssize_t nbytes;
+
+ nbytes = read (fd, base, MIN (size, SSIZE_MAX));
+ if (nbytes <= 0)
+ return -1;
+ base = (char *) base + nbytes;
+ size -= nbytes;
+ }
+
+ return 1;
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/host-solaris.c b/gcc-4.2.1-5666.3/gcc/config/host-solaris.c
new file mode 100644
index 000000000..df5f47391
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/host-solaris.c
@@ -0,0 +1,79 @@
+/* Solaris host-specific hook definitions.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <sys/mman.h>
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+
+
+#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS sol_gt_pch_use_address
+
+/* Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
+ mapping the data at BASE, -1 if we couldn't. */
+
+static int
+sol_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
+{
+ void *addr;
+
+ /* We're called with size == 0 if we're not planning to load a PCH
+ file at all. This allows the hook to free any static space that
+ we might have allocated at link time. */
+ if (size == 0)
+ return -1;
+
+ addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
+ fd, offset);
+
+ /* Solaris isn't good about honoring the mmap START parameter
+ without MAP_FIXED set. Before we give up, search the desired
+ address space with mincore to see if the space is really free. */
+ if (addr != base)
+ {
+ size_t page_size = getpagesize();
+ char one_byte;
+ size_t i;
+
+ if (addr != (void *) MAP_FAILED)
+ munmap (addr, size);
+
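+ /* Probe each page: mincore fails with ENOMEM for an unmapped page,
+ so only a scan in which every probe fails that way proves the
+ whole range is free and makes MAP_FIXED safe to use. */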
+ errno = 0;
+ for (i = 0; i < size; i += page_size)
+ if (mincore ((char *)base + i, page_size, (void *)&one_byte) == -1
+ && errno == ENOMEM)
+ continue; /* The page is not mapped. */
+ else
+ break;
+
+ if (i >= size)
+ addr = mmap (base, size,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
+ fd, offset);
+ }
+
+ return addr == base ? 1 : -1;
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/ammintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/ammintrin.h
new file mode 100644
index 000000000..8a466d914
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/ammintrin.h
@@ -0,0 +1,106 @@
+/* APPLE LOCAL file 5612787 mainline sse4 */
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the AMD Programmers
+ Manual Update, version 2.x */
+
+#ifndef _AMMINTRIN_H_INCLUDED
+#define _AMMINTRIN_H_INCLUDED
+
+#ifndef __SSE4A__
+# error "SSE4A instruction set not enabled"
+#else
+
+/* We need definitions from the SSE3, SSE2 and SSE header files.  */
+#include <pmmintrin.h>
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+__STATIC_INLINE void __attribute__((__always_inline__))
+_mm_stream_sd (double * __P, __m128d __Y)
+{
+ __builtin_ia32_movntsd (__P, (__v2df) __Y);
+}
+
+__STATIC_INLINE void __attribute__((__always_inline__))
+_mm_stream_ss (float * __P, __m128 __Y)
+{
+ __builtin_ia32_movntss (__P, (__v4sf) __Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_extract_si64 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y);
+}
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L)
+{
+ return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L);
+}
+#else
+#define _mm_extracti_si64(X, I, L) \
+ ((__m128i) __builtin_ia32_extrqi ((__v2di)(X), I, L))
+#endif
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_insert_si64 (__m128i __X,__m128i __Y)
+{
+ return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
+}
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L)
+{
+ return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L);
+}
+#else
+#define _mm_inserti_si64(X, Y, I, L) \
+ ((__m128i) __builtin_ia32_insertqi ((__v2di)(X), (__v2di)(Y), I, L))
+#endif
+
+#endif /* __SSE4A__ */
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* _AMMINTRIN_H_INCLUDED */
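+
+/* A minimal usage sketch (assumes a target compiled with -msse4a;
+ the function and variable names are illustrative, not part of this
+ header):
+
+ #include <ammintrin.h>
+
+ static void
+ stream_low (double *dst, __m128d v)
+ {
+ _mm_stream_sd (dst, v);            non-temporal store of the low double
+ }
+
+ static __m128i
+ low_byte (__m128i x)
+ {
+ return _mm_extracti_si64 (x, 8, 0);   8-bit field starting at bit 0
+ }
+*/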
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/athlon.md b/gcc-4.2.1-5666.3/gcc/config/i386/athlon.md
new file mode 100644
index 000000000..6d92b948b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/athlon.md
@@ -0,0 +1,874 @@
+;; AMD Athlon Scheduling
+;;
+;; The Athlon contains three pipelined FP units, three integer units and
+;; three address generation units.
+;;
+;; The predecode logic determines the boundaries of instructions in the 64
+;; byte cache line, so the cache line straddling problem of the K6 might be
+;; an issue here as well, though it is not noted in the documentation.
+;;
+;; Three DirectPath instruction decoders and only one VectorPath decoder
+;; are available.  They can decode three DirectPath instructions or one
+;; VectorPath instruction per cycle.
+;; Decoded macro instructions are then passed to the 72 entry instruction
+;; control unit, which passes them to the specialized integer (18 entry)
+;; and fp (36 entry) schedulers.
+;;
+;; The load/store queue unit is not attached to the schedulers but
+;; communicates with all the execution units separately instead.
+
+(define_attr "athlon_decode" "direct,vector,double"
+ (cond [(eq_attr "type" "call,imul,idiv,other,multi,fcmov,fpspc,str,pop,cld,leave")
+ (const_string "vector")
+ (and (eq_attr "type" "push")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "vector")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "load,store")
+ (eq_attr "mode" "XF")))
+ (const_string "vector")]
+ (const_string "direct")))
+
+;;
+;; decode0 decode1 decode2
+;; \ | /
+;; instruction control unit (72 entry scheduler)
+;; | |
+;; integer scheduler (18) stack map
+;; / | | | | \ stack rename
+;; ieu0 agu0 ieu1 agu1 ieu2 agu2 scheduler
+;; | agu0 | agu1 agu2 register file
+;; | \ | | / | | |
+;; \ /\ | / fadd fmul fstore
+;; \ / \ | / fadd fmul fstore
+;; imul load/store (2x) fadd fmul fstore
+
+(define_automaton "athlon,athlon_load,athlon_mult,athlon_fp")
+(define_cpu_unit "athlon-decode0" "athlon")
+(define_cpu_unit "athlon-decode1" "athlon")
+(define_cpu_unit "athlon-decode2" "athlon")
+(define_cpu_unit "athlon-decodev" "athlon")
+;; Model the fact that a double decoded instruction may take 2 cycles
+;; to decode when decoder2, and decoder0 in the next cycle, are used
+;; (this is needed to allow a throughput of 1.5 double decoded
+;; instructions per cycle).
+;;
+;; In order to avoid a dependence between the reservation of the decoder
+;; and other units, we model the decoder as a two stage fully pipelined
+;; unit, and only a double decoded instruction may occupy the unit in the
+;; first cycle.  With this scheme, however, two double instructions could
+;; be issued in cycle 0.
+;;
+;; Avoid this by using a presence set requiring decoder0 to be allocated
+;; too.  Vector decoded instructions then can't be issued when
+;; modeled as consuming decoder0+decoder1+decoder2.
+;; We solve that with a specialized vector decoder unit and an exclusion set.
+(presence_set "athlon-decode2" "athlon-decode0")
+(exclusion_set "athlon-decodev" "athlon-decode0,athlon-decode1,athlon-decode2")
+(define_reservation "athlon-vector" "nothing,athlon-decodev")
+(define_reservation "athlon-direct0" "nothing,athlon-decode0")
+(define_reservation "athlon-direct" "nothing,
+ (athlon-decode0 | athlon-decode1
+ | athlon-decode2)")
+;; Double instructions behave like two direct instructions.
+(define_reservation "athlon-double" "((athlon-decode2, athlon-decode0)
+ | (nothing,(athlon-decode0 + athlon-decode1))
+ | (nothing,(athlon-decode1 + athlon-decode2)))")
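+;; For example, the first alternative above occupies decode2 in one cycle
+;; and decode0 in the next, which is what allows the sustained rate of 1.5
+;; double decoded instructions per cycle noted earlier.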
+
+;; Modeling the agu and ieu units results in extremely large automata,
+;; and in our approximation they are hardly ever filled.  Only the ieu
+;; unit can be, as the issue rate is 3 and the agu unit is always used
+;; first in the insn reservations.  Skip the models.
+
+;(define_cpu_unit "athlon-ieu0" "athlon_ieu")
+;(define_cpu_unit "athlon-ieu1" "athlon_ieu")
+;(define_cpu_unit "athlon-ieu2" "athlon_ieu")
+;(define_reservation "athlon-ieu" "(athlon-ieu0 | athlon-ieu1 | athlon-ieu2)")
+(define_reservation "athlon-ieu" "nothing")
+(define_cpu_unit "athlon-ieu0" "athlon")
+;(define_cpu_unit "athlon-agu0" "athlon_agu")
+;(define_cpu_unit "athlon-agu1" "athlon_agu")
+;(define_cpu_unit "athlon-agu2" "athlon_agu")
+;(define_reservation "athlon-agu" "(athlon-agu0 | athlon-agu1 | athlon-agu2)")
+(define_reservation "athlon-agu" "nothing")
+
+(define_cpu_unit "athlon-mult" "athlon_mult")
+
+(define_cpu_unit "athlon-load0" "athlon_load")
+(define_cpu_unit "athlon-load1" "athlon_load")
+(define_reservation "athlon-load" "athlon-agu,
+ (athlon-load0 | athlon-load1),nothing")
+;; 128bit SSE instructions issue two loads at once
+(define_reservation "athlon-load2" "athlon-agu,
+ (athlon-load0 + athlon-load1),nothing")
+
+(define_reservation "athlon-store" "(athlon-load0 | athlon-load1)")
+;; 128bit SSE instructions issue two stores at once
+(define_reservation "athlon-store2" "(athlon-load0 + athlon-load1)")
+
+
+;; The FP operations start to execute at stage 12 in the pipeline, while
+;; integer operations start to execute at stage 9 for Athlon and 11 for K8.
+;; Compensate for the difference on Athlon because it results in significantly
+;; smaller automata.
+(define_reservation "athlon-fpsched" "nothing,nothing,nothing")
+;; The floating point loads.
+(define_reservation "athlon-fpload" "(athlon-fpsched + athlon-load)")
+(define_reservation "athlon-fpload2" "(athlon-fpsched + athlon-load2)")
+(define_reservation "athlon-fploadk8" "(athlon-fpsched + athlon-load)")
+(define_reservation "athlon-fpload2k8" "(athlon-fpsched + athlon-load2)")
+
+
+;; The three fp units are fully pipelined with latency of 3
+(define_cpu_unit "athlon-fadd" "athlon_fp")
+(define_cpu_unit "athlon-fmul" "athlon_fp")
+(define_cpu_unit "athlon-fstore" "athlon_fp")
+(define_reservation "athlon-fany" "(athlon-fstore | athlon-fmul | athlon-fadd)")
+(define_reservation "athlon-faddmul" "(athlon-fadd | athlon-fmul)")
+
+;; Vector operations usually consume many of the pipes.
+(define_reservation "athlon-fvector" "(athlon-fadd + athlon-fmul + athlon-fstore)")
+
+
+;; Jump instructions are executed in the branch unit, completely transparently to us.
+(define_insn_reservation "athlon_branch" 0
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "ibr"))
+ "athlon-direct,athlon-ieu")
+(define_insn_reservation "athlon_call" 0
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "call,callv"))
+ "athlon-vector,athlon-ieu")
+
+;; The latency of a push operation is 3 cycles, but the ESP value is available
+;; earlier.
+(define_insn_reservation "athlon_push" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "push"))
+ "athlon-direct,athlon-agu,athlon-store")
+(define_insn_reservation "athlon_pop" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "pop"))
+ "athlon-vector,athlon-load,athlon-ieu")
+(define_insn_reservation "athlon_pop_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "pop"))
+ "athlon-double,(athlon-ieu+athlon-load)")
+(define_insn_reservation "athlon_leave" 3
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "leave"))
+ "athlon-vector,(athlon-ieu+athlon-load)")
+(define_insn_reservation "athlon_leave_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "leave"))
+ "athlon-double,(athlon-ieu+athlon-load)")
+
+;; Lea executes in the AGU unit with a latency of 2 cycles.
+(define_insn_reservation "athlon_lea" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "lea"))
+ "athlon-direct,athlon-agu,nothing")
+
+;; Mul executes in a special multiplier unit attached to IEU0.
+(define_insn_reservation "athlon_imul" 5
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "none,unknown")))
+ "athlon-vector,athlon-ieu0,athlon-mult,nothing,nothing,athlon-ieu0")
+;; ??? Widening multiply is vector or double.
+(define_insn_reservation "athlon_imul_k8_DI" 4
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "imul")
+ (and (eq_attr "mode" "DI")
+ (eq_attr "memory" "none,unknown"))))
+ "athlon-direct0,athlon-ieu0,athlon-mult,nothing,athlon-ieu0")
+(define_insn_reservation "athlon_imul_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "none,unknown")))
+ "athlon-direct0,athlon-ieu0,athlon-mult,athlon-ieu0")
+(define_insn_reservation "athlon_imul_mem" 8
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "load,both")))
+ "athlon-vector,athlon-load,athlon-ieu,athlon-mult,nothing,nothing,athlon-ieu")
+(define_insn_reservation "athlon_imul_mem_k8_DI" 7
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "imul")
+ (and (eq_attr "mode" "DI")
+ (eq_attr "memory" "load,both"))))
+ "athlon-vector,athlon-load,athlon-ieu,athlon-mult,nothing,athlon-ieu")
+(define_insn_reservation "athlon_imul_mem_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "load,both")))
+ "athlon-vector,athlon-load,athlon-ieu,athlon-mult,athlon-ieu")
+
+;; Idiv cannot execute in parallel with other instructions.  Treating it
+;; as a short latency vector instruction is a good approximation that
+;; keeps the scheduler from trying too hard to hide its latency by
+;; overlapping it with other instructions.
+;; ??? Experiments show that the idiv can overlap with roughly 6 cycles
+;; of other code.
+
+(define_insn_reservation "athlon_idiv" 6
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "memory" "none,unknown")))
+ "athlon-vector,(athlon-ieu0*6+(athlon-fpsched,athlon-fvector))")
+(define_insn_reservation "athlon_idiv_mem" 9
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "memory" "load,both")))
+ "athlon-vector,((athlon-load,athlon-ieu0*6)+(athlon-fpsched,athlon-fvector))")
+;; The parallelism of string instructions is not documented.  Model them the
+;; same way as idiv to create smaller automata.  This probably does not matter much.
+(define_insn_reservation "athlon_str" 6
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "str")
+ (eq_attr "memory" "load,both,store")))
+ "athlon-vector,athlon-load,athlon-ieu0*6")
+
+(define_insn_reservation "athlon_idirect" 1
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "none,unknown"))))
+ "athlon-direct,athlon-ieu")
+(define_insn_reservation "athlon_ivector" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "none,unknown"))))
+ "athlon-vector,athlon-ieu,athlon-ieu")
+(define_insn_reservation "athlon_idirect_loadmov" 3
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-load")
+(define_insn_reservation "athlon_idirect_load" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-load,athlon-ieu")
+(define_insn_reservation "athlon_ivector_load" 6
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "load"))))
+ "athlon-vector,athlon-load,athlon-ieu,athlon-ieu")
+(define_insn_reservation "athlon_idirect_movstore" 1
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "store")))
+ "athlon-direct,athlon-agu,athlon-store")
+(define_insn_reservation "athlon_idirect_both" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "both"))))
+ "athlon-direct,athlon-load,
+ athlon-ieu,athlon-store,
+ athlon-store")
+(define_insn_reservation "athlon_ivector_both" 6
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "both"))))
+ "athlon-vector,athlon-load,
+ athlon-ieu,
+ athlon-ieu,
+ athlon-store")
+(define_insn_reservation "athlon_idirect_store" 1
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "store"))))
+ "athlon-direct,(athlon-ieu+athlon-agu),
+ athlon-store")
+(define_insn_reservation "athlon_ivector_store" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "unit" "integer,unknown")
+ (eq_attr "memory" "store"))))
+ "athlon-vector,(athlon-ieu+athlon-agu),athlon-ieu,
+ athlon-store")
+
+;; Athlon floating point unit
+(define_insn_reservation "athlon_fldxf" 12
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "load")
+ (eq_attr "mode" "XF"))))
+ "athlon-vector,athlon-fpload2,athlon-fvector*9")
+(define_insn_reservation "athlon_fldxf_k8" 13
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "load")
+ (eq_attr "mode" "XF"))))
+ "athlon-vector,athlon-fpload2k8,athlon-fvector*9")
+;; Assume superforwarding takes place, so the effective latency of the fany op is 0.
+(define_insn_reservation "athlon_fld" 0
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fany")
+(define_insn_reservation "athlon_fld_k8" 2
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fstore")
+
+(define_insn_reservation "athlon_fstxf" 10
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "store,both")
+ (eq_attr "mode" "XF"))))
+ "athlon-vector,(athlon-fpsched+athlon-agu),(athlon-store2+(athlon-fvector*7))")
+(define_insn_reservation "athlon_fstxf_k8" 8
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "store,both")
+ (eq_attr "mode" "XF"))))
+ "athlon-vector,(athlon-fpsched+athlon-agu),(athlon-store2+(athlon-fvector*6))")
+(define_insn_reservation "athlon_fst" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "store,both")))
+ "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
+(define_insn_reservation "athlon_fst_k8" 2
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "store,both")))
+ "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
+(define_insn_reservation "athlon_fist" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fistp,fisttp"))
+ "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
+(define_insn_reservation "athlon_fmov" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fmov"))
+ "athlon-direct,athlon-fpsched,athlon-faddmul")
+(define_insn_reservation "athlon_fadd_load" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fop")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_fadd_load_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fop")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_fadd" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fop"))
+ "athlon-direct,athlon-fpsched,athlon-fadd")
+(define_insn_reservation "athlon_fmul_load" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fmul")
+(define_insn_reservation "athlon_fmul_load_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fmul")
+(define_insn_reservation "athlon_fmul" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fmul"))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_fsgn" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fsgn"))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_fdiv_load" 24
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fmul")
+(define_insn_reservation "athlon_fdiv_load_k8" 13
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fmul")
+(define_insn_reservation "athlon_fdiv" 24
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "fdiv"))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_fdiv_k8" 11
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "fdiv"))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_fpspc_load" 103
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "fpspc")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload,athlon-fvector")
+(define_insn_reservation "athlon_fpspc" 100
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fpspc"))
+ "athlon-vector,athlon-fpsched,athlon-fvector")
+(define_insn_reservation "athlon_fcmov_load" 7
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fcmov")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload,athlon-fvector")
+(define_insn_reservation "athlon_fcmov" 7
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "fcmov"))
+ "athlon-vector,athlon-fpsched,athlon-fvector")
+(define_insn_reservation "athlon_fcmov_load_k8" 17
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fcmov")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fploadk8,athlon-fvector")
+(define_insn_reservation "athlon_fcmov_k8" 15
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "fcmov"))
+ "athlon-vector,athlon-fpsched,athlon-fvector")
+;; fcomi is vector decoded but uses only one pipe.
+(define_insn_reservation "athlon_fcomi_load" 3
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fcmp")
+ (and (eq_attr "athlon_decode" "vector")
+ (eq_attr "memory" "load"))))
+ "athlon-vector,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_fcomi_load_k8" 5
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fcmp")
+ (and (eq_attr "athlon_decode" "vector")
+ (eq_attr "memory" "load"))))
+ "athlon-vector,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_fcomi" 3
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "athlon_decode" "vector")
+ (eq_attr "type" "fcmp")))
+ "athlon-vector,athlon-fpsched,athlon-fadd")
+(define_insn_reservation "athlon_fcom_load" 2
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "fcmp")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_fcom_load_k8" 4
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "fcmp")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_fcom" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "fcmp"))
+ "athlon-direct,athlon-fpsched,athlon-fadd")
+;; Never seen by the scheduler because we still don't do post reg-stack
+;; scheduling.
+;(define_insn_reservation "athlon_fxch" 2
+; (and (eq_attr "cpu" "athlon,k8,generic64")
+; (eq_attr "type" "fxch"))
+; "athlon-direct,athlon-fpsched,athlon-fany")
+
+;; Athlon handles MMX operations in the FPU unit with shorter latencies.
+
+(define_insn_reservation "athlon_movlpd_load" 0
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemov")
+ (match_operand:DF 1 "memory_operand" "")))
+ "athlon-direct,athlon-fpload,athlon-fany")
+(define_insn_reservation "athlon_movlpd_load_k8" 2
+ (and (eq_attr "cpu" "k8")
+ (and (eq_attr "type" "ssemov")
+ (match_operand:DF 1 "memory_operand" "")))
+ "athlon-direct,athlon-fploadk8,athlon-fstore")
+(define_insn_reservation "athlon_movsd_load_generic64" 2
+ (and (eq_attr "cpu" "generic64")
+ (and (eq_attr "type" "ssemov")
+ (match_operand:DF 1 "memory_operand" "")))
+ "athlon-double,athlon-fploadk8,(athlon-fstore+athlon-fmul)")
+(define_insn_reservation "athlon_movaps_load_k8" 2
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssemov")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "load"))))
+ "athlon-double,athlon-fpload2k8,athlon-fstore,athlon-fstore")
+(define_insn_reservation "athlon_movaps_load" 0
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemov")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "load"))))
+ "athlon-vector,athlon-fpload2,(athlon-fany+athlon-fany)")
+(define_insn_reservation "athlon_movss_load" 1
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemov")
+ (and (eq_attr "mode" "SF,DI")
+ (eq_attr "memory" "load"))))
+ "athlon-vector,athlon-fpload,(athlon-fany*2)")
+(define_insn_reservation "athlon_movss_load_k8" 1
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssemov")
+ (and (eq_attr "mode" "SF,DI")
+ (eq_attr "memory" "load"))))
+ "athlon-double,athlon-fploadk8,(athlon-fstore+athlon-fany)")
+(define_insn_reservation "athlon_mmxsseld" 0
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "mmxmov,ssemov")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fany")
+(define_insn_reservation "athlon_mmxsseld_k8" 2
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "mmxmov,ssemov")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fploadk8,athlon-fstore")
+(define_insn_reservation "athlon_mmxssest" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "mmxmov,ssemov")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "store,both"))))
+ "athlon-vector,(athlon-fpsched+athlon-agu),((athlon-fstore+athlon-store2)*2)")
+(define_insn_reservation "athlon_mmxssest_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "mmxmov,ssemov")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "store,both"))))
+ "athlon-double,(athlon-fpsched+athlon-agu),((athlon-fstore+athlon-store2)*2)")
+(define_insn_reservation "athlon_mmxssest_short" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "mmxmov,ssemov")
+ (eq_attr "memory" "store,both")))
+ "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
+(define_insn_reservation "athlon_movaps_k8" 2
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssemov")
+ (eq_attr "mode" "V4SF,V2DF,TI")))
+ "athlon-double,athlon-fpsched,((athlon-faddmul+athlon-faddmul) | (athlon-faddmul, athlon-faddmul))")
+(define_insn_reservation "athlon_movaps" 2
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemov")
+ (eq_attr "mode" "V4SF,V2DF,TI")))
+ "athlon-vector,athlon-fpsched,(athlon-faddmul+athlon-faddmul)")
+(define_insn_reservation "athlon_mmxssemov" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "mmxmov,ssemov"))
+ "athlon-direct,athlon-fpsched,athlon-faddmul")
+(define_insn_reservation "athlon_mmxmul_load" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "mmxmul")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-fmul")
+(define_insn_reservation "athlon_mmxmul" 3
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "mmxmul"))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_mmx_load" 3
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "unit" "mmx")
+ (eq_attr "memory" "load")))
+ "athlon-direct,athlon-fpload,athlon-faddmul")
+(define_insn_reservation "athlon_mmx" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "unit" "mmx"))
+ "athlon-direct,athlon-fpsched,athlon-faddmul")
+;; SSE operations are handled by the i387 unit as well.  For scalar
+;; operations the latency is the same as for i387 operations.
+
+(define_insn_reservation "athlon_sselog_load" 3
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "sselog,sselog1")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload2,(athlon-fmul*2)")
+(define_insn_reservation "athlon_sselog_load_k8" 5
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "sselog,sselog1")
+ (eq_attr "memory" "load")))
+ "athlon-double,athlon-fpload2k8,(athlon-fmul*2)")
+(define_insn_reservation "athlon_sselog" 3
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "sselog,sselog1"))
+ "athlon-vector,athlon-fpsched,athlon-fmul*2")
+(define_insn_reservation "athlon_sselog_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "sselog,sselog1"))
+ "athlon-double,athlon-fpsched,athlon-fmul")
+;; ??? pcmp executes in addmul, probably not worthwhile to bother about that.
+(define_insn_reservation "athlon_ssecmp_load" 2
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssecmp")
+ (and (eq_attr "mode" "SF,DF,DI")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_ssecmp_load_k8" 4
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssecmp")
+ (and (eq_attr "mode" "SF,DF,DI,TI")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_ssecmp" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssecmp")
+ (eq_attr "mode" "SF,DF,DI,TI")))
+ "athlon-direct,athlon-fpsched,athlon-fadd")
+(define_insn_reservation "athlon_ssecmpvector_load" 3
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssecmp")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload2,(athlon-fadd*2)")
+(define_insn_reservation "athlon_ssecmpvector_load_k8" 5
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssecmp")
+ (eq_attr "memory" "load")))
+ "athlon-double,athlon-fpload2k8,(athlon-fadd*2)")
+(define_insn_reservation "athlon_ssecmpvector" 3
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "ssecmp"))
+ "athlon-vector,athlon-fpsched,(athlon-fadd*2)")
+(define_insn_reservation "athlon_ssecmpvector_k8" 3
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "ssecmp"))
+ "athlon-double,athlon-fpsched,(athlon-fadd*2)")
+(define_insn_reservation "athlon_ssecomi_load" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssecomi")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_ssecomi_load_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssecomi")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_ssecomi" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (eq_attr "type" "ssecmp"))
+ "athlon-vector,athlon-fpsched,athlon-fadd")
+(define_insn_reservation "athlon_sseadd_load" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "sseadd")
+ (and (eq_attr "mode" "SF,DF,DI")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fpload,athlon-fadd")
+(define_insn_reservation "athlon_sseadd_load_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "sseadd")
+ (and (eq_attr "mode" "SF,DF,DI")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fploadk8,athlon-fadd")
+(define_insn_reservation "athlon_sseadd" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "sseadd")
+ (eq_attr "mode" "SF,DF,DI")))
+ "athlon-direct,athlon-fpsched,athlon-fadd")
+(define_insn_reservation "athlon_sseaddvector_load" 5
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "sseadd")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload2,(athlon-fadd*2)")
+(define_insn_reservation "athlon_sseaddvector_load_k8" 7
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "sseadd")
+ (eq_attr "memory" "load")))
+ "athlon-double,athlon-fpload2k8,(athlon-fadd*2)")
+(define_insn_reservation "athlon_sseaddvector" 5
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "sseadd"))
+ "athlon-vector,athlon-fpsched,(athlon-fadd*2)")
+(define_insn_reservation "athlon_sseaddvector_k8" 5
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "sseadd"))
+ "athlon-double,athlon-fpsched,(athlon-fadd*2)")
+
+;; Conversions behave very irregularly and the scheduling is critical here.
+;; Take each instruction separately. Assume that the mode is always set to the
+;; destination one and athlon_decode is set to the K8 versions.
+
+;; cvtss2sd
+(define_insn_reservation "athlon_ssecvt_cvtss2sd_load_k8" 4
+ (and (eq_attr "cpu" "k8,athlon,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "memory" "load")))))
+ "athlon-direct,athlon-fploadk8,athlon-fstore")
+(define_insn_reservation "athlon_ssecvt_cvtss2sd" 2
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "direct")
+ (eq_attr "mode" "DF"))))
+ "athlon-direct,athlon-fpsched,athlon-fstore")
+;; cvtps2pd.  Model it the same way as the other double decoded FP conversions.
+(define_insn_reservation "athlon_ssecvt_cvtps2pd_load_k8" 5
+ (and (eq_attr "cpu" "k8,athlon,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "V2DF,V4SF,TI")
+ (eq_attr "memory" "load")))))
+ "athlon-double,athlon-fpload2k8,(athlon-fstore*2)")
+(define_insn_reservation "athlon_ssecvt_cvtps2pd_k8" 3
+ (and (eq_attr "cpu" "k8,athlon,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "double")
+ (eq_attr "mode" "V2DF,V4SF,TI"))))
+ "athlon-double,athlon-fpsched,athlon-fstore,athlon-fstore")
+;; cvtsi2sd mem,reg is directpath (cvtsi2sd reg,reg is doublepath);
+;; cvtsi2sd has throughput 1 and executes in the store unit with a latency of 6.
+(define_insn_reservation "athlon_sseicvt_cvtsi2sd_load" 6
+ (and (eq_attr "cpu" "athlon,k8")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "direct")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load")))))
+ "athlon-direct,athlon-fploadk8,athlon-fstore")
+;; cvtsi2ss mem, reg is doublepath
+(define_insn_reservation "athlon_sseicvt_cvtsi2ss_load" 9
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load")))))
+ "athlon-vector,athlon-fpload,(athlon-fstore*2)")
+(define_insn_reservation "athlon_sseicvt_cvtsi2ss_load_k8" 9
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load")))))
+ "athlon-double,athlon-fploadk8,(athlon-fstore*2)")
+;; cvtsi2sd reg,reg is double decoded (vector on Athlon)
+(define_insn_reservation "athlon_sseicvt_cvtsi2sd_k8" 11
+ (and (eq_attr "cpu" "k8,athlon,generic64")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "none")))))
+ "athlon-double,athlon-fploadk8,athlon-fstore")
+;; cvtsi2ss reg, reg is doublepath
+(define_insn_reservation "athlon_sseicvt_cvtsi2ss" 14
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "none")))))
+ "athlon-vector,athlon-fploadk8,(athlon-fvector*2)")
+;; cvtsd2ss mem,reg is doublepath, throughput unknown, latency 9
+(define_insn_reservation "athlon_ssecvt_cvtsd2ss_load_k8" 9
+ (and (eq_attr "cpu" "k8,athlon,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "memory" "load")))))
+ "athlon-double,athlon-fploadk8,(athlon-fstore*3)")
+;; cvtsd2ss reg,reg is vectorpath, throughput unknown, latency 12
+(define_insn_reservation "athlon_ssecvt_cvtsd2ss" 12
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "memory" "none")))))
+ "athlon-vector,athlon-fpsched,(athlon-fvector*3)")
+(define_insn_reservation "athlon_ssecvt_cvtpd2ps_load_k8" 8
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "load")))))
+ "athlon-double,athlon-fpload2k8,(athlon-fstore*3)")
+;; cvtpd2ps mem,reg is vectorpath, throughput unknown, latency 10
+;; ??? Why is it faster than cvtsd2ss?
+(define_insn_reservation "athlon_ssecvt_cvtpd2ps" 8
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssecvt")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "mode" "V4SF,V2DF,TI")
+ (eq_attr "memory" "none")))))
+ "athlon-vector,athlon-fpsched,athlon-fvector*2")
+;; cvtsd2si mem,reg is doublepath, throughput 1, latency 9
+(define_insn_reservation "athlon_secvt_cvtsX2si_load" 9
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "vector")
+ (and (eq_attr "mode" "SI,DI")
+ (eq_attr "memory" "load")))))
+ "athlon-vector,athlon-fploadk8,athlon-fvector")
+;; cvtsd2si reg,reg is doublepath, throughput 1, latency 9
+(define_insn_reservation "athlon_ssecvt_cvtsX2si" 9
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SI,DI")
+ (eq_attr "memory" "none")))))
+ "athlon-vector,athlon-fpsched,athlon-fvector")
+(define_insn_reservation "athlon_ssecvt_cvtsX2si_k8" 9
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "sseicvt")
+ (and (eq_attr "athlon_decode" "double")
+ (and (eq_attr "mode" "SI,DI")
+ (eq_attr "memory" "none")))))
+ "athlon-double,athlon-fpsched,athlon-fstore")
+
+
+(define_insn_reservation "athlon_ssemul_load" 4
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemul")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fpload,athlon-fmul")
+(define_insn_reservation "athlon_ssemul_load_k8" 6
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssemul")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fploadk8,athlon-fmul")
+(define_insn_reservation "athlon_ssemul" 4
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssemul")
+ (eq_attr "mode" "SF,DF")))
+ "athlon-direct,athlon-fpsched,athlon-fmul")
+(define_insn_reservation "athlon_ssemulvector_load" 5
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssemul")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload2,(athlon-fmul*2)")
+(define_insn_reservation "athlon_ssemulvector_load_k8" 7
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssemul")
+ (eq_attr "memory" "load")))
+ "athlon-double,athlon-fpload2k8,(athlon-fmul*2)")
+(define_insn_reservation "athlon_ssemulvector" 5
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "ssemul"))
+ "athlon-vector,athlon-fpsched,(athlon-fmul*2)")
+(define_insn_reservation "athlon_ssemulvector_k8" 5
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "ssemul"))
+ "athlon-double,athlon-fpsched,(athlon-fmul*2)")
+;; divsd timings; divss is faster.
+(define_insn_reservation "athlon_ssediv_load" 20
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssediv")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fpload,athlon-fmul*17")
+(define_insn_reservation "athlon_ssediv_load_k8" 22
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssediv")
+ (and (eq_attr "mode" "SF,DF")
+ (eq_attr "memory" "load"))))
+ "athlon-direct,athlon-fploadk8,athlon-fmul*17")
+(define_insn_reservation "athlon_ssediv" 20
+ (and (eq_attr "cpu" "athlon,k8,generic64")
+ (and (eq_attr "type" "ssediv")
+ (eq_attr "mode" "SF,DF")))
+ "athlon-direct,athlon-fpsched,athlon-fmul*17")
+(define_insn_reservation "athlon_ssedivvector_load" 39
+ (and (eq_attr "cpu" "athlon")
+ (and (eq_attr "type" "ssediv")
+ (eq_attr "memory" "load")))
+ "athlon-vector,athlon-fpload2,athlon-fmul*34")
+(define_insn_reservation "athlon_ssedivvector_load_k8" 35
+ (and (eq_attr "cpu" "k8,generic64")
+ (and (eq_attr "type" "ssediv")
+ (eq_attr "memory" "load")))
+ "athlon-double,athlon-fpload2k8,athlon-fmul*34")
+(define_insn_reservation "athlon_ssedivvector" 39
+ (and (eq_attr "cpu" "athlon")
+ (eq_attr "type" "ssediv"))
+ "athlon-vector,athlon-fmul*34")
+(define_insn_reservation "athlon_ssedivvector_k8" 39
+ (and (eq_attr "cpu" "k8,generic64")
+ (eq_attr "type" "ssediv"))
+ "athlon-double,athlon-fmul*34")
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/biarch64.h b/gcc-4.2.1-5666.3/gcc/config/i386/biarch64.h
new file mode 100644
index 000000000..46a55b0d1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/biarch64.h
@@ -0,0 +1,25 @@
+/* Make the configure machinery produce a biarch compiler defaulting to
+ 64-bit mode.  This file must be included first, with the OS specific
+ file included later to override otherwise wrong defaults.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bo Thorsen <bo@suse.de>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#define TARGET_64BIT_DEFAULT MASK_64BIT
+#define TARGET_BI_ARCH 1
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/constraints.md b/gcc-4.2.1-5666.3/gcc/config/i386/constraints.md
new file mode 100644
index 000000000..5d76ac523
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/constraints.md
@@ -0,0 +1,171 @@
+;; Constraint definitions for IA-32 and x86-64.
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;;; Unused letters:
+;;; B H TU W
+;;; h jk vw z
+
+;; Integer register constraints.
+;; It is not necessary to define 'r' here.
+(define_register_constraint "R" "LEGACY_REGS"
+ "Legacy register---the eight integer registers available on all
+ i386 processors (@code{a}, @code{b}, @code{c}, @code{d},
+ @code{si}, @code{di}, @code{bp}, @code{sp}).")
+
+(define_register_constraint "q" "TARGET_64BIT ? GENERAL_REGS : Q_REGS"
+ "Any register accessible as @code{@var{r}l}. In 32-bit mode, @code{a},
+ @code{b}, @code{c}, and @code{d}; in 64-bit mode, any integer register.")
+
+(define_register_constraint "Q" "Q_REGS"
+ "Any register accessible as @code{@var{r}h}: @code{a}, @code{b},
+ @code{c}, and @code{d}.")
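As an aside on how these letters surface in user code: in GNU C inline assembly, "q" is what you reach for when an instruction needs a byte-addressable register, which only matters in 32-bit mode. A minimal sketch, assuming nothing beyond GNU C; the function name is made up:

    /* setc writes a byte, so the output must live in a register with
       an 8-bit alias; "=q" guarantees that in 32-bit mode.  */
    static inline int add_carries (unsigned a, unsigned b)
    {
      unsigned char c;
      __asm__ ("addl %2, %1; setc %0" : "=q" (c), "+r" (a) : "r" (b));
      return c;
    }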
+
+(define_register_constraint "l" "INDEX_REGS"
+ "@internal Any register that can be used as the index in a base+index
+ memory access: that is, any general register except the stack pointer.")
+
+(define_register_constraint "a" "AREG"
+ "The @code{a} register.")
+
+(define_register_constraint "b" "BREG"
+ "The @code{b} register.")
+
+(define_register_constraint "c" "CREG"
+ "The @code{c} register.")
+
+(define_register_constraint "d" "DREG"
+ "The @code{d} register.")
+
+(define_register_constraint "S" "SIREG"
+ "The @code{si} register.")
+
+(define_register_constraint "D" "DIREG"
+ "The @code{di} register.")
+
+(define_register_constraint "A" "AD_REGS"
+ "The @code{a} and @code{d} registers, as a pair (for instructions
+ that return half the result in one and half in the other).")
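A minimal sketch of "A" in use, assuming 32-bit compilation (on x86-64 this constraint does not name the %edx:%eax pair in the same way):

    /* rdtsc delivers its 64-bit result in %edx:%eax, which is exactly
       the register pair the "A" constraint describes.  */
    static inline unsigned long long read_tsc (void)
    {
      unsigned long long t;
      __asm__ __volatile__ ("rdtsc" : "=A" (t));
      return t;
    }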
+
+;; Floating-point register constraints.
+(define_register_constraint "f"
+ "TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 ? FLOAT_REGS : NO_REGS"
+ "Any 80387 floating-point (stack) register.")
+
+(define_register_constraint "t"
+ "TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 ? FP_TOP_REG : NO_REGS"
+ "Top of 80387 floating-point stack (@code{%st(0)}).")
+
+(define_register_constraint "u"
+ "TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 ? FP_SECOND_REG : NO_REGS"
+ "Second from top of 80387 floating-point stack (@code{%st(1)}).")
+
+;; Vector registers (also used for plain floating point nowadays).
+(define_register_constraint "y" "TARGET_MMX ? MMX_REGS : NO_REGS"
+ "Any MMX register.")
+
+(define_register_constraint "x" "TARGET_SSE ? SSE_REGS : NO_REGS"
+ "Any SSE register.")
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; We use the Y prefix to denote any number of conditional register sets:
+;; 0 First SSE register.
+;; t SSE2 enabled
+;; i SSE2 inter-unit moves enabled
+;; m MMX inter-unit moves enabled
+
+(define_register_constraint "Y0" "TARGET_SSE ? SSE_FIRST_REG : NO_REGS"
+ "First SSE register (@code{%xmm0}).")
+
+(define_register_constraint "Yt" "TARGET_SSE2 ? SSE_REGS : NO_REGS"
+ "@internal Any SSE register, when SSE2 is enabled.")
+
+(define_register_constraint "Yi"
+ "TARGET_SSE2 && TARGET_INTER_UNIT_MOVES ? SSE_REGS : NO_REGS"
+ "@internal Any SSE register, when SSE2 and inter-unit moves are enabled.")
+
+(define_register_constraint "Ym"
+ "TARGET_MMX && TARGET_INTER_UNIT_MOVES ? MMX_REGS : NO_REGS"
+ "@internal Any MMX register, when inter-unit moves are enabled.")
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Integer constant constraints.
+(define_constraint "I"
+ "Integer constant in the range 0 @dots{} 31, for 32-bit shifts."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 31")))
+
+(define_constraint "J"
+ "Integer constant in the range 0 @dots{} 63, for 64-bit shifts."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 63")))
+
+(define_constraint "K"
+ "Signed 8-bit integer constant."
+ (and (match_code "const_int")
+ (match_test "ival >= -128 && ival <= 127")))
+
+(define_constraint "L"
+ "@code{0xFF} or @code{0xFFFF}, for andsi as a zero-extending move."
+ (and (match_code "const_int")
+ (match_test "ival == 0xFF || ival == 0xFFFF")))
+
+(define_constraint "M"
+ "0, 1, 2, or 3 (shifts for the @code{lea} instruction)."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 3")))
+
+(define_constraint "N"
+ "Unsigned 8-bit integer constant (for @code{in} and @code{out}
+ instructions)."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 255")))
+
+(define_constraint "O"
+ "@internal Integer constant in the range 0 @dots{} 127, for 128-bit shifts."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 127")))
+
+;; Floating-point constant constraints.
+;; We allow constants even if TARGET_80387 isn't set, because the
+;; stack register converter may need to load 0.0 into the function
+;; value register (top of stack).
+(define_constraint "G"
+ "Standard 80387 floating point constant."
+ (and (match_code "const_double")
+ (match_test "standard_80387_constant_p (op)")))
+
+;; This can theoretically be any mode's CONST0_RTX.
+(define_constraint "C"
+ "Standard SSE floating point constant."
+ (match_test "standard_sse_constant_p (op)"))
+
+;; Constant-or-symbol-reference constraints.
+
+(define_constraint "e"
+ "32-bit signed integer constant, or a symbolic reference known
+ to fit that range (for immediate operands in sign-extending x86-64
+ instructions)."
+ (match_operand 0 "x86_64_immediate_operand"))
+
+(define_constraint "Z"
+ "32-bit unsigned integer constant, or a symbolic reference known
+ to fit that range (for immediate operands in zero-extending x86-64
+ instructions)."
+ (match_operand 0 "x86_64_zext_immediate_operand"))
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.4.ver b/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.4.ver
new file mode 100644
index 000000000..aaeb934fe
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.4.ver
@@ -0,0 +1,81 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divxc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunsxfdi
+___fixunsxfsi
+___fixxfdi
+___floatdidf
+___floatdisf
+___floatdixf
+___gcc_personality_v0
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___mulvdi3
+___mulvsi3
+___mulxc3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powixf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.5.ver b/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.5.ver
new file mode 100644
index 000000000..02a085843
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/darwin-libgcc.10.5.ver
@@ -0,0 +1,85 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divxc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunsxfdi
+___fixunsxfsi
+___fixxfdi
+___floatdidf
+___floatdisf
+___floatdixf
+___floatundidf
+___floatundisf
+___floatundixf
+___gcc_personality_v0
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___mulvdi3
+___mulvsi3
+___mulxc3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powixf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/darwin.h b/gcc-4.2.1-5666.3/gcc/config/i386/darwin.h
new file mode 100644
index 000000000..d0b1db12b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/darwin.h
@@ -0,0 +1,452 @@
+/* Target definitions for x86 running Darwin.
+ Copyright (C) 2001, 2002, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Enable Mach-O bits in generic x86 code. */
+#undef TARGET_MACHO
+#define TARGET_MACHO 1
+
+/* APPLE LOCAL begin mainline */
+#undef TARGET_64BIT
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __x86_64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+#define TARGET_VERSION fprintf (stderr, " (i686 Darwin)");
+/* APPLE LOCAL end mainline */
+
+#undef TARGET_64BIT
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __x86_64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+/* Size of the Obj-C jump buffer. */
+#define OBJC_JBLEN ((TARGET_64BIT) ? ((9 * 2) + 3 + 16) : (18))
+
+#undef TARGET_FPMATH_DEFAULT
+#define TARGET_FPMATH_DEFAULT (TARGET_SSE ? FPMATH_SSE : FPMATH_387)
+
+/* APPLE LOCAL begin mainline */
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+
+#undef MAX_BITS_PER_WORD
+#define MAX_BITS_PER_WORD 64
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__LITTLE_ENDIAN__"); \
+ darwin_cpp_builtins (pfile); \
+ } \
+ while (0)
+/* APPLE LOCAL end mainline */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef MAX_BITS_PER_WORD
+#define MAX_BITS_PER_WORD 64
+
+#undef FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
+#define FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN (0)
+
+/* We want -fPIC by default, unless we're using -static to compile for
+ the kernel or some such. */
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{!mkernel:%{!static:%{!mdynamic-no-pic:-fPIC}}} \
+ "/* APPLE LOCAL ARM ignore -mthumb and -mno-thumb */"\
+ %<mthumb %<mno-thumb \
+ "/* APPLE LOCAL ARM 5683689 */"\
+ %{!mmacosx-version-min=*: %{!miphoneos-version-min=*: %(darwin_cc1_minversion)}} \
+ "/* APPLE LOCAL ignore -mcpu=G4 -mcpu=G5 */"\
+ %<faltivec %<mno-fused-madd %<mlong-branch %<mlongcall %<mcpu=G4 %<mcpu=G5 \
+ %{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }}"
+
+/* APPLE LOCAL AltiVec */
+#define CPP_ALTIVEC_SPEC "%<faltivec"
+
+/* APPLE LOCAL begin mainline */
+#undef ASM_SPEC
+/* APPLE LOCAL begin kext weak_import 5935650 */
+#define ASM_SPEC "-arch %(darwin_arch) -force_cpusubtype_ALL \
+ %{mkernel|static|fapple-kext:%{!m64:-static}}"
+/* APPLE LOCAL end kext weak_import 5935650 */
+
+#define DARWIN_ARCH_SPEC "%{m64:x86_64;:i386}"
+#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC
+
+/* APPLE LOCAL begin mainline 2007-03-13 5005743 5040758 */
+/* Determine a minimum version based on compiler options. */
+#define DARWIN_MINVERSION_SPEC \
+ "%{!m64|fgnu-runtime:10.4; \
+ ,objective-c|,objc-cpp-output:10.5; \
+ ,objective-c-header:10.5; \
+ ,objective-c++|,objective-c++-cpp-output:10.5; \
+ ,objective-c++-header|,objc++-cpp-output:10.5; \
+ :10.4}"
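Read concretely: the minimum stays at 10.4 unless compiling 64-bit code without -fgnu-runtime, in which case Objective-C and Objective-C++ inputs (and their preprocessed and header forms) raise it to 10.5; the final alternative is the 10.4 fallback.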
+
+/* APPLE LOCAL end mainline 2007-03-13 5005743 5040758 */
+/* APPLE LOCAL begin ARM 5683689 */
+/* Default cc1 option for specifying minimum version number. */
+#define DARWIN_CC1_MINVERSION_SPEC "-mmacosx-version-min=%(darwin_minversion)"
+
+/* Default ld option for specifying minimum version number. */
+#define DARWIN_LD_MINVERSION_SPEC "-macosx_version_min %(darwin_minversion)"
+
+/* Use macosx version numbers by default. */
+#define DARWIN_DEFAULT_VERSION_TYPE DARWIN_VERSION_MACOSX
+/* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL ARM 5681645 8307333 */
+#define DARWIN_IPHONEOS_LIBGCC_SPEC "-lgcc"
+
+/* APPLE LOCAL begin link optimizations 6499452 */
+#undef DARWIN_CRT1_SPEC
+#define DARWIN_CRT1_SPEC \
+ "%:version-compare(!> 10.5 mmacosx-version-min= -lcrt1.o) \
+ %:version-compare(>< 10.5 10.6 mmacosx-version-min= -lcrt1.10.5.o) \
+ %:version-compare(>= 10.6 mmacosx-version-min= -lcrt1.10.6.o)"
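In the driver's version-compare spec function, "!>" also matches when the flag is absent and "><" is a half-open range, so: no flag or an -mmacosx-version-min below 10.5 links crt1.o, a value in [10.5, 10.6) links crt1.10.5.o, and 10.6 or later links crt1.10.6.o.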
+
+#undef DARWIN_DYLIB1_SPEC
+#define DARWIN_DYLIB1_SPEC \
+ "%:version-compare(!> 10.5 mmacosx-version-min= -ldylib1.o) \
+ %:version-compare(>< 10.5 10.6 mmacosx-version-min= -ldylib1.10.5.o)"
+
+#undef DARWIN_BUNDLE1_SPEC
+#define DARWIN_BUNDLE1_SPEC \
+ "%:version-compare(!> 10.6 mmacosx-version-min= -lbundle1.o)"
+/* APPLE LOCAL end link optimizations 6499452 */
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ DARWIN_EXTRA_SPECS \
+ { "darwin_arch", DARWIN_ARCH_SPEC }, \
+ { "darwin_crt2", "" }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC },
+/* APPLE LOCAL end mainline */
+
+/* APPLE LOCAL begin prefer -lSystem 6645902 */
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{miphoneos-version-min=*: %G %L} \
+ %{!miphoneos-version-min=*: \
+ %{!static:%:version-compare(>= 10.6 mmacosx-version-min= -lSystem)} %G %L}"
+/* APPLE LOCAL end prefer -lSystem 6645902 */
+
+/* Use the following macro for any Darwin/x86-specific command-line option
+ translation. */
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "", "" }
+
+/* The Darwin assembler mostly follows AT&T syntax. */
+#undef ASSEMBLER_DIALECT
+#define ASSEMBLER_DIALECT ASM_ATT
+
+/* Define macro used to output shift-double opcodes when the shift
+ count is in %cl. Some assemblers require %cl as an argument;
+ some don't. This macro controls what to do: by default, don't
+ print %cl. */
+
+#define SHIFT_DOUBLE_OMITS_COUNT 0
+
+extern void darwin_x86_file_end (void);
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END darwin_x86_file_end
+
+/* Define the syntax of pseudo-ops, labels and comments. */
+
+/* String containing the assembler's comment-starter. */
+
+#define ASM_COMMENT_START "#"
+
+/* By default, the target has an 80387, uses IEEE-compatible arithmetic,
+ and returns float values in the 387. */
+
+#define TARGET_SUBTARGET_DEFAULT (MASK_80387 | MASK_IEEE_FP | MASK_FLOAT_RETURNS | MASK_128BIT_LONG_DOUBLE)
+/* APPLE LOCAL begin mainline */
+/* For darwin we want to target specific processor features as a minimum,
+ but these unfortunately don't correspond to a specific processor. */
+#undef TARGET_SUBTARGET32_DEFAULT
+#define TARGET_SUBTARGET32_DEFAULT (MASK_MMX \
+ | MASK_SSE \
+ | MASK_SSE2)
+
+#undef TARGET_SUBTARGET64_DEFAULT
+#define TARGET_SUBTARGET64_DEFAULT (MASK_MMX \
+ | MASK_SSE \
+ | MASK_SSE2 \
+ | MASK_SSE3)
+/* APPLE LOCAL end mainline */
+/* APPLE LOCAL mdynamic-no-pic */
+/* Remove disabling of mdynamic-no-pic */
+
+#undef GOT_SYMBOL_NAME
+#define GOT_SYMBOL_NAME (machopic_function_base_name ())
+
+/* Define the syntax of pseudo-ops, labels and comments. */
+
+#define LPREFIX "L"
+
+/* These are used by -fbranch-probabilities */
+#define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions"
+#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
+ "__TEXT,__unlikely,regular,pure_instructions"
+
+/* Assembler pseudos to introduce constants of various size. */
+
+#define ASM_BYTE_OP "\t.byte\t"
+#define ASM_SHORT "\t.word\t"
+#define ASM_LONG "\t.long\t"
+#define ASM_QUAD "\t.quad\t"
+
+#define SUBTARGET_ENCODE_SECTION_INFO darwin_encode_section_info
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ do { if ((LOG) != 0) \
+ { \
+ if (in_section == text_section) \
+ fprintf (FILE, "\t%s %d,0x90\n", ALIGN_ASM_OP, (LOG)); \
+ else \
+ fprintf (FILE, "\t%s %d\n", ALIGN_ASM_OP, (LOG)); \
+ } \
+ } while (0)
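In the text section this emits, for example, ".align 4,0x90" (16-byte alignment) so the padding decodes as NOPs rather than arbitrary bytes; outside the text section a plain ".align 4" suffices.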
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ /* APPLE LOCAL begin mainline */ \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (ROUNDED)))
+ /* APPLE LOCAL end mainline */
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (ROUNDED)))
+
+
+/* APPLE LOCAL begin Macintosh alignment 2002-2-19 --ff */
+#if 0
+#define MASK_ALIGN_NATURAL 0x40000000
+#define TARGET_ALIGN_NATURAL (target_flags & MASK_ALIGN_NATURAL)
+#define MASK_ALIGN_MAC68K 0x20000000
+#define TARGET_ALIGN_MAC68K (target_flags & MASK_ALIGN_MAC68K)
+#endif
+#define rs6000_alignment_flags target_flags
+
+#define ROUND_TYPE_ALIGN(TYPE, COMPUTED, SPECIFIED) \
+ (((TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == QUAL_UNION_TYPE) \
+ && OPTION_ALIGN_MAC68K \
+ && MAX (COMPUTED, SPECIFIED) == 8) ? 16 \
+ : MAX (COMPUTED, SPECIFIED))
+/* APPLE LOCAL end Macintosh alignment 2002-2-19 --ff */
+
+/* Darwin profiling -- call mcount. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ /* APPLE LOCAL axe stubs 5571540 */ \
+ if (darwin_stubs && MACHOPIC_INDIRECT && !TARGET_64BIT) \
+ { \
+ const char *name = machopic_mcount_stub_name (); \
+ fprintf (FILE, "\tcall %s\n", name+1); /* skip '&' */ \
+ machopic_validate_stub_or_non_lazy_ptr (name); \
+ } \
+ else fprintf (FILE, "\tcall mcount\n"); \
+ } while (0)
+
+/* APPLE LOCAL CW asm blocks */
+extern int flag_iasm_blocks;
+/* APPLE LOCAL begin fix-and-continue x86 */
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ /* APPLE LOCAL begin ARM 5683689 */ \
+ if (!darwin_macosx_version_min \
+ && !darwin_iphoneos_version_min) \
+ darwin_macosx_version_min = "10.1"; \
+ /* APPLE LOCAL end ARM 5683689 */ \
+ /* APPLE LOCAL begin CW asm blocks */ \
+ if (flag_iasm_blocks) \
+ flag_ms_asms = 1; \
+ /* APPLE LOCAL end CW asm blocks */ \
+ /* APPLE LOCAL begin constant cfstrings */ \
+ if (darwin_constant_cfstrings < 0) \
+ darwin_constant_cfstrings = 1; \
+ /* APPLE LOCAL end constant cfstrings */ \
+ if (TARGET_64BIT) \
+ { \
+ if (MACHO_DYNAMIC_NO_PIC_P) \
+ target_flags &= ~MASK_MACHO_DYNAMIC_NO_PIC; \
+ } \
+ /* APPLE LOCAL begin fix this for mainline */ \
+ /* For mainline this needs to be fixed to have every \
+ cpu architecture feature as an isa mask. Every \
+ cpu we've shipped supports all of these features. \
+ This includes all ix86_arch cpu features currently \
+ defined except x86_cmove which is turned on for \
+ TARGET_SSE anyhow. */ \
+ if (!ix86_arch_string) \
+ { \
+ x86_cmpxchg = ~(0); \
+ x86_cmpxchg8b = ~(0); \
+ x86_cmpxchg16b = ~(0); \
+ x86_xadd = ~(0); \
+ x86_bswap = ~(0); \
+ } \
+ /* APPLE LOCAL end fix this for mainline */ \
+ } while (0)
+
+/* True iff we're generating fast-turnaround debugging code. When
+ true, we arrange for function prologues to start with 6 nops so
+ that gdb may insert code to redirect them, and for data to be
+ accessed indirectly. The runtime uses this indirection to forward
+ references for data to the original instance of that data. */
+
+#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue)
+/* APPLE LOCAL end fix-and-continue x86 */
+
+#define C_COMMON_OVERRIDE_OPTIONS \
+ do { \
+ SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* APPLE LOCAL begin mainline 4.3 2006-10-31 4370143 */
+/* Removed PREFERRED_DEBUGGING_TYPE */
+/* APPLE LOCAL end mainline 4.3 2006-10-31 4370143 */
+
+/* Darwin uses the standard DWARF register numbers but the default
+ register numbers for STABS. Fortunately for 64-bit code the
+ default and the standard are the same. */
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+ (TARGET_64BIT ? dbx64_register_map[n] \
+ : write_symbols == DWARF2_DEBUG ? svr4_dbx_register_map[n] \
+ : dbx_register_map[n])
+
+/* Unfortunately, the 32-bit EH information also doesn't use the standard
+ DWARF register numbers. */
+#define DWARF2_FRAME_REG_OUT(n, for_eh) \
+ (! (for_eh) || write_symbols != DWARF2_DEBUG || TARGET_64BIT ? (n) \
+ : (n) == 5 ? 4 \
+ : (n) == 4 ? 5 \
+ : (n) >= 11 && (n) <= 18 ? (n) + 1 \
+ : (n))
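The renumbering is easiest to sanity-check by hand; a throwaway sketch with the EH branch of the macro inlined (eh_reg is a hypothetical helper, not part of this port):

    #include <stdio.h>

    /* The for_eh, 32-bit, DWARF2_DEBUG branch of DWARF2_FRAME_REG_OUT:
       swap 4 and 5, shift 11..18 up by one.  */
    static int eh_reg (int n)
    {
      return n == 5 ? 4
           : n == 4 ? 5
           : (n >= 11 && n <= 18) ? n + 1
           : n;
    }

    int main (void)
    {
      int n;
      for (n = 0; n < 20; n++)
        printf ("%d -> %d\n", n, eh_reg (n));
      return 0;
    }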
+
+/* APPLE LOCAL begin 4457939 stack alignment mishandled */
+/* <rdar://problem/4471596> stack alignment is not handled properly
+
+ Please remove this entire apple local when addressing this
+ Radar. */
+extern void ix86_darwin_init_expanders (void);
+#define INIT_EXPANDERS (ix86_darwin_init_expanders ())
+/* APPLE LOCAL end 4457939 stack alignment mishandled */
+
+
+/* APPLE LOCAL begin CW asm blocks */
+#define IASM_VALID_PIC(DECL, E) \
+ do { \
+ if (! TARGET_64BIT \
+ && E->as_immediate && ! MACHO_DYNAMIC_NO_PIC_P && flag_pic) \
+ warning (0, "non-pic addressing form not suitable for pic code"); \
+ } while (0)
+#define IASM_RIP(X) do { if (TARGET_64BIT) strcat (X, "(%%rip)"); } while (0)
+/* APPLE LOCAL end cw asm blocks */
+
+/* APPLE LOCAL KEXT */
+#define TARGET_SUPPORTS_KEXTABI1 (! TARGET_64BIT)
+
+
+#undef REGISTER_TARGET_PRAGMAS
+#define REGISTER_TARGET_PRAGMAS() DARWIN_REGISTER_TARGET_PRAGMAS()
+
+#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
+#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES darwin_set_default_type_attributes
+
+/* APPLE LOCAL begin mainline */
+/* For 64-bit, we need to add 4 because @GOTPCREL is relative to the
+ end of the instruction, but without the 4 we'd only have the right
+ address for the start of the instruction. */
+#undef ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX
+#define ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX(FILE, ENCODING, SIZE, ADDR, DONE) \
+ if (TARGET_64BIT) \
+ { \
+ if ((SIZE) == 4 && ((ENCODING) & 0x70) == DW_EH_PE_pcrel) \
+ { \
+ fputs (ASM_LONG, FILE); \
+ assemble_name (FILE, XSTR (ADDR, 0)); \
+ fputs ("+4@GOTPCREL", FILE); \
+ goto DONE; \
+ } \
+ } \
+ else \
+ { \
+ if (ENCODING == ASM_PREFERRED_EH_DATA_FORMAT (2, 1)) \
+ { \
+ darwin_non_lazy_pcrel (FILE, ADDR); \
+ goto DONE; \
+ } \
+ }
+/* APPLE LOCAL end mainline */
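Concretely, a 4-byte pc-relative reference to a symbol _foo in 64-bit code comes out as ".long _foo+4@GOTPCREL"; the 32-bit branch instead emits a pc-relative reference through a non-lazy pointer via darwin_non_lazy_pcrel.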
+/* APPLE LOCAL begin track initialization status 4964532 */
+/* APPLE LOCAL begin ARM 5683689 */
+#undef TARGET_DWARF_UNINIT_VARS
+#define TARGET_DWARF_UNINIT_VARS \
+ (darwin_iphoneos_version_min || \
+ strverscmp (darwin_macosx_version_min, "10.4") >= 0)
+/* APPLE LOCAL end ARM 5683689 */
+/* APPLE LOCAL end track initialization status 4964532 */
+
+/* This needs to move since i386 uses the first flag and other flags are
+ used in Mach-O. */
+#undef MACHO_SYMBOL_FLAG_VARIABLE
+#define MACHO_SYMBOL_FLAG_VARIABLE ((SYMBOL_FLAG_MACH_DEP) << 3)
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/darwin.opt b/gcc-4.2.1-5666.3/gcc/config/i386/darwin.opt
new file mode 100644
index 000000000..90854e1ca
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/darwin.opt
@@ -0,0 +1,15 @@
+; APPLE LOCAL begin Macintosh alignment 2002-2-19 --ff
+malign-mac68k
+Target Report Mask(ALIGN_MAC68K) Var(darwin_alignment_flags)
+Align structs and unions according to mac68k rules
+
+malign-natural
+Target Report Mask(ALIGN_NATURAL) Var(darwin_alignment_flags)
+Align structs and unions according to natural rules
+
+; Maybe we don't need this.
+;malign-power
+;; I want this to clear MASK_ALIGN_MAC68K | MASK_ALIGN_NATURAL
+;Target Undocumented InverseMask(ALIGN_MAC68K)
+;Align structs and unions according to PowerPC rules
+; APPLE LOCAL end Macintosh alignment 2002-2-19 --ff
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/darwin64.h b/gcc-4.2.1-5666.3/gcc/config/i386/darwin64.h
new file mode 100644
index 000000000..e630a7064
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/darwin64.h
@@ -0,0 +1,43 @@
+/* Target definitions for x86_64 running Darwin.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (x86_64 Darwin)");
+
+#undef DARWIN_ARCH_SPEC
+#define DARWIN_ARCH_SPEC "%{m32:i386;:x86_64}"
+
+#undef DARWIN_SUBARCH_SPEC
+#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC
+
+/* APPLE LOCAL begin kext 6400713 */
+#undef ASM_SPEC
+#define ASM_SPEC "-arch %(darwin_arch) -force_cpusubtype_ALL \
+ %{mkernel|static|fapple-kext:%{m32:-static}}"
+/* APPLE LOCAL end kext 6400713 */
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ /* APPLE LOCAL 6015949 */ \
+ DARWIN_EXTRA_SPECS \
+ { "darwin_arch", DARWIN_ARCH_SPEC }, \
+ { "darwin_crt2", "" }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC },
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/driver-i386.c b/gcc-4.2.1-5666.3/gcc/config/i386/driver-i386.c
new file mode 100644
index 000000000..ffcee4e55
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/driver-i386.c
@@ -0,0 +1,300 @@
+/* Subroutines for the gcc driver.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include <stdlib.h>
+
+const char *host_detect_local_cpu (int argc, const char **argv);
+
+#ifdef GCC_VERSION
+#define cpuid(num,a,b,c,d) \
+ asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1" \
+ : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
+ : "0" (num))
+
+#define bit_CMPXCHG8B (1 << 8)
+#define bit_CMOV (1 << 15)
+#define bit_MMX (1 << 23)
+#define bit_SSE (1 << 25)
+#define bit_SSE2 (1 << 26)
+
+#define bit_SSE3 (1 << 0)
+#define bit_CMPXCHG16B (1 << 13)
+
+#define bit_3DNOW (1 << 31)
+#define bit_3DNOWP (1 << 30)
+#define bit_LM (1 << 29)
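The xchgl dance in the cpuid macro exists because %ebx is the PIC base register on 32-bit Darwin and must not be clobbered. Usage is then straightforward; a minimal sketch using this file's macro and masks:

    /* Query leaf 1 and test one feature bit, mirroring the checks below.  */
    static int have_sse2 (void)
    {
      unsigned int eax, ebx, ecx, edx;
      cpuid (1, eax, ebx, ecx, edx);
      return !!(edx & bit_SSE2);
    }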
+
+/* This will be called by the spec parser in gcc.c when it sees
+ a %:local_cpu_detect(args) construct. Currently it will be called
+ with either "arch" or "tune" as argument, depending on whether
+ -march=native or -mtune=native is to be substituted.
+
+ It returns a string containing new command-line parameters to be
+ put in place of the above two options, depending on the CPU this
+ is executed on. E.g. it returns "-march=k8" on an AMD64 machine
+ for -march=native.
+
+ ARGC and ARGV are set depending on the actual arguments given
+ in the spec. */
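For context, mainline GCC wires this up through specs along these lines (an approximation, not quoted from this tree):

    %{march=native:%<march=native %:local_cpu_detect(arch)}
    %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}

that is, the driver deletes -march=native and splices in whatever string this function returns.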
+const char *host_detect_local_cpu (int argc, const char **argv)
+{
+ const char *cpu = NULL;
+ enum processor_type processor = PROCESSOR_I386;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int max_level;
+ unsigned int vendor;
+ unsigned int ext_level;
+ unsigned char has_mmx = 0, has_3dnow = 0, has_3dnowp = 0, has_sse = 0;
+ unsigned char has_sse2 = 0, has_sse3 = 0, has_cmov = 0;
+ unsigned char has_longmode = 0, has_cmpxchg8b = 0;
+ unsigned char is_amd = 0;
+ unsigned int family = 0;
+ bool arch;
+
+ if (argc < 1)
+ return NULL;
+
+ arch = strcmp (argv[0], "arch") == 0;
+ if (!arch && strcmp (argv[0], "tune"))
+ return NULL;
+
+#ifndef __x86_64__
+ /* See if we can use cpuid. */
+ asm volatile ("pushfl; pushfl; popl %0; movl %0,%1; xorl %2,%0;"
+ "pushl %0; popfl; pushfl; popl %0; popfl"
+ : "=&r" (eax), "=&r" (ebx)
+ : "i" (0x00200000));
+
+ if (((eax ^ ebx) & 0x00200000) == 0)
+ goto done;
+#endif
+
+ processor = PROCESSOR_PENTIUM;
+
+ /* Check the highest input value for eax. */
+ cpuid (0, eax, ebx, ecx, edx);
+ max_level = eax;
+ /* We only look at the first four characters. */
+ vendor = ebx;
+ if (max_level == 0)
+ goto done;
+
+ cpuid (1, eax, ebx, ecx, edx);
+ has_cmpxchg8b = !!(edx & bit_CMPXCHG8B);
+ has_cmov = !!(edx & bit_CMOV);
+ has_mmx = !!(edx & bit_MMX);
+ has_sse = !!(edx & bit_SSE);
+ has_sse2 = !!(edx & bit_SSE2);
+ has_sse3 = !!(ecx & bit_SSE3);
+ /* We don't care for extended family. */
+ family = (eax >> 8) & ~(1 << 4);
+
+ cpuid (0x80000000, eax, ebx, ecx, edx);
+ ext_level = eax;
+ if (ext_level >= 0x80000000)
+ {
+ cpuid (0x80000001, eax, ebx, ecx, edx);
+ has_3dnow = !!(edx & bit_3DNOW);
+ has_3dnowp = !!(edx & bit_3DNOWP);
+ has_longmode = !!(edx & bit_LM);
+ }
+
+ is_amd = vendor == *(unsigned int*)"Auth";
+
+ if (is_amd)
+ {
+ if (has_mmx)
+ processor = PROCESSOR_K6;
+ if (has_3dnowp)
+ processor = PROCESSOR_ATHLON;
+ if (has_sse2 || has_longmode)
+ processor = PROCESSOR_K8;
+ }
+ else
+ {
+ switch (family)
+ {
+ case 5:
+ /* Default is PROCESSOR_PENTIUM. */
+ break;
+ case 6:
+ processor = PROCESSOR_PENTIUMPRO;
+ break;
+ case 15:
+ processor = PROCESSOR_PENTIUM4;
+ break;
+ default:
+ /* We have no idea. Use something reasonable. */
+ if (arch)
+ {
+ if (has_sse3)
+ {
+ if (has_longmode)
+ cpu = "nocona";
+ else
+ cpu = "prescott";
+ }
+ else if (has_sse2)
+ cpu = "pentium4";
+ else if (has_cmov)
+ cpu = "pentiumpro";
+ else if (has_mmx)
+ cpu = "pentium-mmx";
+ else if (has_cmpxchg8b)
+ cpu = "pentium";
+ else
+ cpu = "i386";
+ }
+ else
+ cpu = "generic";
+ goto done;
+ break;
+ }
+ }
+
+ switch (processor)
+ {
+ case PROCESSOR_I386:
+ cpu = "i386";
+ break;
+ case PROCESSOR_I486:
+ cpu = "i486";
+ break;
+ case PROCESSOR_PENTIUM:
+ if (has_mmx)
+ cpu = "pentium-mmx";
+ else
+ cpu = "pentium";
+ break;
+ case PROCESSOR_PENTIUMPRO:
+ if (arch)
+ {
+ if (has_sse3)
+ {
+ if (has_longmode)
+ {
+ /* It is Core 2 Duo. */
+ cpu = "nocona";
+ }
+ else
+ {
+ /* It is Core Duo. */
+ cpu = "prescott";
+ }
+ }
+ else if (has_sse2)
+ {
+ /* It is Pentium M. */
+ cpu = "pentium4";
+ }
+ else if (has_sse)
+ {
+ /* It is Pentium III. */
+ cpu = "pentium3";
+ }
+ else if (has_mmx)
+ {
+ /* It is Pentium II. */
+ cpu = "pentium2";
+ }
+ else
+ {
+ /* Default to Pentium Pro. */
+ cpu = "pentiumpro";
+ }
+ }
+ else
+ {
+ /* For -mtune, we default to -mtune=generic. */
+ cpu = "generic";
+ }
+ break;
+ case PROCESSOR_K6:
+ if (has_3dnow)
+ cpu = "k6-3";
+ else
+ cpu = "k6";
+ break;
+ case PROCESSOR_ATHLON:
+ if (has_sse)
+ cpu = "athlon-4";
+ else
+ cpu = "athlon";
+ break;
+ case PROCESSOR_PENTIUM4:
+ if (has_sse3)
+ {
+ if (has_longmode)
+ cpu = "nocona";
+ else
+ cpu = "prescott";
+ }
+ else
+ cpu = "pentium4";
+ break;
+ case PROCESSOR_K8:
+ cpu = "k8";
+ break;
+ case PROCESSOR_NOCONA:
+ cpu = "nocona";
+ break;
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
+ cpu = "generic";
+ break;
+ default:
+ abort ();
+ break;
+ }
+
+done:
+ return concat ("-m", argv[0], "=", cpu, NULL);
+}
+#else
+/* If we aren't compiling with GCC we just provide a minimal
+ default value. */
+const char *host_detect_local_cpu (int argc, const char **argv)
+{
+ const char *cpu;
+ bool arch;
+
+ if (argc < 1)
+ return NULL;
+
+ arch = strcmp (argv[0], "arch") == 0;
+ if (!arch && strcmp (argv[0], "tune"))
+ return NULL;
+
+ if (arch)
+ {
+ /* FIXME: i386 is wrong for a 64-bit compiler. How can we tell
+ whether we are generating 64-bit or 32-bit code? */
+ cpu = "i386";
+ }
+ else
+ cpu = "generic";
+
+ return concat ("-m", argv[0], "=", cpu, NULL);
+}
+#endif /* GCC_VERSION */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/emmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/emmintrin.h
new file mode 100644
index 000000000..857ea6ff9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/emmintrin.h
@@ -0,0 +1,1981 @@
+/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef _EMMINTRIN_H_INCLUDED
+#define _EMMINTRIN_H_INCLUDED
+
+#ifdef __SSE2__
+#include <xmmintrin.h>
+
+/* SSE2 */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef int __v4si __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__ ((__vector_size__ (16)));
+typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Create a selector for use with the SHUFPD instruction. */
+#define _MM_SHUFFLE2(fp1,fp0) \
+ (((fp1) << 1) | (fp0))
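Bit 0 of the selector picks the low result lane from the first operand and bit 1 the high lane from the second, so swapping the lanes of one vector is a one-liner; a sketch using _mm_shuffle_pd, which is defined further down in this header:

    static __inline __m128d swap_pd (__m128d __v)
    {
      /* _MM_SHUFFLE2 (0, 1): low lane from element 1, high lane from
         element 0; with both operands equal, this swaps the lanes.  */
      return _mm_shuffle_pd (__v, __v, _MM_SHUFFLE2 (0, 1));
    }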
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* APPLE LOCAL begin radar 4152603 */
+/* Create a vector with element 0 as F and the rest zero. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_sd (double __F)
+{
+ return __extension__ (__m128d){ __F, 0 };
+}
+
+/* Create a vector with both elements equal to F. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_pd (double __F)
+{
+ return __extension__ (__m128d){ __F, __F };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pd1 (double __F)
+{
+ return _mm_set1_pd (__F);
+}
+
+/* Create a vector with the lower value X and upper value W. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __X, __W };
+}
+
+/* Create a vector with the lower value W and upper value X. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_pd (double __W, double __X)
+{
+ return __extension__ (__m128d){ __W, __X };
+}
+
+/* Create a vector of zeros. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setzero_pd (void)
+{
+ return __extension__ (__m128d){ 0.0, 0.0 };
+}
+
+/* Sets the low DPFP value of A from the low value of B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_move_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* Load two DPFP values from P. The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_pd (double const *__P)
+{
+ return *(__m128d *)__P;
+}
+
+/* Load two DPFP values from P. The address need not be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadu_pd (double const *__P)
+{
+ return __builtin_ia32_loadupd (__P);
+}
+
+/* Create a vector with both elements equal to *P. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load1_pd (double const *__P)
+{
+ return _mm_set1_pd (*__P);
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_sd (double const *__P)
+{
+ return _mm_set_sd (*__P);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_pd1 (double const *__P)
+{
+ return _mm_load1_pd (__P);
+}
+
+/* Load two DPFP values in reverse order. The address must be aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadr_pd (double const *__P)
+{
+ __m128d __tmp = _mm_load_pd (__P);
+ return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
+}
+
+/* Store two DPFP values. The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_pd (double *__P, __m128d __A)
+{
+ *(__m128d *)__P = __A;
+}
+
+/* Store two DPFP values. The address need not be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storeu_pd (double *__P, __m128d __A)
+{
+ __builtin_ia32_storeupd (__P, __A);
+}
+
+/* Stores the lower DPFP value. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_sd (double *__P, __m128d __A)
+{
+ *__P = __builtin_ia32_vec_ext_v2df (__A, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE double __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsd_f64 (__m128d __A)
+{
+ return __builtin_ia32_vec_ext_v2df (__A, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storel_pd (double *__P, __m128d __A)
+{
+ _mm_store_sd (__P, __A);
+}
+
+/* Stores the upper DPFP value. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storeh_pd (double *__P, __m128d __A)
+{
+ *__P = __builtin_ia32_vec_ext_v2df (__A, 1);
+}
+
+/* Store the lower DPFP value across two words.
+ The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store1_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0)));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_pd1 (double *__P, __m128d __A)
+{
+ _mm_store1_pd (__P, __A);
+}
+
+/* Store two DPFP values in reverse order. The address must be aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storer_pd (double *__P, __m128d __A)
+{
+ _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1)));
+}
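A typical use of this load/store family, assuming unaligned data and the arithmetic intrinsics defined just below:

    /* Unaligned load, vector add, unaligned store: two doubles at a time.  */
    static __inline void add2_pd (double *__a, const double *__b)
    {
      _mm_storeu_pd (__a, _mm_add_pd (_mm_loadu_pd (__a), _mm_loadu_pd (__b)));
    }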
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi128_si32 (__m128i __A)
+{
+ return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0);
+}
+
+#ifdef __x86_64__
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi128_si64 (__m128i __A)
+{
+ return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi128_si64x (__m128i __A)
+{
+ return __builtin_ia32_vec_ext_v2di ((__v2di)__A, 0);
+}
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_div_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_div_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sqrt_pd (__m128d __A)
+{
+ return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
+}
+
+/* Return pair {sqrt (A[0]), B[1]}. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sqrt_sd (__m128d __A, __m128d __B)
+{
+ __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
+ return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_and_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_andnot_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_or_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_xor_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmple_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpge_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpneq_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnlt_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnle_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpngt_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnge_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpord_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpunord_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmple_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
+ (__v2df)
+ __builtin_ia32_cmpltsd ((__v2df) __B,
+ (__v2df)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpge_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
+ (__v2df)
+ __builtin_ia32_cmplesd ((__v2df) __B,
+ (__v2df)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpneq_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnlt_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnle_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpngt_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
+ (__v2df)
+ __builtin_ia32_cmpnltsd ((__v2df) __B,
+ (__v2df)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnge_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
+ (__v2df)
+ __builtin_ia32_cmpnlesd ((__v2df) __B,
+ (__v2df)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpord_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpunord_sd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comieq_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comilt_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comile_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comigt_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comige_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comineq_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomieq_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomilt_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomile_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomigt_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomige_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomineq_sd (__m128d __A, __m128d __B)
+{
+ return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
+}
+
+/* Create a vector of Qi, where i is the element number. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_epi64x (long long __q1, long long __q0)
+{
+ return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_epi64 (__m64 __q1, __m64 __q0)
+{
+ return _mm_set_epi64x ((long long)__q1, (long long)__q0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
+{
+ return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
+ short __q3, short __q2, short __q1, short __q0)
+{
+ return __extension__ (__m128i)(__v8hi){
+ __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
+ char __q11, char __q10, char __q09, char __q08,
+ char __q07, char __q06, char __q05, char __q04,
+ char __q03, char __q02, char __q01, char __q00)
+{
+ return __extension__ (__m128i)(__v16qi){
+ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+ __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
+ };
+}
+
+/* APPLE LOCAL begin 4220129 */
+/* functions moved to end of file */
+/* APPLE LOCAL end 4220129 */
+
+/* Create a vector of Qi, where i is the element number.
+ The parameter order is reversed from the _mm_set_epi* functions. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_epi64 (__m64 __q0, __m64 __q1)
+{
+ return _mm_set_epi64 (__q1, __q0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
+{
+ return _mm_set_epi32 (__q3, __q2, __q1, __q0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
+ short __q4, short __q5, short __q6, short __q7)
+{
+ return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
+ char __q04, char __q05, char __q06, char __q07,
+ char __q08, char __q09, char __q10, char __q11,
+ char __q12, char __q13, char __q14, char __q15)
+{
+ return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+ __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+}
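+
+/* Illustrative sketch (hypothetical example, not from the original header):
+ the setr variants take their arguments in memory order, so the two calls
+ below build identical vectors. Guarded out so it never affects builds;
+ it also uses _mm_cmpeq_epi32 and _mm_movemask_epi8, defined later in
+ this file. */
+#if 0
+static int example_set_vs_setr (void)
+{
+ __m128i a = _mm_set_epi32 (3, 2, 1, 0); /* element 0 == 0, element 3 == 3 */
+ __m128i b = _mm_setr_epi32 (0, 1, 2, 3); /* same layout, reversed argument order */
+ __m128i eq = _mm_cmpeq_epi32 (a, b);
+ return _mm_movemask_epi8 (eq) == 0xffff; /* 1: all 128 bits compare equal */
+}
+#endif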
+
+/* Load 128 bits from *P. _mm_load_si128 requires a 16-byte-aligned
+ address, _mm_loadu_si128 does not; _mm_loadl_epi64 creates a vector
+ with element 0 as the low quadword of *P and the rest zero. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_si128 (__m128i const *__P)
+{
+ return *__P;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadu_si128 (__m128i const *__P)
+{
+ return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
+}
+
+/* APPLE LOCAL begin 4099020 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadl_epi64 (__m128i const *__P)
+{
+ return (__m128i)__builtin_ia32_loadlv4si ((__v2si *)__P);
+}
+/* APPLE LOCAL end 4099020 */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_si128 (__m128i *__P, __m128i __B)
+{
+ *__P = __B;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storeu_si128 (__m128i *__P, __m128i __B)
+{
+ __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin 4099020 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storel_epi64 (__m128i *__P, __m128i __B)
+{
+ __builtin_ia32_storelv4si ((__v2si *)__P, __B);
+}
+/* APPLE LOCAL end 4099020 */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movepi64_pi64 (__m128i __B)
+{
+ return (__m64) __builtin_ia32_vec_ext_v2di ((__v2di)__B, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movpi64_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 ((__m64)0LL, __A);
+}
+
+/* APPLE LOCAL begin 4099020 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_move_epi64 (__m128i __A)
+{
+ return (__m128i)__builtin_ia32_movqv4si ((__v4si)__A) ;
+}
+/* APPLE LOCAL end 4099020 */
+
+/* Create a vector of zeros. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setzero_si128 (void)
+{
+ return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtepi32_pd (__m128i __A)
+{
+ return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtepi32_ps (__m128i __A)
+{
+ return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpd_epi32 (__m128d __A)
+{
+ return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpd_pi32 (__m128d __A)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpd_ps (__m128d __A)
+{
+ return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttpd_epi32 (__m128d __A)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttpd_pi32 (__m128d __A)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpi32_pd (__m64 __A)
+{
+ return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtps_epi32 (__m128 __A)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttps_epi32 (__m128 __A)
+{
+ return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtps_pd (__m128 __A)
+{
+ return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsd_si32 (__m128d __A)
+{
+ return __builtin_ia32_cvtsd2si ((__v2df) __A);
+}
+
+#ifdef __x86_64__
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsd_si64 (__m128d __A)
+{
+ return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsd_si64x (__m128d __A)
+{
+ return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
+}
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttsd_si32 (__m128d __A)
+{
+ return __builtin_ia32_cvttsd2si ((__v2df) __A);
+}
+
+#ifdef __x86_64__
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttsd_si64 (__m128d __A)
+{
+ return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttsd_si64x (__m128d __A)
+{
+ return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
+}
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsd_ss (__m128 __A, __m128d __B)
+{
+ return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi32_sd (__m128d __A, int __B)
+{
+ return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
+}
+
+#ifdef __x86_64__
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_sd (__m128d __A, long long __B)
+{
+ return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64x_sd (__m128d __A, long long __B)
+{
+ return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
+}
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtss_sd (__m128d __A, __m128 __B)
+{
+ return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL 5814283 */
+#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)(__A), (__v2df)(__B), (__C)))
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_pd (__m128d __A, __m128d __B)
+{
+ return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadh_pd (__m128d __A, double const *__B)
+{
+ return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadl_pd (__m128d __A, double const *__B)
+{
+ return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movemask_pd (__m128d __A)
+{
+ return __builtin_ia32_movmskpd ((__v2df)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packs_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packus_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_madd_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhi_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mullo_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_su32 (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_epu32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
+}
+
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_epi16 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_epi32 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_epi64 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
+}
+#else
+#define _mm_slli_epi16(__A, __B) \
+ ((__m128i)__builtin_ia32_psllwi128 ((__v8hi)(__A), __B))
+#define _mm_slli_epi32(__A, __B) \
+ ((__m128i)__builtin_ia32_pslldi128 ((__v4si)(__A), __B))
+#define _mm_slli_epi64(__A, __B) \
+ ((__m128i)__builtin_ia32_psllqi128 ((__v2di)(__A), __B))
+#endif
+
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srai_epi16 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srai_epi32 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
+}
+#else
+#define _mm_srai_epi16(__A, __B) \
+ ((__m128i)__builtin_ia32_psrawi128 ((__v8hi)(__A), __B))
+#define _mm_srai_epi32(__A, __B) \
+ ((__m128i)__builtin_ia32_psradi128 ((__v4si)(__A), __B))
+#endif
+
+#if 0
+static __m128i __attribute__((__always_inline__))
+_mm_srli_si128 (__m128i __A, int __B)
+{
+ return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B * 8));
+}
+
+static __m128i __attribute__((__always_inline__))
+_mm_slli_si128 (__m128i __A, int __B)
+{
+ return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B * 8));
+}
+#else
+/* APPLE LOCAL begin 5919583 */
+#define _mm_srli_si128 (__m128i)__builtin_ia32_psrldqi128_byteshift
+#define _mm_slli_si128 (__m128i)__builtin_ia32_pslldqi128_byteshift
+/* APPLE LOCAL end 5919583 */
+#endif
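+
+/* Illustrative sketch (hypothetical example, not from the original header):
+ unlike the bit-count _mm_srli_epi* forms, _mm_srli_si128/_mm_slli_si128
+ shift the whole 128-bit value by a byte count (the disabled code above
+ multiplies by 8 to convert bytes to bits). */
+#if 0
+static __m128i example_byteshift (__m128i __v)
+{
+ return _mm_srli_si128 (__v, 4); /* shift right by 4 bytes == 32 bits */
+}
+#endif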
+
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_epi16 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_epi32 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_epi64 (__m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
+}
+#else
+#define _mm_srli_epi16(__A, __B) \
+ ((__m128i)__builtin_ia32_psrlwi128 ((__v8hi)(__A), __B))
+#define _mm_srli_epi32(__A, __B) \
+ ((__m128i)__builtin_ia32_psrldi128 ((__v4si)(__A), __B))
+#define _mm_srli_epi64(__A, __B) \
+ ((__m128i)__builtin_ia32_psrlqi128 ((__v2di)(__A), __B))
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psllq128((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sra_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sra_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_epi64 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_and_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_andnot_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_or_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_xor_si128 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
+}
+
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_extract_epi16 (__m128i const __A, int const __N)
+{
+ return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
+{
+ return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N);
+}
+#else
+#define _mm_extract_epi16(A, N) \
+ ((int) __builtin_ia32_vec_ext_v8hi ((__v8hi)(A), (N)))
+#define _mm_insert_epi16(A, D, N) \
+ ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(A), (D), (N)))
+#endif
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_epi16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movemask_epi8 (__m128i __A)
+{
+ return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhi_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin 5814283 */
+#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__A), __B))
+#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__A), __B))
+#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)(__A), __B))
+/* APPLE LOCAL end 5814283 */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
+{
+ __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_avg_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_avg_epu16 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sad_epu8 (__m128i __A, __m128i __B)
+{
+ return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_stream_si32 (int *__A, int __B)
+{
+ __builtin_ia32_movnti (__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_stream_si128 (__m128i *__A, __m128i __B)
+{
+ __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_stream_pd (double *__A, __m128d __B)
+{
+ __builtin_ia32_movntpd (__A, (__v2df)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_clflush (void const *__A)
+{
+ __builtin_ia32_clflush (__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_lfence (void)
+{
+ __builtin_ia32_lfence ();
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mfence (void)
+{
+ __builtin_ia32_mfence ();
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi32_si128 (int __A)
+{
+ return _mm_set_epi32 (0, 0, 0, __A);
+}
+
+#ifdef __x86_64__
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_si128 (long long __A)
+{
+ return _mm_set_epi64x (0, __A);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64x_si128 (long long __A)
+{
+ return _mm_set_epi64x (0, __A);
+}
+#endif
+
+/* Casts between various SP, DP, INT vector types. Note that these do no
+ conversion of values, they just change the type. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castpd_ps(__m128d __A)
+{
+ return (__m128) __A;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castpd_si128(__m128d __A)
+{
+ return (__m128i) __A;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castps_pd(__m128 __A)
+{
+ return (__m128d) __A;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castps_si128(__m128 __A)
+{
+ return (__m128i) __A;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castsi128_ps(__m128i __A)
+{
+ return (__m128) __A;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_castsi128_pd(__m128i __A)
+{
+ return (__m128d) __A;
+}
+/* APPLE LOCAL end radar 4152603 */
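+
+/* Illustrative sketch (hypothetical example, not from the original header):
+ a cast reinterprets the bit pattern, while a cvt converts values. It uses
+ _mm_set1_epi32, defined just below. */
+#if 0
+static void example_cast_vs_cvt (void)
+{
+ __m128i ones = _mm_set1_epi32 (1);
+ __m128 bits = _mm_castsi128_ps (ones); /* 0x00000001 bit patterns (denormals) */
+ __m128 vals = _mm_cvtepi32_ps (ones); /* 1.0f in every element */
+ (void) bits;
+ (void) vals;
+}
+#endif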
+
+/* APPLE LOCAL begin 4220129, 4286110 */
+/* Set all of the elements of the vector to A. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_epi64x (long long __A)
+{
+ return _mm_set_epi64x (__A, __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_epi64 (__m64 __A)
+{
+ return _mm_set_epi64 (__A, __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_epi32 (int __A)
+{
+ return _mm_set_epi32 (__A, __A, __A, __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_epi16 (short __A)
+{
+ __m128i temp, temp2, temp3;
+ temp = _mm_cvtsi32_si128((int)__A); /* __A in 32-bit element 0 */
+ temp2 = _mm_unpacklo_epi16(temp, temp); /* duplicate into 16-bit elements 0 and 1 */
+ temp3 = _mm_shuffle_epi32(temp2, 0); /* broadcast 32-bit element 0 to all four */
+ return temp3;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_epi8 (char __A)
+{
+ __m128i temp, temp2, temp3, temp4;
+ temp = _mm_cvtsi32_si128 ((int)__A); /* __A in 32-bit element 0 */
+ temp2 = _mm_unpacklo_epi8 (temp, temp); /* duplicate into 8-bit elements 0 and 1 */
+ temp3 = _mm_unpacklo_epi8 (temp2, temp2); /* duplicate into 8-bit elements 0..3 */
+ temp4 = _mm_shuffle_epi32 (temp3, 0); /* broadcast 32-bit element 0 to all four */
+ return temp4;
+}
+/* APPLE LOCAL end 4220129, 4286110 */
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* __SSE2__ */
+
+#endif /* _EMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/gmm_malloc.h b/gcc-4.2.1-5666.3/gcc/config/i386/gmm_malloc.h
new file mode 100644
index 000000000..20d7f5e04
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/gmm_malloc.h
@@ -0,0 +1,77 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+#ifndef _MM_MALLOC_H_INCLUDED
+#define _MM_MALLOC_H_INCLUDED
+
+#include <stdlib.h>
+#include <errno.h>
+
+static __inline__ void*
+_mm_malloc (size_t size, size_t align)
+{
+ void * malloc_ptr;
+ void * aligned_ptr;
+
+ /* Error if align is not a power of two: a power of two has exactly
+ one bit set, so ALIGN & (ALIGN - 1) is zero for powers of two. */
+ if (align & (align - 1))
+ {
+ errno = EINVAL;
+ return ((void*) 0);
+ }
+
+ if (size == 0)
+ return ((void *) 0);
+
+ /* Assume the malloc'd pointer is aligned at least to sizeof (void*).
+ If necessary, add another sizeof (void*) to store the value
+ returned by malloc. Effectively this enforces a minimum alignment
+ of 2 * sizeof (void *). */
+ if (align < 2 * sizeof (void *))
+ align = 2 * sizeof (void *);
+
+ malloc_ptr = malloc (size + align);
+ if (!malloc_ptr)
+ return ((void *) 0);
+
+ /* Round up to the requested alignment; adding the full ALIGN (rather
+ than ALIGN - 1) guarantees at least sizeof (void *) of space below
+ the aligned pointer. */
+ aligned_ptr = (void *) (((size_t) malloc_ptr + align)
+ & ~((size_t) (align) - 1));
+
+ /* Store the original pointer just below the aligned pointer. */
+ ((void **) aligned_ptr) [-1] = malloc_ptr;
+
+ return aligned_ptr;
+}
+
+static __inline__ void
+_mm_free (void * aligned_ptr)
+{
+ if (aligned_ptr)
+ free (((void **) aligned_ptr) [-1]);
+}
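+
+/* Illustrative sketch (hypothetical example, not from this header): request
+ a 64-byte-aligned buffer and release it. The rounding above works like
+ malloc_ptr 0x1003 with align 16: 0x1003 + 0x10 = 0x1013, masked with
+ ~0xf gives 0x1010, leaving 13 bytes below for the stashed pointer. */
+#if 0
+#include <assert.h>
+static void example_mm_malloc (void)
+{
+ float *buf = (float *) _mm_malloc (256 * sizeof (float), 64);
+ if (buf)
+ {
+ assert (((size_t) buf & 63) == 0); /* 64-byte aligned */
+ _mm_free (buf);
+ }
+}
+#endif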
+
+#endif /* _MM_MALLOC_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/gstabs.h b/gcc-4.2.1-5666.3/gcc/config/i386/gstabs.h
new file mode 100644
index 000000000..e9a621871
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/gstabs.h
@@ -0,0 +1,7 @@
+/* We do not want to output SDB debugging information. */
+
+#undef SDB_DEBUGGING_INFO
+
+/* We want to output DBX debugging information. */
+
+#define DBX_DEBUGGING_INFO 1
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/host-i386-darwin.c b/gcc-4.2.1-5666.3/gcc/config/i386/host-i386-darwin.c
new file mode 100644
index 000000000..3025bdae6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/host-i386-darwin.c
@@ -0,0 +1,31 @@
+/* i386-darwin host-specific hook definitions.
+ Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "config/host-darwin.h"
+
+/* Darwin doesn't do anything special for x86 hosts; this file exists just
+ to include config/host-darwin.h. */
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386-modes.def b/gcc-4.2.1-5666.3/gcc/config/i386/i386-modes.def
new file mode 100644
index 000000000..3cb4cb1b8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386-modes.def
@@ -0,0 +1,97 @@
+/* Definitions of target machine for GCC for IA-32.
+ Copyright (C) 2002, 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* The x86_64 ABI specifies both XF and TF modes.
+ XFmode (__float80) is IEEE extended; TFmode (__float128)
+ is IEEE quad. */
+
+FRACTIONAL_FLOAT_MODE (XF, 80, 12, ieee_extended_intel_96_format);
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* In ILP32 mode, XFmode has size 12 and alignment 4.
+ In LP64 mode, XFmode has size and alignment 16. */
+ADJUST_FLOAT_FORMAT (XF, (TARGET_128BIT_LONG_DOUBLE
+ ? &ieee_extended_intel_128_format
+ : TARGET_96_ROUND_53_LONG_DOUBLE
+ ? &ieee_extended_intel_96_round_53_format
+ : &ieee_extended_intel_96_format));
+ADJUST_BYTESIZE (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 12);
+ADJUST_ALIGNMENT (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 4);
+
+/* Add any extra modes needed to represent the condition code.
+
+ For the i386, we need separate modes when floating-point
+ equality comparisons are being done.
+
+ Add CCNO to indicate comparisons against zero that require the
+ Overflow flag to be unset. The sign bit test is used instead and
+ thus can be used to form "a&b>0"-style tests.
+
+ Add CCGC to indicate comparisons against zero that allow
+ unspecified garbage in the Carry flag. This mode is used
+ by inc/dec instructions.
+
+ Add CCGOC to indicate comparisons against zero that allow
+ unspecified garbage in the Carry and Overflow flags. This
+ mode is used to simulate comparisons of (a-b) and (a+b)
+ against zero using sub/cmp/add operations.
+
+ APPLE LOCAL begin 5612787 mainline sse4
+ Add CCA to indicate that only the Above flag is valid.
+ Add CCC to indicate that only the Carry flag is valid.
+ Add CCO to indicate that only the Overflow flag is valid.
+ Add CCS to indicate that only the Sign flag is valid.
+ APPLE LOCAL end 5612787 mainline sse4
+ Add CCZ to indicate that only the Zero flag is valid. */
+
+CC_MODE (CCGC);
+CC_MODE (CCGOC);
+CC_MODE (CCNO);
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+CC_MODE (CCA);
+CC_MODE (CCC);
+CC_MODE (CCO);
+CC_MODE (CCS);
+/* APPLE LOCAL end 5612787 mainline sse4 */
+CC_MODE (CCZ);
+CC_MODE (CCFP);
+CC_MODE (CCFPU);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
+/* APPLE LOCAL 5612787 mainline sse4 */
+VECTOR_MODE (INT, QI, 2); /* V2QI */
+VECTOR_MODE (INT, DI, 4); /* V4DI */
+VECTOR_MODE (INT, SI, 8); /* V8SI */
+VECTOR_MODE (INT, HI, 16); /* V16HI */
+VECTOR_MODE (INT, QI, 32); /* V32QI */
+VECTOR_MODE (FLOAT, DF, 4); /* V4DF */
+VECTOR_MODE (FLOAT, SF, 8); /* V8SF */
+/* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+VECTOR_MODE (INT, DI, 1); /* V1DI (__m64) */
+VECTOR_MODE (INT, SI, 1); /* V1SI */
+/* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+/* The symbol Pmode stands for one of the above machine modes (usually SImode).
+ The tm.h file specifies which one. It is not a distinct mode. */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386-protos.h b/gcc-4.2.1-5666.3/gcc/config/i386/i386-protos.h
new file mode 100644
index 000000000..f92428e55
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386-protos.h
@@ -0,0 +1,261 @@
+/* Definitions of target machine for GCC for IA-32.
+ Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Functions in i386.c */
+extern void override_options (void);
+extern void optimization_options (int, int);
+
+extern int ix86_can_use_return_insn_p (void);
+extern int ix86_frame_pointer_required (void);
+extern void ix86_setup_frame_addresses (void);
+
+extern void ix86_file_end (void);
+extern HOST_WIDE_INT ix86_initial_elimination_offset (int, int);
+extern void ix86_expand_prologue (void);
+extern void ix86_expand_epilogue (int);
+
+extern void ix86_output_addr_vec_elt (FILE *, int);
+extern void ix86_output_addr_diff_elt (FILE *, int, int);
+
+#ifdef RTX_CODE
+extern int ix86_aligned_p (rtx);
+
+extern int standard_80387_constant_p (rtx);
+extern const char *standard_80387_constant_opcode (rtx);
+extern rtx standard_80387_constant_rtx (int);
+extern int standard_sse_constant_p (rtx);
+extern const char *standard_sse_constant_opcode (rtx, rtx);
+extern int symbolic_reference_mentioned_p (rtx);
+extern bool extended_reg_mentioned_p (rtx);
+extern bool x86_extended_QIreg_mentioned_p (rtx);
+extern bool x86_extended_reg_mentioned_p (rtx);
+extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
+
+extern int ix86_expand_movmem (rtx, rtx, rtx, rtx);
+extern int ix86_expand_clrmem (rtx, rtx, rtx);
+extern int ix86_expand_strlen (rtx, rtx, rtx, rtx);
+
+extern bool legitimate_constant_p (rtx);
+extern bool constant_address_p (rtx);
+extern bool legitimate_pic_operand_p (rtx);
+extern int legitimate_pic_address_disp_p (rtx);
+extern int legitimate_address_p (enum machine_mode, rtx, int);
+extern rtx legitimize_address (rtx, rtx, enum machine_mode);
+
+extern void print_reg (rtx, int, FILE*);
+extern void print_operand (FILE*, rtx, int);
+extern void print_operand_address (FILE*, rtx);
+extern bool output_addr_const_extra (FILE*, rtx);
+
+extern void split_di (rtx[], int, rtx[], rtx[]);
+extern void split_ti (rtx[], int, rtx[], rtx[]);
+
+extern const char *output_set_got (rtx, rtx);
+extern const char *output_387_binary_op (rtx, rtx*);
+extern const char *output_387_reg_move (rtx, rtx*);
+extern const char *output_fix_trunc (rtx, rtx*, int);
+extern const char *output_fp_compare (rtx, rtx*, int, int);
+
+extern void ix86_expand_clear (rtx);
+extern void ix86_expand_move (enum machine_mode, rtx[]);
+extern void ix86_expand_vector_move (enum machine_mode, rtx[]);
+extern void ix86_expand_vector_move_misalign (enum machine_mode, rtx[]);
+extern void ix86_expand_push (enum machine_mode, rtx);
+extern rtx ix86_fixup_binary_operands (enum rtx_code,
+ enum machine_mode, rtx[]);
+extern void ix86_fixup_binary_operands_no_copy (enum rtx_code,
+ enum machine_mode, rtx[]);
+extern void ix86_expand_binary_operator (enum rtx_code,
+ enum machine_mode, rtx[]);
+extern int ix86_binary_operator_ok (enum rtx_code, enum machine_mode, rtx[]);
+extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode,
+ rtx[]);
+/* APPLE LOCAL begin 4176531 4424891 */
+extern const char *ix86_expand_convert_uns_DF2SI_sse (rtx *);
+extern const char *ix86_expand_convert_uns_SF2SI_sse (rtx *);
+extern const char *ix86_expand_convert_uns_DI2DF_sse (rtx *);
+extern const char *ix86_expand_convert_uns_SI2DF_sse (rtx *);
+extern const char *ix86_expand_convert_sign_DI2DF_sse (rtx *);
+/* APPLE LOCAL end 4176531 4424891 */
+extern rtx ix86_build_signbit_mask (enum machine_mode, bool, bool);
+extern void ix86_expand_fp_absneg_operator (enum rtx_code, enum machine_mode,
+ rtx[]);
+extern void ix86_expand_copysign (rtx []);
+extern void ix86_split_copysign_const (rtx []);
+extern void ix86_split_copysign_var (rtx []);
+extern int ix86_unary_operator_ok (enum rtx_code, enum machine_mode, rtx[]);
+extern int ix86_match_ccmode (rtx, enum machine_mode);
+extern rtx ix86_expand_compare (enum rtx_code, rtx *, rtx *);
+extern int ix86_use_fcomi_compare (enum rtx_code);
+extern void ix86_expand_branch (enum rtx_code, rtx);
+extern int ix86_expand_setcc (enum rtx_code, rtx);
+extern int ix86_expand_int_movcc (rtx[]);
+extern int ix86_expand_fp_movcc (rtx[]);
+extern bool ix86_expand_fp_vcond (rtx[]);
+extern bool ix86_expand_int_vcond (rtx[]);
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+extern void ix86_expand_sse_unpack (rtx[], bool, bool);
+extern void ix86_expand_sse4_unpack (rtx[], bool, bool);
+/* APPLE LOCAL end 5612787 mainline sse4 */
+extern int ix86_expand_int_addcc (rtx[]);
+extern void ix86_expand_call (rtx, rtx, rtx, rtx, rtx, int);
+extern void x86_initialize_trampoline (rtx, rtx, rtx);
+extern rtx ix86_zero_extend_to_Pmode (rtx);
+extern void ix86_split_long_move (rtx[]);
+extern void ix86_split_ashl (rtx *, rtx, enum machine_mode);
+extern void ix86_split_ashr (rtx *, rtx, enum machine_mode);
+extern void ix86_split_lshr (rtx *, rtx, enum machine_mode);
+extern rtx ix86_find_base_term (rtx);
+extern int ix86_check_movabs (rtx, int);
+
+extern rtx assign_386_stack_local (enum machine_mode, enum ix86_stack_slot);
+extern int ix86_attr_length_immediate_default (rtx, int);
+extern int ix86_attr_length_address_default (rtx);
+
+extern enum machine_mode ix86_fp_compare_mode (enum rtx_code);
+
+extern rtx ix86_libcall_value (enum machine_mode);
+extern bool ix86_function_value_regno_p (int);
+extern bool ix86_function_arg_regno_p (int);
+extern int ix86_function_arg_boundary (enum machine_mode, tree);
+extern int ix86_return_in_memory (tree);
+/* APPLE LOCAL radar 4781080 */
+extern bool ix86_objc_fpreturn_msgcall (tree, bool);
+extern void ix86_va_start (tree, rtx);
+extern rtx ix86_va_arg (tree, tree);
+
+extern rtx ix86_force_to_memory (enum machine_mode, rtx);
+extern void ix86_free_from_memory (enum machine_mode);
+extern void ix86_split_fp_branch (enum rtx_code code, rtx, rtx,
+ rtx, rtx, rtx, rtx);
+extern bool ix86_hard_regno_mode_ok (int, enum machine_mode);
+extern bool ix86_modes_tieable_p (enum machine_mode, enum machine_mode);
+extern int ix86_register_move_cost (enum machine_mode, enum reg_class,
+ enum reg_class);
+extern int ix86_secondary_memory_needed (enum reg_class, enum reg_class,
+ enum machine_mode, int);
+extern bool ix86_cannot_change_mode_class (enum machine_mode,
+ enum machine_mode, enum reg_class);
+extern enum reg_class ix86_preferred_reload_class (rtx, enum reg_class);
+extern enum reg_class ix86_preferred_output_reload_class (rtx, enum reg_class);
+extern int ix86_memory_move_cost (enum machine_mode, enum reg_class, int);
+extern int ix86_mode_needed (int, rtx);
+extern void emit_i387_cw_initialization (int);
+extern bool ix86_fp_jump_nontrivial_p (enum rtx_code);
+extern void x86_order_regs_for_local_alloc (void);
+extern void x86_function_profiler (FILE *, int);
+extern void x86_emit_floatuns (rtx [2]);
+extern void ix86_emit_fp_unordered_jump (rtx);
+
+extern void ix86_emit_i387_log1p (rtx, rtx);
+
+extern enum rtx_code ix86_reverse_condition (enum rtx_code, enum machine_mode);
+
+#ifdef TREE_CODE
+extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
+extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
+extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int);
+extern rtx ix86_function_value (tree, tree, bool);
+#endif
+
+#endif
+
+#ifdef TREE_CODE
+extern int ix86_return_pops_args (tree, tree, int);
+
+extern int ix86_data_alignment (tree, int);
+extern int ix86_local_alignment (tree, int);
+extern int ix86_constant_alignment (tree, int);
+extern tree ix86_handle_shared_attribute (tree *, tree, tree, int, bool *);
+extern tree ix86_handle_selectany_attribute (tree *, tree, tree, int, bool *);
+
+extern unsigned int i386_pe_section_type_flags (tree, const char *, int);
+extern void i386_pe_asm_named_section (const char *, unsigned int, tree);
+extern int x86_field_alignment (tree, int);
+#endif
+
+extern rtx ix86_tls_get_addr (void);
+extern rtx ix86_tls_module_base (void);
+
+extern void ix86_expand_vector_init (bool, rtx, rtx);
+extern void ix86_expand_vector_set (bool, rtx, rtx, int);
+extern void ix86_expand_vector_extract (bool, rtx, rtx, int);
+extern void ix86_expand_reduc_v4sf (rtx (*)(rtx, rtx, rtx), rtx, rtx);
+
+/* In winnt.c */
+extern int i386_pe_dllexport_name_p (const char *);
+extern int i386_pe_dllimport_name_p (const char *);
+extern void i386_pe_unique_section (tree, int);
+extern void i386_pe_declare_function_type (FILE *, const char *, int);
+extern void i386_pe_record_external_function (tree, const char *);
+extern void i386_pe_record_exported_symbol (const char *, int);
+extern void i386_pe_asm_file_end (FILE *);
+extern void i386_pe_encode_section_info (tree, rtx, int);
+extern const char *i386_pe_strip_name_encoding (const char *);
+extern const char *i386_pe_strip_name_encoding_full (const char *);
+extern void i386_pe_output_labelref (FILE *, const char *);
+extern bool i386_pe_valid_dllimport_attribute_p (tree);
+
+/* In winnt-cxx.c and winnt-stubs.c */
+extern void i386_pe_adjust_class_at_definition (tree);
+extern bool i386_pe_type_dllimport_p (tree);
+extern bool i386_pe_type_dllexport_p (tree);
+
+extern rtx maybe_get_pool_constant (rtx);
+
+extern char internal_label_prefix[16];
+extern int internal_label_prefix_len;
+
+enum ix86_address_seg { SEG_DEFAULT, SEG_FS, SEG_GS };
+struct ix86_address
+{
+ rtx base, index, disp;
+ HOST_WIDE_INT scale;
+ enum ix86_address_seg seg;
+};
+
+extern int ix86_decompose_address (rtx, struct ix86_address *);
+extern int memory_address_length (rtx addr);
+extern void x86_output_aligned_bss (FILE *, tree, const char *,
+ unsigned HOST_WIDE_INT, int);
+extern void x86_elf_aligned_common (FILE *, const char *,
+ unsigned HOST_WIDE_INT, int);
+
+#ifdef RTX_CODE
+extern void ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *,
+ enum rtx_code *, enum rtx_code *);
+extern enum rtx_code ix86_fp_compare_code_to_integer (enum rtx_code);
+#endif
+
+/* APPLE LOCAL begin CW asm blocks */
+extern const char *i386_iasm_register_name (const char *regname, char *buf);
+extern bool iasm_x86_needs_swapping (const char *);
+extern bool iasm_print_op (char *buf, tree arg, unsigned argnum, tree *uses,
+ bool must_be_reg, bool must_not_be_reg, void *);
+extern void iasm_x86_print_prefix (char *buf, tree prefix_list);
+extern tree iasm_raise_reg (tree);
+/* APPLE LOCAL end CW asm blocks */
+
+/* APPLE LOCAL 3399553 */
+extern void ix86_expand_flt_rounds (rtx);
+extern int asm_preferred_eh_data_format (int, int);
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386.c b/gcc-4.2.1-5666.3/gcc/config/i386/i386.c
new file mode 100644
index 000000000..0e212967a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386.c
@@ -0,0 +1,23515 @@
+/* Subroutines used for code generation on IA-32.
+ Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+ 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "tm_p.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-codes.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "except.h"
+#include "function.h"
+#include "recog.h"
+#include "expr.h"
+#include "optabs.h"
+#include "toplev.h"
+#include "basic-block.h"
+#include "ggc.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "cgraph.h"
+#include "tree-gimple.h"
+#include "dwarf2.h"
+#include "tm-constrs.h"
+
+/* APPLE LOCAL begin pascal strings */
+#include "../../libcpp/internal.h"
+extern struct cpp_reader* parse_in;
+/* APPLE LOCAL end pascal strings */
+/* APPLE LOCAL begin regparmandstackparm */
+#include "integrate.h"
+#include "tree-inline.h"
+#include "splay-tree.h"
+#include "tree-pass.h"
+#include "c-tree.h"
+#include "c-common.h"
+/* APPLE LOCAL end regparmandstackparm */
+/* APPLE LOCAL begin dwarf call/pop 5221468 */
+#include "debug.h"
+#include "dwarf2out.h"
+/* APPLE LOCAL end dwarf call/pop 5221468 */
+
+#ifndef CHECK_STACK_LIMIT
+#define CHECK_STACK_LIMIT (-1)
+#endif
+
+/* Return index of given mode in mult and division cost tables. */
+#define MODE_INDEX(mode) \
+ ((mode) == QImode ? 0 \
+ : (mode) == HImode ? 1 \
+ : (mode) == SImode ? 2 \
+ : (mode) == DImode ? 3 \
+ : 4)
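+/* For example, MODE_INDEX (SImode) is 2, the slot used below to pick
+ the SImode entry out of the per-mode multiply and divide cost arrays;
+ any mode other than QI/HI/SI/DImode falls into the catch-all "other"
+ slot at index 4. */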
+
+/* Processor costs (relative to an add) */
+/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
+#define COSTS_N_BYTES(N) ((N) * 2)
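+/* Concretely, under that assumption COSTS_N_BYTES (2) == 4 ==
+ COSTS_N_INSNS (1): when tuning for size, a 2-byte add weighs the
+ same as a single generic insn does when tuning for speed. */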
+
+static const
+struct processor_costs size_cost = { /* costs for tuning for size */
+ COSTS_N_BYTES (2), /* cost of an add instruction */
+ COSTS_N_BYTES (3), /* cost of a lea instruction */
+ COSTS_N_BYTES (2), /* variable shift costs */
+ COSTS_N_BYTES (3), /* constant shift costs */
+ {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
+ COSTS_N_BYTES (3), /* HI */
+ COSTS_N_BYTES (3), /* SI */
+ COSTS_N_BYTES (3), /* DI */
+ COSTS_N_BYTES (5)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
+ COSTS_N_BYTES (3), /* HI */
+ COSTS_N_BYTES (3), /* SI */
+ COSTS_N_BYTES (3), /* DI */
+ COSTS_N_BYTES (5)}, /* other */
+ COSTS_N_BYTES (3), /* cost of movsx */
+ COSTS_N_BYTES (3), /* cost of movzx */
+ 0, /* "large" insn */
+ 2, /* MOVE_RATIO */
+ 2, /* cost for loading QImode using movzbl */
+ {2, 2, 2}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 2, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {2, 2, 2}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {2, 2, 2}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 3, /* cost of moving MMX register */
+ {3, 3}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {3, 3}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 3, /* cost of moving SSE register */
+ {3, 3, 3}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {3, 3, 3}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 3, /* MMX or SSE register to integer */
+ 0, /* size of prefetch block */
+ 0, /* number of parallel prefetches */
+ 2, /* Branch cost */
+ COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
+ COSTS_N_BYTES (2), /* cost of FMUL instruction. */
+ COSTS_N_BYTES (2), /* cost of FDIV instruction. */
+ COSTS_N_BYTES (2), /* cost of FABS instruction. */
+ COSTS_N_BYTES (2), /* cost of FCHS instruction. */
+ COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
+};
+
+/* Processor costs (relative to an add) */
+static const
+struct processor_costs i386_cost = { /* 386 specific costs */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (3), /* variable shift costs */
+ COSTS_N_INSNS (2), /* constant shift costs */
+ {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (6), /* HI */
+ COSTS_N_INSNS (6), /* SI */
+ COSTS_N_INSNS (6), /* DI */
+ COSTS_N_INSNS (6)}, /* other */
+ COSTS_N_INSNS (1), /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (23), /* HI */
+ COSTS_N_INSNS (23), /* SI */
+ COSTS_N_INSNS (23), /* DI */
+ COSTS_N_INSNS (23)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
+ 15, /* "large" insn */
+ 3, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {2, 4, 2}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 4, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {8, 8, 8}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {8, 8, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {4, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {4, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {4, 8, 16}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 8, 16}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 3, /* MMX or SSE register to integer */
+ 0, /* size of prefetch block */
+ 0, /* number of parallel prefetches */
+ 1, /* Branch cost */
+ COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (27), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (88), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (22), /* cost of FABS instruction. */
+ COSTS_N_INSNS (24), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs i486_cost = { /* 486 specific costs */
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (3), /* variable shift costs */
+ COSTS_N_INSNS (2), /* constant shift costs */
+ {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (12), /* HI */
+ COSTS_N_INSNS (12), /* SI */
+ COSTS_N_INSNS (12), /* DI */
+ COSTS_N_INSNS (12)}, /* other */
+ 1, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (40), /* HI */
+ COSTS_N_INSNS (40), /* SI */
+ COSTS_N_INSNS (40), /* DI */
+ COSTS_N_INSNS (40)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
+ 15, /* "large" insn */
+ 3, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {2, 4, 2}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 4, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {8, 8, 8}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {8, 8, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {4, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {4, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {4, 8, 16}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 8, 16}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 3, /* MMX or SSE register to integer */
+ 0, /* size of prefetch block */
+ 0, /* number of parallel prefetches */
+ 1, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (16), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (73), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (3), /* cost of FABS instruction. */
+ COSTS_N_INSNS (3), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs pentium_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (4), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (11), /* HI */
+ COSTS_N_INSNS (11), /* SI */
+ COSTS_N_INSNS (11), /* DI */
+ COSTS_N_INSNS (11)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (25), /* HI */
+ COSTS_N_INSNS (25), /* SI */
+ COSTS_N_INSNS (25), /* DI */
+ COSTS_N_INSNS (25)}, /* other */
+ COSTS_N_INSNS (3), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
+ 8, /* "large" insn */
+ 6, /* MOVE_RATIO */
+ 6, /* cost for loading QImode using movzbl */
+ {2, 4, 2}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 4, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {2, 2, 6}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 6}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 8, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {4, 8, 16}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 8, 16}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 3, /* MMX or SSE register to integer */
+ 0, /* size of prefetch block */
+ 0, /* number of parallel prefetches */
+ 2, /* Branch cost */
+ COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (3), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (39), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (1), /* cost of FABS instruction. */
+ COSTS_N_INSNS (1), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs pentiumpro_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (4), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (4)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (17), /* HI */
+ COSTS_N_INSNS (17), /* SI */
+ COSTS_N_INSNS (17), /* DI */
+ COSTS_N_INSNS (17)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 6, /* MOVE_RATIO */
+ 2, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 2, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {2, 2, 6}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 6}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {2, 2}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {2, 2}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {2, 2, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {2, 2, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 3, /* MMX or SSE register to integer */
+ 32, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 2, /* Branch cost */
+ COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (5), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (56), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs k6_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (3), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (3), /* DI */
+ COSTS_N_INSNS (3)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (18), /* HI */
+ COSTS_N_INSNS (18), /* SI */
+ COSTS_N_INSNS (18), /* DI */
+ COSTS_N_INSNS (18)}, /* other */
+ COSTS_N_INSNS (2), /* cost of movsx */
+ COSTS_N_INSNS (2), /* cost of movzx */
+ 8, /* "large" insn */
+ 4, /* MOVE_RATIO */
+ 3, /* cost for loading QImode using movzbl */
+ {4, 5, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 3, 2}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {6, 6, 6}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 4}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {2, 2}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {2, 2}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {2, 2, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {2, 2, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 6, /* MMX or SSE register to integer */
+ 32, /* size of prefetch block */
+ 1, /* number of parallel prefetches */
+ 1, /* Branch cost */
+ COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (2), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (56), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs athlon_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (5), /* HI */
+ COSTS_N_INSNS (5), /* SI */
+ COSTS_N_INSNS (5), /* DI */
+ COSTS_N_INSNS (5)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 9, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {3, 4, 3}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {3, 4, 3}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {4, 4, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {4, 4}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {4, 4}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {4, 4, 6}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 4, 5}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 5, /* Branch cost */
+ COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (4), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (24), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs k8_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (2), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (5)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 9, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {3, 4, 3}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {3, 4, 3}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {4, 4, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {3, 3}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {4, 4}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {4, 3, 6}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 4, 5}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 5, /* Branch cost */
+ COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (4), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (19), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs pentium4_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (3), /* cost of a lea instruction */
+ COSTS_N_INSNS (4), /* variable shift costs */
+ COSTS_N_INSNS (4), /* constant shift costs */
+ {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (15), /* HI */
+ COSTS_N_INSNS (15), /* SI */
+ COSTS_N_INSNS (15), /* DI */
+ COSTS_N_INSNS (15)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (56), /* HI */
+ COSTS_N_INSNS (56), /* SI */
+ COSTS_N_INSNS (56), /* DI */
+ COSTS_N_INSNS (56)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 16, /* "large" insn */
+ 6, /* MOVE_RATIO */
+ 2, /* cost for loading QImode using movzbl */
+ {4, 5, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {2, 3, 2}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {2, 2, 6}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 6}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {2, 2}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {2, 2}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 12, /* cost of moving SSE register */
+ {12, 12, 12}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {2, 2, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 10, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 2, /* Branch cost */
+ COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (7), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (43), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (2), /* cost of FABS instruction. */
+ COSTS_N_INSNS (2), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
+};
+
+static const
+struct processor_costs nocona_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1), /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (10), /* HI */
+ COSTS_N_INSNS (10), /* SI */
+ COSTS_N_INSNS (10), /* DI */
+ COSTS_N_INSNS (10)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (66), /* HI */
+ COSTS_N_INSNS (66), /* SI */
+ COSTS_N_INSNS (66), /* DI */
+ COSTS_N_INSNS (66)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 16, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 3, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 4}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 6, /* cost of moving MMX register */
+ {12, 12}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {12, 12}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 6, /* cost of moving SSE register */
+ {12, 12, 12}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {12, 12, 12}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 8, /* MMX or SSE register to integer */
+ 128, /* size of prefetch block */
+ 8, /* number of parallel prefetches */
+ 1, /* Branch cost */
+ COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (40), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (3), /* cost of FABS instruction. */
+ COSTS_N_INSNS (3), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
+};
+/* APPLE LOCAL begin mainline */
+static const
+struct processor_costs core2_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (3), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (3), /* DI */
+ COSTS_N_INSNS (3)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (22), /* HI */
+ COSTS_N_INSNS (22), /* SI */
+ COSTS_N_INSNS (22), /* DI */
+ COSTS_N_INSNS (22)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 16, /* MOVE_RATIO */
+ 2, /* cost for loading QImode using movzbl */
+ {6, 6, 6}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 2, /* cost of reg,reg fld/fst */
+ {6, 6, 6}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {4, 4, 4}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {6, 6}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {4, 4}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {6, 6, 6}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {4, 4, 4}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 2, /* MMX or SSE register to integer */
+ 128, /* size of prefetch block */
+ 8, /* number of parallel prefetches */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (5), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (32), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (1), /* cost of FABS instruction. */
+ COSTS_N_INSNS (1), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
+};
+/* APPLE LOCAL end mainline */
+/* Generic64 should produce code tuned for Nocona and K8. */
+static const
+struct processor_costs generic64_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ /* On all chips taken into consideration, lea takes 2 cycles or more.
+ With this cost, however, our current implementation of synth_mult
+ results in the use of unnecessary temporary registers, causing
+ regressions on several SPECfp benchmarks. */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (2)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {8, 8, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {8, 8, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
+ value is increased to the perhaps more appropriate value of 5. */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (20), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (8), /* cost of FABS instruction. */
+ COSTS_N_INSNS (8), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
+};
+
+/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
+static const
+struct processor_costs generic32_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (2)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {8, 8, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {8, 8, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (20), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (8), /* cost of FABS instruction. */
+ COSTS_N_INSNS (8), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
+};
+
+const struct processor_costs *ix86_cost = &pentium_cost;
+
+/* Processor feature/optimization bitmasks. */
+#define m_386 (1<<PROCESSOR_I386)
+#define m_486 (1<<PROCESSOR_I486)
+#define m_PENT (1<<PROCESSOR_PENTIUM)
+#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
+#define m_K6 (1<<PROCESSOR_K6)
+#define m_ATHLON (1<<PROCESSOR_ATHLON)
+#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
+#define m_K8 (1<<PROCESSOR_K8)
+#define m_ATHLON_K8 (m_K8 | m_ATHLON)
+#define m_NOCONA (1<<PROCESSOR_NOCONA)
+/* APPLE LOCAL mainline */
+#define m_CORE2 (1<<PROCESSOR_CORE2)
+#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
+#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
+#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
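+/* Each feature flag below is a set of these processor bits; it is
+ tested against the bit for the active tuning target. Roughly (see
+ i386.h for the real definitions):
+ #define TUNEMASK (1 << ix86_tune)
+ #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
+ so x86_use_leave reads as "the set of processors on which using
+ leave is enabled". */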
+
+/* Generic instruction choice should be a common subset of the supported CPUs
+ (PPro/PENT4/NOCONA/Athlon/K8). */
+
+/* Using leave does not affect Nocona SPEC2000 results negatively, so enabling
+ it for Generic64 seems like a good code-size tradeoff. We can't enable it
+ for 32-bit generic because it does not work well with PPro-based chips. */
+/* APPLE LOCAL begin mainline */
+const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
+const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_zero_extend_with_and = m_486 | m_PENT;
+const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC /* m_386 | m_K6 */;
+const int x86_double_with_add = ~m_386;
+const int x86_use_bit_test = m_386;
+const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
+const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
+const int x86_3dnow_a = m_ATHLON_K8;
+/* APPLE LOCAL end mainline */
+const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
+/* Branch hints were put in P4 based on simulation results. But
+ after P4 was made, no performance benefit was observed with
+ branch hints; they also increase code size. As a result,
+ icc never generates branch hints. */
+const int x86_branch_hints = 0;
+const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC32; /* m_GENERIC | m_ATHLON_K8 ? */
+/* We probably ought to watch for partial register stalls on the Generic32
+ compilation setting as well. However, in the current implementation
+ partial register stalls are not eliminated very well - they can
+ be introduced via subregs synthesized by combine and can happen
+ in caller/callee saving sequences.
+ Because this option pays back little on PPro-based chips and conflicts
+ with the partial register dependencies used by Athlon/P4-based chips,
+ it is better to leave it off for generic32 for now. */
+const int x86_partial_reg_stall = m_PPRO;
+/* APPLE LOCAL begin mainline */
+const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
+const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
+const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
+const int x86_use_mov0 = m_K6;
+const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
+const int x86_read_modify_write = ~m_PENT;
+const int x86_read_modify = ~(m_PENT | m_PPRO);
+const int x86_split_long_moves = m_PPRO;
+const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
+const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
+const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
+const int x86_qimode_math = ~(0);
+const int x86_promote_qi_regs = 0;
+/* On PPro this flag is meant to avoid partial register stalls. Just like
+ x86_partial_reg_stall, this option might be considered for Generic32
+ if our scheme for avoiding partial stalls were more effective. */
+const int x86_himode_math = ~(m_PPRO);
+const int x86_promote_hi_regs = m_PPRO;
+const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC);
+const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
+const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
+const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
+const int x86_shift1 = ~m_486;
+const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+/* In the Generic model we have a conflict between PPro/Pentium4-based chips
+ that treat 128-bit SSE registers as single units and K8-based chips that
+ divide SSE registers into two 64-bit halves.
+ x86_sse_partial_reg_dependency promotes all store destinations to 128 bits
+ to allow register renaming on 128-bit SSE units, but usually results in one
+ extra microop on 64-bit SSE units. Experimental results show that disabling
+ this option on P4 brings over a 20% SPECfp regression, while enabling it on
+ K8 brings roughly a 2.4% regression that can be partly masked by careful
+ scheduling of moves. */
+const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
+/* Set for machines where types and dependencies are resolved on SSE
+ register parts instead of whole registers, so we may maintain just the
+ lower part of scalar values in the proper format, leaving the upper part
+ undefined. */
+const int x86_sse_split_regs = m_ATHLON_K8;
+const int x86_sse_typeless_stores = m_ATHLON_K8;
+const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
+const int x86_use_ffreep = m_ATHLON_K8;
+const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
+const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_GENERIC);
+
+/* ??? Allowing interunit moves makes it all too easy for the compiler to put
+ integer data in xmm registers, which results in pretty abysmal code. */
+/* APPLE LOCAL 5612787 mainline sse4 */
+const int x86_inter_unit_moves = ~(m_ATHLON_K8);
+
+const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_CORE2 | m_PPRO | m_GENERIC32;
+/* Some CPU cores are not able to predict more than 4 branch instructions in
+ the 16-byte window. */
+const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
+const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT | m_CORE2 | m_GENERIC;
+const int x86_use_bt = m_ATHLON_K8;
+/* APPLE LOCAL begin */
+/* See comment in darwin override options for what needs fixing.
+ Most of this code has been rewritten in mainline anyhow.
+ All we've done here is remove the const qualifiers, since we assign
+ to these variables in SUBTARGET_OVERRIDE_OPTIONS. */
+/* Compare and exchange was added for 80486. */
+int x86_cmpxchg = ~m_386;
+/* Compare and exchange 8 bytes was added for the Pentium. */
+int x86_cmpxchg8b = ~(m_386 | m_486);
+/* Compare and exchange 16 bytes was added for Nocona. */
+/* APPLE LOCAL mainline */
+int x86_cmpxchg16b = m_NOCONA | m_CORE2;
+/* Exchange and add was added for 80486. */
+int x86_xadd = ~m_386;
+/* APPLE LOCAL begin mainline bswap */
+/* Byteswap was added for 80486. */
+int x86_bswap = ~m_386;
+/* APPLE LOCAL end mainline bswap */
+/* APPLE LOCAL end */
+const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
+/* APPLE LOCAL end mainline */
+
+/* If the average insn count for a single function invocation is
+ lower than this constant, emit fast (but longer) prologue and
+ epilogue code. */
+#define FAST_PROLOGUE_INSN_COUNT 20
+
+/* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
+static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
+static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
+static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
+
+/* Array of the smallest class containing reg number REGNO, indexed by
+ REGNO. Used by REGNO_REG_CLASS in i386.h. */
+
+enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
+{
+ /* ax, dx, cx, bx */
+ AREG, DREG, CREG, BREG,
+ /* si, di, bp, sp */
+ SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
+ /* FP registers */
+ FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
+ FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
+ /* arg pointer */
+ NON_Q_REGS,
+ /* flags, fpsr, dirflag, frame */
+ NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
+ SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
+ SSE_REGS, SSE_REGS,
+ MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
+ MMX_REGS, MMX_REGS,
+ NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
+ NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
+ SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
+ SSE_REGS, SSE_REGS,
+};
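+/* So, for instance, REGNO_REG_CLASS (0) is AREG (%eax), while
+ REGNO_REG_CLASS (7) is NON_Q_REGS, since %esp has no QImode low part
+ in 32-bit mode. */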
+
+/* The "default" register map used in 32bit mode. */
+
+int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
+{
+ 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
+ 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
+ -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
+ 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
+ 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
+};
+
+static int const x86_64_int_parameter_registers[6] =
+{
+ 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
+ FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
+};
+
+static int const x86_64_int_return_registers[4] =
+{
+ 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
+};
+
+/* The "default" register map used in 64bit mode. */
+int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
+{
+ 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
+ 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
+ -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
+ 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
+ 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
+ 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
+};
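+/* Note that, unlike the 32-bit map above, the x86-64 DWARF numbering
+ happens to match gcc's own ordering for the first eight registers:
+ rax=0, rdx=1, rcx=2, rbx=3, rsi=4, rdi=5, rbp=6, rsp=7. */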
+
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+ 0 for %eax (gcc regno = 0)
+ 1 for %ecx (gcc regno = 2)
+ 2 for %edx (gcc regno = 1)
+ 3 for %ebx (gcc regno = 3)
+ 4 for %esp (gcc regno = 7)
+ 5 for %ebp (gcc regno = 6)
+ 6 for %esi (gcc regno = 4)
+ 7 for %edi (gcc regno = 5)
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+ 8 for %eip (no gcc equivalent)
+ 9 for %eflags (gcc regno = 17)
+ 10 for %trapno (no gcc equivalent)
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+ particular, and even for DWARF regno 11, SDB only seems to under-
+ stand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+ 11 for %st(0) (gcc regno = 8)
+ 12 for %st(1) (gcc regno = 9)
+ 13 for %st(2) (gcc regno = 10)
+ 14 for %st(3) (gcc regno = 11)
+ 15 for %st(4) (gcc regno = 12)
+ 16 for %st(5) (gcc regno = 13)
+ 17 for %st(6) (gcc regno = 14)
+ 18 for %st(7) (gcc regno = 15)
+*/
+int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
+{
+ 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
+ 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
+ -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
+ 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
+ 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
+};
+
+/* Test and compare insns in i386.md store the information needed to
+ generate branch and scc insns here. */
+
+rtx ix86_compare_op0 = NULL_RTX;
+rtx ix86_compare_op1 = NULL_RTX;
+rtx ix86_compare_emitted = NULL_RTX;
+
+/* Size of the register save area. */
+#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
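+/* With the 64-bit values REGPARM_MAX == 6, UNITS_PER_WORD == 8 and
+ SSE_REGPARM_MAX == 8 (as defined in i386.h), this works out to
+ 6*8 + 8*16 = 176 bytes: six GP registers plus eight 16-byte SSE slots. */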
+
+/* Define the structure for the machine field in struct function. */
+
+struct stack_local_entry GTY(())
+{
+ unsigned short mode;
+ unsigned short n;
+ rtx rtl;
+ struct stack_local_entry *next;
+};
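+/* Per-function chain of cached stack slots, keyed by (mode, n); this
+ lets repeated requests for the same scratch slot hand back the same
+ rtl (see assign_386_stack_local later in this file). */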
+
+/* Structure describing stack frame layout.
+ Stack grows downward:
+
+ [arguments]
+ <- ARG_POINTER
+ saved pc
+
+ saved frame pointer if frame_pointer_needed
+ <- HARD_FRAME_POINTER
+ [saved regs]
+
+ [padding1] \
+ )
+ [va_arg registers] (
+ > to_allocate <- FRAME_POINTER
+ [frame] (
+ )
+ [padding2] /
+ */
+struct ix86_frame
+{
+ int nregs;
+ int padding1;
+ int va_arg_size;
+ HOST_WIDE_INT frame;
+ int padding2;
+ int outgoing_arguments_size;
+ int red_zone_size;
+
+ HOST_WIDE_INT to_allocate;
+ /* The offsets relative to ARG_POINTER. */
+ HOST_WIDE_INT frame_pointer_offset;
+ HOST_WIDE_INT hard_frame_pointer_offset;
+ HOST_WIDE_INT stack_pointer_offset;
+
+ /* When save_regs_using_mov is set, emit prologue using
+ move instead of push instructions. */
+ bool save_regs_using_mov;
+};
+
+/* Code model option. */
+enum cmodel ix86_cmodel;
+/* Asm dialect. */
+enum asm_dialect ix86_asm_dialect = ASM_ATT;
+/* TLS dialects. */
+enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
+
+/* Which unit we are generating floating point math for. */
+enum fpmath_unit ix86_fpmath;
+
+/* Which CPU we are scheduling for. */
+enum processor_type ix86_tune;
+/* Which instruction set architecture to use. */
+enum processor_type ix86_arch;
+
+/* True if the SSE prefetch instruction is not a NOP. */
+int x86_prefetch_sse;
+
+/* ix86_regparm_string as a number */
+static int ix86_regparm;
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+/* True if the SSE population count insn is supported. */
+int x86_popcnt;
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+/* -mstackrealign option */
+extern int ix86_force_align_arg_pointer;
+static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
+
+/* Preferred alignment for stack boundary in bits. */
+unsigned int ix86_preferred_stack_boundary;
+/* APPLE LOCAL begin radar 4216496, 4229407, 4120689, 4095567 */
+unsigned int ix86_save_preferred_stack_boundary;
+/* APPLE LOCAL end radar 4216496, 4229407, 4120689, 4095567 */
+
+/* Values 1-5: see jump.c */
+int ix86_branch_cost;
+
+/* Variables which are this size or smaller are put in the data/bss
+ or ldata/lbss sections. */
+
+int ix86_section_threshold = 65536;
+
+/* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
+char internal_label_prefix[16];
+int internal_label_prefix_len;
+
+static bool ix86_handle_option (size_t, const char *, int);
+static void output_pic_addr_const (FILE *, rtx, int);
+static void put_condition_code (enum rtx_code, enum machine_mode,
+ int, int, FILE *);
+static const char *get_some_local_dynamic_name (void);
+static int get_some_local_dynamic_name_1 (rtx *, void *);
+static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
+static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
+ rtx *);
+static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
+static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
+ enum machine_mode);
+static rtx get_thread_pointer (int);
+static rtx legitimize_tls_address (rtx, enum tls_model, int);
+static void get_pc_thunk_name (char [32], unsigned int);
+static rtx gen_push (rtx);
+static int ix86_flags_dependent (rtx, rtx, enum attr_type);
+static int ix86_agi_dependent (rtx, rtx, enum attr_type);
+static struct machine_function * ix86_init_machine_status (void);
+static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
+static int ix86_nsaved_regs (void);
+static void ix86_emit_save_regs (void);
+static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
+static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
+static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static HOST_WIDE_INT ix86_GOT_alias_set (void);
+static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
+static rtx ix86_expand_aligntest (rtx, int);
+static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
+static int ix86_issue_rate (void);
+static int ix86_adjust_cost (rtx, rtx, rtx, int);
+static int ia32_multipass_dfa_lookahead (void);
+static void ix86_init_mmx_sse_builtins (void);
+static rtx x86_this_parameter (tree);
+static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, tree);
+static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
+static void x86_file_start (void);
+static void ix86_reorg (void);
+static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
+static tree ix86_build_builtin_va_list (void);
+static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int *, int);
+static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool ix86_scalar_mode_supported_p (enum machine_mode);
+static bool ix86_vector_mode_supported_p (enum machine_mode);
+
+static int ix86_address_cost (rtx);
+static bool ix86_cannot_force_const_mem (rtx);
+static rtx ix86_delegitimize_address (rtx);
+
+static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
+
+struct builtin_description;
+static rtx ix86_expand_sse_comi (const struct builtin_description *,
+ tree, rtx);
+static rtx ix86_expand_sse_compare (const struct builtin_description *,
+ tree, rtx);
+static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
+static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
+static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
+static rtx ix86_expand_store_builtin (enum insn_code, tree);
+static rtx safe_vector_operand (rtx, enum machine_mode);
+static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
+static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
+static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
+static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
+static int ix86_fp_comparison_cost (enum rtx_code code);
+static unsigned int ix86_select_alt_pic_regnum (void);
+static int ix86_save_reg (unsigned int, int);
+static void ix86_compute_frame_layout (struct ix86_frame *);
+static int ix86_comp_type_attributes (tree, tree);
+static int ix86_function_regparm (tree, tree);
+const struct attribute_spec ix86_attribute_table[];
+static bool ix86_function_ok_for_sibcall (tree, tree);
+static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
+static int ix86_value_regno (enum machine_mode, tree, tree);
+static bool contains_128bit_aligned_vector_p (tree);
+static rtx ix86_struct_value_rtx (tree, int);
+static bool ix86_ms_bitfield_layout_p (tree);
+static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
+static int extended_reg_mentioned_1 (rtx *, void *);
+static bool ix86_rtx_costs (rtx, int, int, int *);
+static int min_insn_size (rtx);
+static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
+static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
+static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static void ix86_init_builtins (void);
+static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+/* APPLE LOCAL mangle_type 7105099 */
+static const char *ix86_mangle_type (tree);
+static tree ix86_stack_protect_fail (void);
+static rtx ix86_internal_arg_pointer (void);
+static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
+
+/* This function is only used on Solaris. */
+static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
+ ATTRIBUTE_UNUSED;
+
+/* Register class used for passing a given 64-bit part of the argument.
+ These represent classes as documented by the psABI, with the exception
+ of the SSESF and SSEDF classes, which are basically the SSE class; gcc
+ just uses an SF or DFmode move instead of DImode to avoid reformatting
+ penalties.
+
+ Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
+ whenever possible (the upper half does contain padding).
+ */
+enum x86_64_reg_class
+ {
+ X86_64_NO_CLASS,
+ X86_64_INTEGER_CLASS,
+ X86_64_INTEGERSI_CLASS,
+ X86_64_SSE_CLASS,
+ X86_64_SSESF_CLASS,
+ X86_64_SSEDF_CLASS,
+ X86_64_SSEUP_CLASS,
+ X86_64_X87_CLASS,
+ X86_64_X87UP_CLASS,
+ X86_64_COMPLEX_X87_CLASS,
+ X86_64_MEMORY_CLASS
+ };
+static const char * const x86_64_reg_class_name[] = {
+ "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
+ "sseup", "x87", "x87up", "cplx87", "no"
+};
+
+#define MAX_CLASSES 4
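+/* Illustrative example of the psABI classification: an argument of type
+ struct { double x; long y; } spans two eightbytes, classified as
+ { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS }, so it travels in one SSE
+ and one integer register. MAX_CLASSES bounds how many eightbytes (and
+ hence classes) one argument may use. */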
+
+/* Table of constants used by fldpi, fldln2, etc. */
+static REAL_VALUE_TYPE ext_80387_constants_table [5];
+static bool ext_80387_constants_init = 0;
+static void init_ext_80387_constants (void);
+static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
+static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
+static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
+static section *x86_64_elf_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align)
+ ATTRIBUTE_UNUSED;
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+# undef TARGET_MERGE_DECL_ATTRIBUTES
+# define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
+#endif
+
+#undef TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS ix86_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN ix86_expand_builtin
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
+
+#undef TARGET_ENCODE_SECTION_INFO
+#ifndef SUBTARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
+#else
+#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
+#endif
+
+#undef TARGET_ASM_OPEN_PAREN
+#define TARGET_ASM_OPEN_PAREN ""
+#undef TARGET_ASM_CLOSE_PAREN
+#define TARGET_ASM_CLOSE_PAREN ""
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
+#ifdef ASM_QUAD
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
+#endif
+
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ ia32_multipass_dfa_lookahead
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS true
+#endif
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
+
+#undef TARGET_DELEGITIMIZE_ADDRESS
+#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
+
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
+
+#if TARGET_MACHO
+#undef TARGET_BINDS_LOCAL_P
+#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
+#endif
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START x86_file_start
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT \
+ | TARGET_64BIT_DEFAULT \
+ | TARGET_SUBTARGET_DEFAULT \
+ | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION ix86_handle_option
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS ix86_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST ix86_address_cost
+
+#undef TARGET_FIXED_CONDITION_CODE_REGS
+#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
+#undef TARGET_CC_MODES_COMPATIBLE
+#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
+
+#undef TARGET_MD_ASM_CLOBBERS
+#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
+#undef TARGET_INTERNAL_ARG_POINTER
+#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
+#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
+#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
+#endif
+
+#ifdef SUBTARGET_INSERT_ATTRIBUTES
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
+#endif
+
+/* APPLE LOCAL begin mangle_type 7105099 */
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE ix86_mangle_type
+/* APPLE LOCAL end mangle_type 7105099 */
+
+#undef TARGET_STACK_PROTECT_FAIL
+#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE ix86_function_value
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+#ifndef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+#endif
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
+{
+ switch (code)
+ {
+ case OPT_m3dnow:
+ if (!value)
+ {
+ target_flags &= ~MASK_3DNOW_A;
+ target_flags_explicit |= MASK_3DNOW_A;
+ }
+ return true;
+
+ case OPT_mmmx:
+ if (!value)
+ {
+ target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
+ target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
+ }
+ return true;
+
+ case OPT_msse:
+ if (!value)
+ {
+ target_flags &= ~(MASK_SSE2 | MASK_SSE3);
+ target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
+ }
+ return true;
+
+ case OPT_msse2:
+ if (!value)
+ {
+ target_flags &= ~MASK_SSE3;
+ target_flags_explicit |= MASK_SSE3;
+ }
+ return true;
+
+ default:
+ return true;
+ }
+}
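+
+/* A minimal sketch, not part of the original patch, of the bookkeeping
+ above: a hypothetical "gcc -mno-sse" makes the driver call
+ ix86_handle_option (OPT_msse, NULL, 0), which clears the dependent
+ MASK_SSE2 | MASK_SSE3 bits in target_flags and records them in
+ target_flags_explicit, so the -march defaulting loop in
+ override_options below will not silently re-enable them. */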
+
+/* APPLE LOCAL begin 4760857 optimization pragmas. */
+/* Hoisted so it can be used by reset_optimization_options. */
+static struct ptt
+ {
+ const struct processor_costs *cost; /* Processor costs */
+ const int target_enable; /* Target flags to enable. */
+ const int target_disable; /* Target flags to disable. */
+ const int align_loop; /* Default alignments. */
+ const int align_loop_max_skip;
+ const int align_jump;
+ const int align_jump_max_skip;
+ const int align_func;
+ }
+const processor_target_table[PROCESSOR_max] =
+ {
+ {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
+ {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
+ {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
+ {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
+ {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
+ {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
+ /* APPLE LOCAL mainline */
+ {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
+ {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
+ };
+/* APPLE LOCAL end 4760857 optimization pragmas. */
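+
+/* Illustrative sketch, not part of the original patch: how a row of the
+ table above is consumed. With -mtune=k6 and no explicit -falign-*
+ options, override_options below effectively does
+
+   align_loops = processor_target_table[ix86_tune].align_loop;  (32)
+   align_loops_max_skip
+     = processor_target_table[ix86_tune].align_loop_max_skip;   (7)
+
+ assuming ix86_tune is PROCESSOR_K6 and the enum order matches the row
+ order, as it must for this table to work. */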
+
+/* Sometimes certain combinations of command options do not make
+ sense on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
+
+void
+override_options (void)
+{
+ int i;
+ int ix86_tune_defaulted = 0;
+ /* APPLE LOCAL mainline */
+ int ix86_arch_specified = 0;
+
+ /* Comes from final.c -- no real reason to change it. */
+#define MAX_CODE_ALIGN 16
+
+ /* APPLE LOCAL begin 4760857 optimization pragmas. */
+ /* processor_target_table moved to file scope. */
+ /* APPLE LOCAL end 4760857 optimization pragmas. */
+
+ static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
+ static struct pta
+ {
+ const char *const name; /* processor name or nickname. */
+ const enum processor_type processor;
+ const enum pta_flags
+ {
+ PTA_SSE = 1,
+ PTA_SSE2 = 2,
+ PTA_SSE3 = 4,
+ PTA_MMX = 8,
+ PTA_PREFETCH_SSE = 16,
+ PTA_3DNOW = 32,
+ PTA_3DNOW_A = 64,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* APPLE LOCAL begin mainline */
+ PTA_64BIT = 128,
+ PTA_SSSE3 = 256,
+ /* APPLE LOCAL end mainline */
+ PTA_CX16 = 1 << 9,
+ PTA_POPCNT = 1 << 10,
+ PTA_ABM = 1 << 11,
+ PTA_SSE4A = 1 << 12,
+ PTA_NO_SAHF = 1 << 13,
+ PTA_SSE4_1 = 1 << 14,
+ PTA_SSE4_2 = 1 << 15
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ } flags;
+ }
+ const processor_alias_table[] =
+ {
+ {"i386", PROCESSOR_I386, 0},
+ {"i486", PROCESSOR_I486, 0},
+ {"i586", PROCESSOR_PENTIUM, 0},
+ {"pentium", PROCESSOR_PENTIUM, 0},
+ {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
+ {"winchip-c6", PROCESSOR_I486, PTA_MMX},
+ {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
+ {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
+ {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
+ {"i686", PROCESSOR_PENTIUMPRO, 0},
+ {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
+ {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
+ {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
+ {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
+ {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
+ {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
+ | PTA_MMX | PTA_PREFETCH_SSE},
+ {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
+ | PTA_MMX | PTA_PREFETCH_SSE},
+ {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
+ | PTA_MMX | PTA_PREFETCH_SSE},
+ {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
+ | PTA_MMX | PTA_PREFETCH_SSE},
+ /* APPLE LOCAL begin mainline */
+ {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
+ | PTA_64BIT | PTA_MMX
+ | PTA_PREFETCH_SSE},
+ /* APPLE LOCAL end mainline */
+ {"k6", PROCESSOR_K6, PTA_MMX},
+ {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
+ {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
+ {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
+ | PTA_3DNOW_A},
+ {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
+ | PTA_3DNOW | PTA_3DNOW_A},
+ {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
+ | PTA_3DNOW_A | PTA_SSE},
+ {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
+ | PTA_3DNOW_A | PTA_SSE},
+ {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
+ | PTA_3DNOW_A | PTA_SSE},
+ {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
+ | PTA_SSE | PTA_SSE2 },
+ {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
+ | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
+ {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
+ | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
+ {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
+ | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
+ {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
+ | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
+ {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
+ {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
+ };
+
+ int const pta_size = ARRAY_SIZE (processor_alias_table);
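+
+ /* Illustrative sketch, not part of the original patch: for a
+ hypothetical -march=core2, the loop over this table further down
+ folds the entry's PTA_* bits into target_flags, so (absent explicit
+ -mno-* options) MASK_MMX, MASK_SSE, MASK_SSE2 and MASK_SSE3 all get
+ set, x86_prefetch_sse becomes true, and PTA_64BIT satisfies the
+ TARGET_64BIT sanity check. */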
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
+ SUBSUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+ /* -fPIC is the default for x86_64. */
+ if (TARGET_MACHO && TARGET_64BIT)
+ flag_pic = 2;
+
+ /* Set the default values for switches whose default depends on TARGET_64BIT
+ in case they weren't overwritten by command line options. */
+ if (TARGET_64BIT)
+ {
+ /* Mach-O doesn't support omitting the frame pointer for now. */
+ if (flag_omit_frame_pointer == 2)
+ flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
+ if (flag_asynchronous_unwind_tables == 2)
+ flag_asynchronous_unwind_tables = 1;
+ if (flag_pcc_struct_return == 2)
+ flag_pcc_struct_return = 0;
+ }
+ else
+ {
+ if (flag_omit_frame_pointer == 2)
+ flag_omit_frame_pointer = 0;
+ if (flag_asynchronous_unwind_tables == 2)
+ flag_asynchronous_unwind_tables = 0;
+ if (flag_pcc_struct_return == 2)
+ flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
+ }
+
+ /* Need to check -mtune=generic first. */
+ if (ix86_tune_string)
+ {
+ if (!strcmp (ix86_tune_string, "generic")
+ || !strcmp (ix86_tune_string, "i686")
+ /* As special support for cross compilers we read -mtune=native
+ as -mtune=generic. With native compilers we won't see
+ -mtune=native, as it will have been rewritten by the driver. */
+ || !strcmp (ix86_tune_string, "native"))
+ {
+ if (TARGET_64BIT)
+ ix86_tune_string = "generic64";
+ else
+ ix86_tune_string = "generic32";
+ }
+ else if (!strncmp (ix86_tune_string, "generic", 7))
+ error ("bad value (%s) for -mtune= switch", ix86_tune_string);
+ }
+ else
+ {
+ if (ix86_arch_string)
+ ix86_tune_string = ix86_arch_string;
+ if (!ix86_tune_string)
+ {
+ ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
+ ix86_tune_defaulted = 1;
+ }
+
+ /* ix86_tune_string is set to ix86_arch_string or defaulted. We
+ need to use a sensible tune option. */
+ if (!strcmp (ix86_tune_string, "generic")
+ || !strcmp (ix86_tune_string, "x86-64")
+ || !strcmp (ix86_tune_string, "i686"))
+ {
+ if (TARGET_64BIT)
+ ix86_tune_string = "generic64";
+ else
+ ix86_tune_string = "generic32";
+ }
+ }
+ if (!strcmp (ix86_tune_string, "x86-64"))
+ warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
+ "-mtune=generic instead as appropriate.");
+
+ if (!ix86_arch_string)
+ ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
+ /* APPLE LOCAL begin mainline */
+ else
+ ix86_arch_specified = 1;
+ /* APPLE LOCAL end mainline */
+ if (!strcmp (ix86_arch_string, "generic"))
+ error ("generic CPU can be used only for -mtune= switch");
+ if (!strncmp (ix86_arch_string, "generic", 7))
+ error ("bad value (%s) for -march= switch", ix86_arch_string);
+
+ if (ix86_cmodel_string != 0)
+ {
+ if (!strcmp (ix86_cmodel_string, "small"))
+ ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
+ else if (!strcmp (ix86_cmodel_string, "medium"))
+ ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
+ else if (flag_pic)
+ sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
+ else if (!strcmp (ix86_cmodel_string, "32"))
+ ix86_cmodel = CM_32;
+ else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
+ ix86_cmodel = CM_KERNEL;
+ else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
+ ix86_cmodel = CM_LARGE;
+ else
+ error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
+ }
+ else
+ {
+ ix86_cmodel = CM_32;
+ if (TARGET_64BIT)
+ ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
+ }
+ if (ix86_asm_string != 0)
+ {
+ if (! TARGET_MACHO
+ && !strcmp (ix86_asm_string, "intel"))
+ ix86_asm_dialect = ASM_INTEL;
+ else if (!strcmp (ix86_asm_string, "att"))
+ ix86_asm_dialect = ASM_ATT;
+ else
+ error ("bad value (%s) for -masm= switch", ix86_asm_string);
+ }
+ if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
+ error ("code model %qs not supported in the %s bit mode",
+ ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
+ if (ix86_cmodel == CM_LARGE)
+ sorry ("code model %<large%> not supported yet");
+ if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
+ sorry ("%i-bit mode not compiled in",
+ (target_flags & MASK_64BIT) ? 64 : 32);
+
+ for (i = 0; i < pta_size; i++)
+ if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
+ {
+ ix86_arch = processor_alias_table[i].processor;
+ /* Default cpu tuning to the architecture. */
+ ix86_tune = ix86_arch;
+ if (processor_alias_table[i].flags & PTA_MMX
+ && !(target_flags_explicit & MASK_MMX))
+ target_flags |= MASK_MMX;
+ if (processor_alias_table[i].flags & PTA_3DNOW
+ && !(target_flags_explicit & MASK_3DNOW))
+ target_flags |= MASK_3DNOW;
+ if (processor_alias_table[i].flags & PTA_3DNOW_A
+ && !(target_flags_explicit & MASK_3DNOW_A))
+ target_flags |= MASK_3DNOW_A;
+ if (processor_alias_table[i].flags & PTA_SSE
+ && !(target_flags_explicit & MASK_SSE))
+ target_flags |= MASK_SSE;
+ if (processor_alias_table[i].flags & PTA_SSE2
+ && !(target_flags_explicit & MASK_SSE2))
+ target_flags |= MASK_SSE2;
+ if (processor_alias_table[i].flags & PTA_SSE3
+ && !(target_flags_explicit & MASK_SSE3))
+ target_flags |= MASK_SSE3;
+ /* APPLE LOCAL begin mainline */
+ if (processor_alias_table[i].flags & PTA_SSSE3
+ && !(target_flags_explicit & MASK_SSSE3))
+ target_flags |= MASK_SSSE3;
+ /* APPLE LOCAL end mainline */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ if (processor_alias_table[i].flags & PTA_SSE4_1
+ && !(target_flags_explicit & MASK_SSE4_1))
+ target_flags |= MASK_SSE4_1;
+ if (processor_alias_table[i].flags & PTA_SSE4_2
+ && !(target_flags_explicit & MASK_SSE4_2))
+ target_flags |= MASK_SSE4_2;
+ if (processor_alias_table[i].flags & PTA_SSE4A
+ && !(target_flags_explicit & MASK_SSE4A))
+ target_flags |= MASK_SSE4A;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
+ x86_prefetch_sse = true;
+ if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
+ error ("CPU you selected does not support x86-64 "
+ "instruction set");
+ break;
+ }
+
+ if (i == pta_size)
+ error ("bad value (%s) for -march= switch", ix86_arch_string);
+
+ for (i = 0; i < pta_size; i++)
+ if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
+ {
+ ix86_tune = processor_alias_table[i].processor;
+ if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
+ {
+ if (ix86_tune_defaulted)
+ {
+ ix86_tune_string = "x86-64";
+ for (i = 0; i < pta_size; i++)
+ if (! strcmp (ix86_tune_string,
+ processor_alias_table[i].name))
+ break;
+ ix86_tune = processor_alias_table[i].processor;
+ }
+ else
+ error ("CPU you selected does not support x86-64 "
+ "instruction set");
+ }
+ /* Intel CPUs have always interpreted SSE prefetch instructions as
+ NOPs; so, we can enable SSE prefetch instructions even when
+ -mtune (rather than -march) points us to a processor that has them.
+ However, the VIA C3 gives a SIGILL, so we only do that for i686 and
+ higher processors. */
+ if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
+ x86_prefetch_sse = true;
+ break;
+ }
+ if (i == pta_size)
+ error ("bad value (%s) for -mtune= switch", ix86_tune_string);
+
+ if (optimize_size)
+ ix86_cost = &size_cost;
+ else
+ ix86_cost = processor_target_table[ix86_tune].cost;
+ target_flags |= processor_target_table[ix86_tune].target_enable;
+ target_flags &= ~processor_target_table[ix86_tune].target_disable;
+
+ /* Arrange to set up i386_stack_locals for all functions. */
+ init_machine_status = ix86_init_machine_status;
+
+ /* Validate -mregparm= value. */
+ if (ix86_regparm_string)
+ {
+ i = atoi (ix86_regparm_string);
+ if (i < 0 || i > REGPARM_MAX)
+ error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
+ else
+ ix86_regparm = i;
+ }
+ else
+ if (TARGET_64BIT)
+ ix86_regparm = REGPARM_MAX;
+
+ /* If the user has provided any of the -malign-* options,
+ warn and use that value only if -falign-* is not set.
+ Remove this code in GCC 3.2 or later. */
+ if (ix86_align_loops_string)
+ {
+ warning (0, "-malign-loops is obsolete, use -falign-loops");
+ if (align_loops == 0)
+ {
+ i = atoi (ix86_align_loops_string);
+ if (i < 0 || i > MAX_CODE_ALIGN)
+ error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ else
+ align_loops = 1 << i;
+ }
+ }
+
+ if (ix86_align_jumps_string)
+ {
+ warning (0, "-malign-jumps is obsolete, use -falign-jumps");
+ if (align_jumps == 0)
+ {
+ i = atoi (ix86_align_jumps_string);
+ if (i < 0 || i > MAX_CODE_ALIGN)
+ error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ else
+ align_jumps = 1 << i;
+ }
+ }
+
+ if (ix86_align_funcs_string)
+ {
+ warning (0, "-malign-functions is obsolete, use -falign-functions");
+ if (align_functions == 0)
+ {
+ i = atoi (ix86_align_funcs_string);
+ if (i < 0 || i > MAX_CODE_ALIGN)
+ error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ else
+ align_functions = 1 << i;
+ }
+ }
+
+ /* Default align_* from the processor table. */
+ if (align_loops == 0)
+ {
+ align_loops = processor_target_table[ix86_tune].align_loop;
+ align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
+ }
+ if (align_jumps == 0)
+ {
+ align_jumps = processor_target_table[ix86_tune].align_jump;
+ align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
+ }
+ if (align_functions == 0)
+ {
+ align_functions = processor_target_table[ix86_tune].align_func;
+ }
+
+ /* Validate -mbranch-cost= value, or provide default. */
+ ix86_branch_cost = ix86_cost->branch_cost;
+ if (ix86_branch_cost_string)
+ {
+ i = atoi (ix86_branch_cost_string);
+ if (i < 0 || i > 5)
+ error ("-mbranch-cost=%d is not between 0 and 5", i);
+ else
+ ix86_branch_cost = i;
+ }
+ if (ix86_section_threshold_string)
+ {
+ i = atoi (ix86_section_threshold_string);
+ if (i < 0)
+ error ("-mlarge-data-threshold=%d is negative", i);
+ else
+ ix86_section_threshold = i;
+ }
+
+ if (ix86_tls_dialect_string)
+ {
+ if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
+ ix86_tls_dialect = TLS_DIALECT_GNU;
+ else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
+ ix86_tls_dialect = TLS_DIALECT_GNU2;
+ else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
+ ix86_tls_dialect = TLS_DIALECT_SUN;
+ else
+ error ("bad value (%s) for -mtls-dialect= switch",
+ ix86_tls_dialect_string);
+ }
+ /* APPLE LOCAL begin mainline */
+ if (TARGET_64BIT)
+ {
+ if (TARGET_ALIGN_DOUBLE)
+ error ("-malign-double makes no sense in the 64bit mode");
+ if (TARGET_RTD)
+ error ("-mrtd calling convention not supported in the 64bit mode");
+ /* APPLE LOCAL begin radar 4877693 */
+ if (ix86_force_align_arg_pointer)
+ error ("-mstackrealign not supported in the 64bit mode");
+ /* APPLE LOCAL end radar 4877693 */
+
+ /* Enable by default the SSE and MMX builtins. Do allow the user to
+ explicitly disable any of these. In particular, disabling SSE and
+ MMX for kernel code is extremely useful. */
+ if (!ix86_arch_specified)
+ target_flags
+ |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE
+ | TARGET_SUBTARGET64_DEFAULT) & ~target_flags_explicit);
+ /* APPLE LOCAL begin mainline candidate */
+ /* Disable the red zone for kernel compilation.
+ ??? Why aren't we using -mcmodel=kernel? */
+ if (TARGET_MACHO
+ && (flag_mkernel || flag_apple_kext))
+ target_flags |= MASK_NO_RED_ZONE;
+ /* APPLE LOCAL end mainline candidate */
+ }
+ else
+ {
+ if (!ix86_arch_specified)
+ target_flags |= (TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit);
+
+ /* The i386 ABI does not specify a red zone. It still makes sense to
+ use one when the programmer takes care to keep the stack from being
+ destroyed. */
+ if (!(target_flags_explicit & MASK_NO_RED_ZONE))
+ target_flags |= MASK_NO_RED_ZONE;
+ }
+
+ /* APPLE LOCAL end mainline */
+ /* Keep nonleaf frame pointers. */
+ if (flag_omit_frame_pointer)
+ target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
+ else if (TARGET_OMIT_LEAF_FRAME_POINTER)
+ flag_omit_frame_pointer = 1;
+
+ /* If we're doing fast math, we don't care about comparison order
+ wrt NaNs. This lets us use a shorter comparison sequence. */
+ if (flag_finite_math_only)
+ target_flags &= ~MASK_IEEE_FP;
+
+ /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
+ since the insns won't need emulation. */
+ if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
+ target_flags &= ~MASK_NO_FANCY_MATH_387;
+
+ /* Likewise, if the target doesn't have a 387, or we've specified
+ software floating point, don't use 387 inline intrinsics. */
+ if (!TARGET_80387)
+ target_flags |= MASK_NO_FANCY_MATH_387;
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* Turn on SSE4.1 builtins for -msse4.2. */
+ if (TARGET_SSE4_2)
+ target_flags |= MASK_SSE4_1;
+ /* Turn on SSSE3 builtins for -msse4.1. */
+ if (TARGET_SSE4_1)
+ target_flags |= MASK_SSSE3;
+ /* Turn on SSE3 builtins for -msse4a. */
+ if (TARGET_SSE4A)
+ target_flags |= MASK_SSE3;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ /* APPLE LOCAL begin mainline */
+ /* Turn on SSE3 builtins for -mssse3. */
+ if (TARGET_SSSE3)
+ target_flags |= MASK_SSE3;
+ /* APPLE LOCAL end mainline */
+ /* Turn on SSE2 builtins for -msse3. */
+ if (TARGET_SSE3)
+ target_flags |= MASK_SSE2;
+
+ /* Turn on SSE builtins for -msse2. */
+ if (TARGET_SSE2)
+ target_flags |= MASK_SSE;
+
+ /* Turn on MMX builtins for -msse. */
+ if (TARGET_SSE)
+ {
+ target_flags |= MASK_MMX & ~target_flags_explicit;
+ x86_prefetch_sse = true;
+ }
+
+ /* Turn on MMX builtins for 3Dnow. */
+ if (TARGET_3DNOW)
+ target_flags |= MASK_MMX;
+
+ /* APPLE LOCAL mainline */
+ /* Moved this up... */
+ /* Validate -mpreferred-stack-boundary= value, or provide default.
+ The default of 128 bits is for Pentium III's SSE __m128. We can't
+ change it because of optimize_size. Otherwise, we can't mix object
+ files compiled with -Os and -On. */
+ ix86_preferred_stack_boundary = 128;
+ if (ix86_preferred_stack_boundary_string)
+ {
+ i = atoi (ix86_preferred_stack_boundary_string);
+ if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
+ error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
+ TARGET_64BIT ? 4 : 2);
+ else
+ ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
+ }
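+
+ /* For illustration (not in the original patch): the option value is a
+ log2, so a hypothetical -mpreferred-stack-boundary=4 yields
+ (1 << 4) * BITS_PER_UNIT == 128 bits, i.e. the 16-byte alignment
+ that SSE __m128 spills require. */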
+
+ /* Accept -msseregparm only if at least SSE support is enabled. */
+ if (TARGET_SSEREGPARM
+ && ! TARGET_SSE)
+ error ("-msseregparm used without SSE enabled");
+
+ ix86_fpmath = TARGET_FPMATH_DEFAULT;
+
+ if (ix86_fpmath_string != 0)
+ {
+ if (! strcmp (ix86_fpmath_string, "387"))
+ ix86_fpmath = FPMATH_387;
+ else if (! strcmp (ix86_fpmath_string, "sse"))
+ {
+ if (!TARGET_SSE)
+ {
+ warning (0, "SSE instruction set disabled, using 387 arithmetics");
+ ix86_fpmath = FPMATH_387;
+ }
+ else
+ ix86_fpmath = FPMATH_SSE;
+ }
+ else if (! strcmp (ix86_fpmath_string, "387,sse")
+ || ! strcmp (ix86_fpmath_string, "sse,387"))
+ {
+ if (!TARGET_SSE)
+ {
+ warning (0, "SSE instruction set disabled, using 387 arithmetics");
+ ix86_fpmath = FPMATH_387;
+ }
+ else if (!TARGET_80387)
+ {
+ warning (0, "387 instruction set disabled, using SSE arithmetics");
+ ix86_fpmath = FPMATH_SSE;
+ }
+ else
+ ix86_fpmath = FPMATH_SSE | FPMATH_387;
+ }
+ else
+ error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
+ }
+
+ /* If the i387 is disabled, then do not return values in it. */
+ if (!TARGET_80387)
+ target_flags &= ~MASK_FLOAT_RETURNS;
+
+ if ((x86_accumulate_outgoing_args & TUNEMASK)
+ && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
+ && !optimize_size)
+ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+
+ /* ??? Unwind info is not correct around the CFG unless either a frame
+ pointer is present or M_A_O_A is set. Fixing this requires rewriting
+ unwind info generation to be aware of the CFG and propagating states
+ around edges. */
+ if ((flag_unwind_tables || flag_asynchronous_unwind_tables
+ || flag_exceptions || flag_non_call_exceptions)
+ && flag_omit_frame_pointer
+ && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
+ {
+ if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
+ warning (0, "unwind tables currently require either a frame pointer "
+ "or -maccumulate-outgoing-args for correctness");
+ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+ }
+
+ /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
+ {
+ char *p;
+ ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
+ p = strchr (internal_label_prefix, 'X');
+ internal_label_prefix_len = p - internal_label_prefix;
+ *p = '\0';
+ }
+
+ /* When no scheduling description is available, disable the scheduler
+ pass so it won't slow down compilation and make x87 code slower. */
+ /* APPLE LOCAL 5591571 */
+ if (1 || !TARGET_SCHEDULE)
+ flag_schedule_insns_after_reload = flag_schedule_insns = 0;
+
+ /* APPLE LOCAL begin dynamic-no-pic */
+#if TARGET_MACHO
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ {
+ if (flag_pic)
+ warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
+ flag_pic = 0;
+ }
+ else
+#endif
+ if (flag_pic == 1)
+ {
+ /* Darwin's -fpic is -fPIC. */
+ flag_pic = 2;
+ }
+ /* APPLE LOCAL end dynamic-no-pic */
+ /* APPLE LOCAL begin 4812082 -fast */
+ /* These flags were the best on the software H264 codec, and have therefore
+ been lumped into -fast per 4812082. They have not been evaluated on
+ any other code, except that -fno-tree-pre is known to lose on the
+ hardware accelerated version of the codec. */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ flag_omit_frame_pointer = 1;
+ flag_strict_aliasing = 1;
+ flag_tree_pre = 0;
+ target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
+ align_loops = processor_target_table[ix86_tune].align_loop;
+ }
+ /* APPLE LOCAL end 4812082 -fast */
+}
+
+/* Switch to the appropriate section for output of DECL.
+ DECL is either a `VAR_DECL' node or a constant of some sort.
+ RELOC indicates whether forming the initial value of DECL requires
+ link-time relocations. */
+
+static section *
+x86_64_elf_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align)
+{
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ && ix86_in_large_data_p (decl))
+ {
+ const char *sname = NULL;
+ unsigned int flags = SECTION_WRITE;
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_DATA:
+ sname = ".ldata";
+ break;
+ case SECCAT_DATA_REL:
+ sname = ".ldata.rel";
+ break;
+ case SECCAT_DATA_REL_LOCAL:
+ sname = ".ldata.rel.local";
+ break;
+ case SECCAT_DATA_REL_RO:
+ sname = ".ldata.rel.ro";
+ break;
+ case SECCAT_DATA_REL_RO_LOCAL:
+ sname = ".ldata.rel.ro.local";
+ break;
+ case SECCAT_BSS:
+ sname = ".lbss";
+ flags |= SECTION_BSS;
+ break;
+ case SECCAT_RODATA:
+ case SECCAT_RODATA_MERGE_STR:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_CONST:
+ sname = ".lrodata";
+ flags = 0;
+ break;
+ case SECCAT_SRODATA:
+ case SECCAT_SDATA:
+ case SECCAT_SBSS:
+ gcc_unreachable ();
+ case SECCAT_TEXT:
+ case SECCAT_TDATA:
+ case SECCAT_TBSS:
+ /* We don't split these for the medium model. Place them into
+ default sections and hope for the best. */
+ break;
+ }
+ if (sname)
+ {
+ /* We might get called with string constants, but get_named_section
+ doesn't like them as they are not DECLs. Also, we need to set
+ flags in that case. */
+ if (!DECL_P (decl))
+ return get_section (sname, flags, NULL);
+ return get_named_section (decl, sname, reloc);
+ }
+ }
+ return default_elf_select_section (decl, reloc, align);
+}
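+
+/* Illustrative sketch, not part of the original patch: in a hypothetical
+ -mcmodel=medium build, a large initialized writable array such as
+
+   static int big[1 << 18] = { 1 };   (categorized SECCAT_DATA)
+
+ is steered by the function above into .ldata, while the same array in
+ a small code model falls through to default_elf_select_section and
+ plain .data. */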
+
+/* Build up a unique section name, expressed as a
+ STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
+ RELOC indicates whether the initial value of EXP requires
+ link-time relocations. */
+
+static void
+x86_64_elf_unique_section (tree decl, int reloc)
+{
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ && ix86_in_large_data_p (decl))
+ {
+ const char *prefix = NULL;
+ /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
+ bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
+
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_DATA:
+ case SECCAT_DATA_REL:
+ case SECCAT_DATA_REL_LOCAL:
+ case SECCAT_DATA_REL_RO:
+ case SECCAT_DATA_REL_RO_LOCAL:
+ prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
+ break;
+ case SECCAT_BSS:
+ prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
+ break;
+ case SECCAT_RODATA:
+ case SECCAT_RODATA_MERGE_STR:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_CONST:
+ prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
+ break;
+ case SECCAT_SRODATA:
+ case SECCAT_SDATA:
+ case SECCAT_SBSS:
+ gcc_unreachable ();
+ case SECCAT_TEXT:
+ case SECCAT_TDATA:
+ case SECCAT_TBSS:
+ /* We don't split these for the medium model. Place them into
+ default sections and hope for the best. */
+ break;
+ }
+ if (prefix)
+ {
+ const char *name;
+ size_t nlen, plen;
+ char *string;
+ plen = strlen (prefix);
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = targetm.strip_name_encoding (name);
+ nlen = strlen (name);
+
+ string = alloca (nlen + plen + 1);
+ memcpy (string, prefix, plen);
+ memcpy (string + plen, name, nlen + 1);
+
+ DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
+ return;
+ }
+ }
+ default_unique_section (decl, reloc);
+}
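+
+/* Illustrative sketch, not part of the original patch: for the same
+ hypothetical array "big" compiled with -fdata-sections under
+ -mcmodel=medium, the code above concatenates the ".ldata." prefix
+ with the stripped assembler name, yielding a DECL_SECTION_NAME of
+ ".ldata.big" (or ".gnu.linkonce.ld.big" for one-only data when COMDAT
+ groups are unavailable). */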
+
+#ifdef COMMON_ASM_OP
+/* This says how to output assembler code to declare an
+ uninitialized external-linkage data object.
+
+ For the medium model on x86-64 we need to use the .largecomm
+ directive for large objects. */
+void
+x86_elf_aligned_common (FILE *file,
+ const char *name, unsigned HOST_WIDE_INT size,
+ int align)
+{
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ && size > (unsigned int)ix86_section_threshold)
+ fprintf (file, ".largecomm\t");
+ else
+ fprintf (file, "%s", COMMON_ASM_OP);
+ assemble_name (file, name);
+ fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
+ size, align / BITS_PER_UNIT);
+}
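+
+/* Illustrative sketch, not part of the original patch: for a
+ hypothetical 200000-byte common object "big" with 32-byte alignment
+ under -mcmodel=medium (assuming the usual 64K section threshold), the
+ function above emits
+
+   .largecomm	big,200000,32
+
+ whereas a small object takes the ordinary COMMON_ASM_OP path. */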
+
+/* Utility function for targets to use in implementing
+ ASM_OUTPUT_ALIGNED_BSS. */
+
+void
+x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
+ const char *name, unsigned HOST_WIDE_INT size,
+ int align)
+{
+ if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
+ && size > (unsigned int)ix86_section_threshold)
+ switch_to_section (get_named_section (decl, ".lbss", 0));
+ else
+ switch_to_section (bss_section);
+ ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
+#ifdef ASM_DECLARE_OBJECT_NAME
+ last_assemble_variable_decl = decl;
+ ASM_DECLARE_OBJECT_NAME (file, name, decl);
+#else
+ /* The standard thing is just to output a label for the object. */
+ ASM_OUTPUT_LABEL (file, name);
+#endif /* ASM_DECLARE_OBJECT_NAME */
+ ASM_OUTPUT_SKIP (file, size ? size : 1);
+}
+#endif
+
+void
+optimization_options (int level, int size ATTRIBUTE_UNUSED)
+{
+ /* APPLE LOCAL begin disable strict aliasing; breaks too much existing code. */
+#if TARGET_MACHO
+ flag_strict_aliasing = 0;
+#endif
+ /* APPLE LOCAL end disable strict aliasing; breaks too much existing code. */
+ /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
+ make the problem with not enough registers even worse. */
+#ifdef INSN_SCHEDULING
+ if (level > 1)
+ flag_schedule_insns = 0;
+#endif
+
+ /* APPLE LOCAL begin pragma fenv */
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ /* APPLE LOCAL end pragma fenv */
+
+ if (TARGET_MACHO)
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+
+ /* The default values of these switches depend on TARGET_64BIT,
+ which is not known at this moment. Mark these values with 2 and
+ let the user override them. If there is no command line option
+ specifying them, we will set the defaults in override_options. */
+ if (optimize >= 1)
+ flag_omit_frame_pointer = 2;
+ flag_pcc_struct_return = 2;
+ flag_asynchronous_unwind_tables = 2;
+#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
+ SUBTARGET_OPTIMIZATION_OPTIONS;
+#endif
+ /* APPLE LOCAL begin 4200243 */
+ if (getenv ("RC_FORCE_SSE3"))
+ target_flags |= MASK_SSE3;
+}
+/* APPLE LOCAL end 4200243 */
+
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+/* Version of the above for use from #pragma optimization_level. Only
+ per-function flags are reset. */
+#if TARGET_MACHO
+void
+reset_optimization_options (int level, int size)
+{
+ /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
+ make the problem with not enough registers even worse. */
+#ifdef INSN_SCHEDULING
+ if (level > 1)
+ flag_schedule_insns = 0;
+#endif
+
+ /* APPLE LOCAL begin pragma fenv */
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ /* APPLE LOCAL end pragma fenv */
+
+ /* The default values of these switches depend on TARGET_64BIT
+ which was set earlier and not reset. */
+ if (optimize >= 1)
+ {
+ if (TARGET_64BIT)
+ flag_omit_frame_pointer = 1;
+ else
+ flag_omit_frame_pointer = 0;
+ }
+#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
+ SUBTARGET_OPTIMIZATION_OPTIONS;
+#endif
+ /* APPLE LOCAL begin 4760857 */
+ if (size)
+ ix86_cost = &size_cost;
+ else
+ ix86_cost = processor_target_table[ix86_tune].cost;
+
+ /* Default align_* from the processor table. */
+ if (align_loops == 0)
+ {
+ align_loops = processor_target_table[ix86_tune].align_loop;
+ align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
+ }
+ if (align_jumps == 0)
+ {
+ align_jumps = processor_target_table[ix86_tune].align_jump;
+ align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
+ }
+ /* APPLE LOCAL end 4760857 */
+}
+#endif
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+
+/* Table of valid machine attributes. */
+const struct attribute_spec ix86_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ /* Stdcall attribute says callee is responsible for popping arguments
+ if they are not variable. */
+ { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ /* Fastcall attribute says callee is responsible for popping arguments
+ if they are not variable. */
+ { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ /* Cdecl attribute says the callee is a normal C declaration. */
+ { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ /* Regparm attribute specifies how many integer arguments are to be
+ passed in registers. */
+ { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
+ /* APPLE LOCAL begin regparmandstackparm */
+ /* regparmandstackparm means two entry points; a traditional stack-based
+ one, and another, with a mangled name, that employs regparm and
+ sseregparm. */
+ { "regparmandstackparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ { "regparmandstackparmee", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ /* APPLE LOCAL end regparmandstackparm */
+ /* Sseregparm attribute says we are using x86_64 calling conventions
+ for FP arguments. */
+ { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
+ /* force_align_arg_pointer says this function realigns the stack at entry. */
+ { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
+ false, true, true, ix86_handle_cconv_attribute },
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+ { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
+ { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
+ { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
+#endif
+ { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
+ { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE,
+#endif
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+/* Decide whether we can make a sibling call to a function. DECL is the
+ declaration of the function being targeted by the call and EXP is the
+ CALL_EXPR representing the call. */
+
+static bool
+ix86_function_ok_for_sibcall (tree decl, tree exp)
+{
+ tree func;
+ rtx a, b;
+
+ /* APPLE LOCAL begin indirect sibcall 4087330 */
+ /* If we are generating position-independent code, we cannot sibcall
+ optimize any indirect call, or a direct call to a global function,
+ as the PLT requires %ebx be live. (Darwin does not have a PLT.) */
+ if (!TARGET_MACHO
+ && !TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
+ return false;
+ /* APPLE LOCAL end indirect sibcall 4087330 */
+
+ if (decl)
+ func = decl;
+ else
+ {
+ func = TREE_TYPE (TREE_OPERAND (exp, 0));
+ if (POINTER_TYPE_P (func))
+ func = TREE_TYPE (func);
+ }
+
+ /* Check that the return value locations are the same. For example,
+ if we are returning floats on the 80387 register stack, we cannot
+ make a sibcall from a function that doesn't return a float to a
+ function that does or, conversely, from a function that does return
+ a float to a function that doesn't; the necessary stack adjustment
+ would not be executed. This is also the place we notice
+ differences in the return value ABI. Note that it is ok for one
+ of the functions to have void return type as long as the return
+ value of the other is passed in a register. */
+ a = ix86_function_value (TREE_TYPE (exp), func, false);
+ b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
+ cfun->decl, false);
+ if (STACK_REG_P (a) || STACK_REG_P (b))
+ {
+ if (!rtx_equal_p (a, b))
+ return false;
+ }
+ else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
+ ;
+ else if (!rtx_equal_p (a, b))
+ return false;
+
+ /* If this call is indirect, we'll need to be able to use a call-clobbered
+ register for the address of the target function. Make sure that all
+ such registers are not used for passing parameters. */
+ if (!decl && !TARGET_64BIT)
+ {
+ tree type;
+
+ /* We're looking at the CALL_EXPR, we need the type of the function. */
+ type = TREE_OPERAND (exp, 0); /* pointer expression */
+ type = TREE_TYPE (type); /* pointer type */
+ type = TREE_TYPE (type); /* function type */
+
+ if (ix86_function_regparm (type, NULL) >= 3)
+ {
+ /* ??? Need to count the actual number of registers to be used,
+ not the possible number of registers. Fix later. */
+ return false;
+ }
+ }
+
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+ /* Dllimport'd functions are also called indirectly. */
+ if (decl && DECL_DLLIMPORT_P (decl)
+ && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
+ return false;
+#endif
+
+ /* If we force-aligned the stack, then sibcalling would unalign the
+ stack, which may break the called function. */
+ if (cfun->machine->force_align_arg_pointer)
+ return false;
+
+ /* Otherwise okay. That also includes certain types of indirect calls. */
+ return true;
+}
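+
+/* Illustrative sketch, not part of the original patch: a case the
+ return-value check above rejects. For hypothetical functions
+
+   float target_fn (void);
+   void caller (void) { target_fn (); }
+
+ the sibcall from caller to target_fn is refused: target_fn's result
+ lands in st(0), and caller's epilogue would never pop the x87
+ register stack. */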
+
+/* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
+ calling convention attributes;
+ arguments as in struct attribute_spec.handler. */
+
+static tree
+ix86_handle_cconv_attribute (tree *node, tree name,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE
+ && TREE_CODE (*node) != METHOD_TYPE
+ && TREE_CODE (*node) != FIELD_DECL
+ && TREE_CODE (*node) != TYPE_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ /* Can combine regparm with all attributes but fastcall. */
+ if (is_attribute_p ("regparm", name))
+ {
+ tree cst;
+
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("fastcall and regparm attributes are not compatible");
+ }
+
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (!TARGET_64BIT
+ && (lookup_attribute ("regparmandstackparm", TYPE_ATTRIBUTES (*node))
+ || lookup_attribute ("regparmandstackparmee", TYPE_ATTRIBUTES (*node))))
+ {
+ error ("regparmandstackparm and regparm attributes are not compatible");
+ }
+ /* APPLE LOCAL end regparmandstackparm */
+
+ cst = TREE_VALUE (args);
+ if (TREE_CODE (cst) != INTEGER_CST)
+ {
+ warning (OPT_Wattributes,
+ "%qs attribute requires an integer constant argument",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ else if (compare_tree_int (cst, REGPARM_MAX) > 0)
+ {
+ warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
+ IDENTIFIER_POINTER (name), REGPARM_MAX);
+ *no_add_attrs = true;
+ }
+
+ if (!TARGET_64BIT
+ && lookup_attribute (ix86_force_align_arg_pointer_string,
+ TYPE_ATTRIBUTES (*node))
+ && compare_tree_int (cst, REGPARM_MAX-1))
+ {
+ error ("%s functions limited to %d register parameters",
+ ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
+ }
+
+ return NULL_TREE;
+ }
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* Turn on popcnt instruction for -msse4.2 or -mabm. */
+ if (TARGET_SSE4_2)
+ x86_popcnt = true;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ if (TARGET_64BIT)
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ /* Can combine fastcall with stdcall (redundant) and sseregparm. */
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (is_attribute_p ("fastcall", name)
+ || is_attribute_p ("regparmandstackparm", name))
+ /* APPLE LOCAL end regparmandstackparm */
+ {
+ if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("fastcall and cdecl attributes are not compatible");
+ }
+ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("fastcall and stdcall attributes are not compatible");
+ }
+ if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("fastcall and regparm attributes are not compatible");
+ }
+ }
+
+ /* Can combine stdcall with fastcall (redundant), regparm and
+ sseregparm. */
+ else if (is_attribute_p ("stdcall", name))
+ {
+ if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("stdcall and cdecl attributes are not compatible");
+ }
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node))
+ || lookup_attribute ("regparmandstackparm", TYPE_ATTRIBUTES (*node)))
+ /* APPLE LOCAL end regparmandstackparm */
+ {
+ error ("stdcall and fastcall attributes are not compatible");
+ }
+ }
+
+ /* Can combine cdecl with regparm and sseregparm. */
+ else if (is_attribute_p ("cdecl", name))
+ {
+ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("stdcall and cdecl attributes are not compatible");
+ }
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
+ {
+ error ("fastcall and cdecl attributes are not compatible");
+ }
+ }
+
+ /* Can combine sseregparm with all attributes. */
+
+ return NULL_TREE;
+}
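+
+/* Illustrative sketch, not part of the original patch: hypothetical
+ 32-bit declarations exercising the combination rules above:
+
+   void ok (int) __attribute__ ((stdcall, sseregparm));     accepted
+   void bad (int) __attribute__ ((fastcall, regparm (3)));  rejected
+
+ The second draws "fastcall and regparm attributes are not compatible"
+ because fastcall already claims %ecx and %edx for its arguments. */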
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+ are compatible, and 2 if they are nearly compatible (which causes a
+ warning to be generated). */
+
+static int
+ix86_comp_type_attributes (tree type1, tree type2)
+{
+ /* Check for mismatch of non-default calling convention. */
+ const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
+
+ if (TREE_CODE (type1) != FUNCTION_TYPE)
+ return 1;
+
+ /* Check for mismatched fastcall/regparm types. */
+ if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
+ || (ix86_function_regparm (type1, NULL)
+ != ix86_function_regparm (type2, NULL)))
+ return 0;
+
+ /* Check for mismatched sseregparm types. */
+ if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ /* Check for mismatched return types (cdecl vs stdcall). */
+ if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
+ != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
+ return 0;
+
+ return 1;
+}
+
+/* Return the regparm value for a function with the indicated TYPE and DECL.
+ DECL may be NULL when calling function indirectly
+ or considering a libcall. */
+
+static int
+ix86_function_regparm (tree type, tree decl)
+{
+ tree attr;
+ /* APPLE LOCAL begin MERGE FIXME audit to ensure that it's ok
+
+ We had local_regparm but the FSF didn't, and there didn't seem to
+ be a merge conflict, so something is strange. These seem to be just
+ normal Apple local changes. I asked Stuart about them in email. */
+ int regparm = ix86_regparm;
+ bool user_convention = false;
+
+ if (!TARGET_64BIT)
+ {
+ attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
+ if (attr)
+ {
+ regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
+ user_convention = true;
+ }
+
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type))
+ || lookup_attribute ("regparmandstackparmee", TYPE_ATTRIBUTES (type)))
+ /* APPLE LOCAL end regparmandstackparm */
+ {
+ regparm = 2;
+ user_convention = true;
+ }
+
+ /* Use register calling convention for local functions when possible. */
+ if (!TARGET_64BIT && !user_convention && decl
+ && flag_unit_at_a_time && !profile_flag)
+ {
+ struct cgraph_local_info *i = cgraph_local_info (decl);
+ if (i && i->local)
+ {
+ int local_regparm, globals = 0, regno;
+
+ /* Make sure no regparm register is taken by a global register
+ variable. */
+ for (local_regparm = 0; local_regparm < 3; local_regparm++)
+ if (global_regs[local_regparm])
+ break;
+ /* We can't use regparm(3) for nested functions as these use
+ static chain pointer in third argument. */
+ if (local_regparm == 3
+ /* APPLE LOCAL begin mainline */
+ && (decl_function_context (decl)
+ || ix86_force_align_arg_pointer)
+ /* APPLE LOCAL end mainline */
+ && !DECL_NO_STATIC_CHAIN (decl))
+ local_regparm = 2;
+ /* If the function realigns its stack pointer, the
+ prologue will clobber %ecx. If we've already
+ generated code for the callee, the callee
+ DECL_STRUCT_FUNCTION is gone, so we fall back to
+ scanning the attributes for the self-realigning
+ property. */
+ if ((DECL_STRUCT_FUNCTION (decl)
+ /* MERGE FIXME was in our version, but not in FSF 2006-05-23 */
+ && DECL_STRUCT_FUNCTION (decl)->machine
+ && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
+ || (!DECL_STRUCT_FUNCTION (decl)
+ && lookup_attribute (ix86_force_align_arg_pointer_string,
+ TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
+ local_regparm = 2;
+ /* Each global register variable increases register pressure,
+ so the more global reg vars there are, the less useful the regparm
+ optimization is, unless the user requested it explicitly. */
+ for (regno = 0; regno < 6; regno++)
+ if (global_regs[regno])
+ globals++;
+ local_regparm
+ = globals < local_regparm ? local_regparm - globals : 0;
+
+ if (local_regparm > regparm)
+ regparm = local_regparm;
+ }
+ }
+ }
+ /* APPLE LOCAL end MERGE FIXME audit to ensure that it's ok */
+ return regparm;
+}
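+
+/* A minimal sketch, not part of the original patch, of the promotion
+ above: a hypothetical
+
+   static int helper (int a, int b, int c) { return a + b + c; }
+
+ that is local in the cgraph sense (unit-at-a-time, address never
+ escapes) may get regparm 3 without any attribute, so a, b and c
+ travel in %eax, %edx and %ecx, provided no global register variables,
+ static chain, or stack realignment stand in the way. */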
+
+/* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
+ DFmode (2) arguments in SSE registers for a function with the
+ indicated TYPE and DECL. DECL may be NULL when calling function
+ indirectly or considering a libcall. Otherwise return 0. */
+
+static int
+ix86_function_sseregparm (tree type, tree decl)
+{
+ /* Use SSE registers to pass SFmode and DFmode arguments if requested
+ by the sseregparm attribute. */
+ if (TARGET_SSEREGPARM
+ || (type
+ && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
+ {
+ if (!TARGET_SSE)
+ {
+ if (decl)
+ error ("Calling %qD with attribute sseregparm without "
+ "SSE/SSE2 enabled", decl);
+ else
+ error ("Calling %qT with attribute sseregparm without "
+ "SSE/SSE2 enabled", type);
+ return 0;
+ }
+
+ return 2;
+ }
+
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (type && lookup_attribute ("regparmandstackparmee", TYPE_ATTRIBUTES (type)))
+ return 2;
+ /* APPLE LOCAL end regparmandstackparm */
+
+ /* For local functions, pass up to SSE_REGPARM_MAX SFmode
+ (and DFmode for SSE2) arguments in SSE registers,
+ even for 32-bit targets. */
+ if (!TARGET_64BIT && decl
+ && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
+ {
+ struct cgraph_local_info *i = cgraph_local_info (decl);
+ if (i && i->local)
+ return TARGET_SSE2 ? 2 : 1;
+ }
+
+ return 0;
+}
+
+/* Return true if EAX is live at the start of the function. Used by
+ ix86_expand_prologue to determine if we need special help before
+ calling allocate_stack_worker. */
+
+static bool
+ix86_eax_live_at_start_p (void)
+{
+ /* Cheat. Don't bother working forward from ix86_function_regparm
+ to the function type to whether an actual argument is located in
+ eax. Instead just look at cfg info, which is still close enough
+ to correct at this point. This gives false positives for broken
+ functions that might use uninitialized data that happens to be
+ allocated in eax, but who cares? */
+ return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
+}
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the 80386, the RTD insn may be used to pop them if the number
+ of args is fixed, but if the number is variable then the caller
+ must pop them all. RTD can't be used for library calls now
+ because the library is compiled with the Unix compiler.
+ Use of RTD is a selectable option, since it is incompatible with
+ standard Unix calling sequences. If the option is not selected,
+ the caller must always pop the args.
+
+ The attribute stdcall is equivalent to RTD on a per module basis. */
+
+int
+ix86_return_pops_args (tree fundecl, tree funtype, int size)
+{
+ int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
+
+ /* Cdecl functions override -mrtd, and never pop the stack. */
+ if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
+
+ /* Stdcall and fastcall functions will pop the stack if not
+ variable args. */
+ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
+ || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
+ rtd = 1;
+
+ if (rtd
+ && (TYPE_ARG_TYPES (funtype) == NULL_TREE
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
+ == void_type_node)))
+ return size;
+ }
+
+ /* Lose any fake structure return argument if it is passed on the stack. */
+ if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
+ && !TARGET_64BIT
+ && !KEEP_AGGREGATE_RETURN_POINTER)
+ {
+ int nregs = ix86_function_regparm (funtype, fundecl);
+
+ if (!nregs)
+ return GET_MODE_SIZE (Pmode);
+ }
+
+ return 0;
+}
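+
+/* Illustrative sketch, not part of the original patch:
+
+   void __attribute__ ((stdcall)) f (int a, int b);   fixed args
+   void __attribute__ ((stdcall)) g (int a, ...);     varargs
+
+ For f the function above returns 8, so the callee pops its own
+ arguments with "ret $8"; for g it returns 0 and the caller must pop,
+ exactly as the comment before the function describes. */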
+
+/* Argument support functions. */
+
+/* Return true when register may be used to pass function parameters. */
+bool
+ix86_function_arg_regno_p (int regno)
+{
+ int i;
+ if (!TARGET_64BIT)
+ {
+ if (TARGET_MACHO)
+ return (regno < REGPARM_MAX
+ || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
+ else
+ return (regno < REGPARM_MAX
+ || (TARGET_MMX && MMX_REGNO_P (regno)
+ && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
+ || (TARGET_SSE && SSE_REGNO_P (regno)
+ && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
+ }
+
+ if (TARGET_MACHO)
+ {
+ if (SSE_REGNO_P (regno) && TARGET_SSE)
+ return true;
+ }
+ else
+ {
+ if (TARGET_SSE && SSE_REGNO_P (regno)
+ && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
+ return true;
+ }
+ /* RAX is used as a hidden argument to varargs functions; AL holds
+ the number of vector registers used. */
+ if (!regno)
+ return true;
+ for (i = 0; i < REGPARM_MAX; i++)
+ if (regno == x86_64_int_parameter_registers[i])
+ return true;
+ return false;
+}
+
+/* Return true if we do not know how to pass TYPE solely in registers. */
+
+static bool
+ix86_must_pass_in_stack (enum machine_mode mode, tree type)
+{
+ if (must_pass_in_stack_var_size_or_pad (mode, type))
+ return true;
+
+ /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
+ The layout_type routine is crafty and tries to trick us into passing
+ currently unsupported vector types on the stack by using TImode. */
+ return (!TARGET_64BIT && mode == TImode
+ && type && TREE_CODE (type) != VECTOR_TYPE);
+}
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+void
+init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
+ tree fntype, /* tree ptr for function decl */
+ rtx libname, /* SYMBOL_REF of library name or 0 */
+ tree fndecl)
+{
+ static CUMULATIVE_ARGS zero_cum;
+ tree param, next_param;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "\ninit_cumulative_args (");
+ if (fntype)
+ fprintf (stderr, "fntype code = %s, ret code = %s",
+ tree_code_name[(int) TREE_CODE (fntype)],
+ tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
+ else
+ fprintf (stderr, "no fntype");
+
+ if (libname)
+ fprintf (stderr, ", libname = %s", XSTR (libname, 0));
+ }
+
+ *cum = zero_cum;
+
+ /* Set up the number of registers to use for passing arguments. */
+ cum->nregs = ix86_regparm;
+ if (TARGET_SSE)
+ cum->sse_nregs = SSE_REGPARM_MAX;
+ if (TARGET_MMX)
+ cum->mmx_nregs = MMX_REGPARM_MAX;
+ cum->warn_sse = true;
+ cum->warn_mmx = true;
+ cum->maybe_vaarg = false;
+
+ /* Use ecx and edx registers if function has fastcall attribute,
+ else look for regparm information. */
+ if (fntype && !TARGET_64BIT)
+ {
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
+ {
+ cum->nregs = 2;
+ cum->fastcall = 1;
+ }
+ else
+ cum->nregs = ix86_function_regparm (fntype, fndecl);
+ }
+
+ /* Set up the number of SSE registers used for passing SFmode
+ and DFmode arguments. Warn for mismatching ABI. */
+ cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
+
+ /* Determine if this function has variable arguments. This is
+ indicated by the last argument being 'void_type_node' if there
+ are no variable arguments. If there are variable arguments, then
+ we won't pass anything in registers in 32-bit mode. */
+
+ if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
+ {
+ for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
+ param != 0; param = next_param)
+ {
+ next_param = TREE_CHAIN (param);
+ if (next_param == 0 && TREE_VALUE (param) != void_type_node)
+ {
+ if (!TARGET_64BIT)
+ {
+ cum->nregs = 0;
+ cum->sse_nregs = 0;
+ cum->mmx_nregs = 0;
+ cum->warn_sse = 0;
+ cum->warn_mmx = 0;
+ cum->fastcall = 0;
+ cum->float_in_sse = 0;
+ }
+ cum->maybe_vaarg = true;
+ }
+ }
+ }
+ if ((!fntype && !libname)
+ || (fntype && !TYPE_ARG_TYPES (fntype)))
+ cum->maybe_vaarg = true;
+
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, ", nregs=%d )\n", cum->nregs);
+
+ return;
+}
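+
+/* For illustration (not in the original patch): given a hypothetical
+ 32-bit prototype "void __attribute__ ((fastcall)) f (int, int);",
+ the code above sets cum->nregs = 2 and cum->fastcall = 1, so both
+ ints are passed in %ecx and %edx; had the prototype ended in "...",
+ the varargs scan would zero nregs/sse_nregs/mmx_nregs and every
+ argument would go on the stack. */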
+
+/* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
+ But in the case of vector types, it is some vector mode.
+
+ When we have only some of our vector isa extensions enabled, then there
+ are some modes for which vector_mode_supported_p is false. For these
+ modes, the generic vector support in gcc will choose some non-vector mode
+ in order to implement the type. By computing the natural mode, we'll
+ select the proper ABI location for the operand and not depend on whatever
+ the middle-end decides to do with these vector types. */
+
+static enum machine_mode
+type_natural_mode (tree type)
+{
+ enum machine_mode mode = TYPE_MODE (type);
+
+ if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ if ((size == 8 || size == 16)
+ /* ??? Generic code allows us to create width 1 vectors. Ignore. */
+ && TYPE_VECTOR_SUBPARTS (type) > 1)
+ {
+ enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
+
+ if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
+ mode = MIN_MODE_VECTOR_FLOAT;
+ else
+ mode = MIN_MODE_VECTOR_INT;
+
+ /* Get the mode which has this inner mode and number of units. */
+ for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
+ && GET_MODE_INNER (mode) == innermode)
+ return mode;
+
+ gcc_unreachable ();
+ }
+ }
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ /* Pass V1DImode objects as DImode. This is for compatibility. */
+ if (TREE_CODE (type) == VECTOR_TYPE && mode == V1DImode && !TARGET_64BIT)
+ return DImode;
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ return mode;
+}
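+
+/* Illustrative example (not part of the original source): for a generic
+ vector type such as
+
+ typedef int v4si __attribute__ ((vector_size (16)));
+
+ the middle end may lay the type out in non-vector TImode when SSE is
+ disabled, but type_natural_mode still returns V4SImode (four SImode
+ units, 16 bytes), so the ABI location is chosen consistently. */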
+
+/* We want to pass a value in REGNO whose "natural" mode is MODE. However,
+ this may not agree with the mode that the type system has chosen for the
+ register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
+ go ahead and use it. Otherwise we have to build a PARALLEL instead. */
+
+static rtx
+gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
+ unsigned int regno)
+{
+ rtx tmp;
+
+ if (orig_mode != BLKmode)
+ tmp = gen_rtx_REG (orig_mode, regno);
+ else
+ {
+ tmp = gen_rtx_REG (mode, regno);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
+ }
+
+ return tmp;
+}
+
+/* x86-64 register passing implementation. See the x86-64 ABI for details.
+ The goal of this code is to classify each 8-byte chunk of an incoming
+ argument by register class and assign registers accordingly. */
+
+/* Return the union class of CLASS1 and CLASS2.
+ See the x86-64 PS ABI for details. */
+
+static enum x86_64_reg_class
+merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
+{
+ /* Rule #1: If both classes are equal, this is the resulting class. */
+ if (class1 == class2)
+ return class1;
+
+ /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
+ the other class. */
+ if (class1 == X86_64_NO_CLASS)
+ return class2;
+ if (class2 == X86_64_NO_CLASS)
+ return class1;
+
+ /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
+ if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
+ if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
+ || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
+ return X86_64_INTEGERSI_CLASS;
+ if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
+ || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
+ return X86_64_INTEGER_CLASS;
+
+ /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
+ MEMORY is used. */
+ if (class1 == X86_64_X87_CLASS
+ || class1 == X86_64_X87UP_CLASS
+ || class1 == X86_64_COMPLEX_X87_CLASS
+ || class2 == X86_64_X87_CLASS
+ || class2 == X86_64_X87UP_CLASS
+ || class2 == X86_64_COMPLEX_X87_CLASS)
+ return X86_64_MEMORY_CLASS;
+
+ /* Rule #6: Otherwise class SSE is used. */
+ return X86_64_SSE_CLASS;
+}
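+
+/* Worked example (not part of the original source): by rule #4 above,
+ merging INTEGERSI with SSESF yields INTEGERSI, so a word holding both an
+ int and a float, as in
+
+ union u { int i; float f; };
+
+ travels in a general-purpose register; by rule #5, merging SSE with X87
+ yields MEMORY. */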
+
+/* Classify the argument of type TYPE and mode MODE.
+ CLASSES will be filled by the register class used to pass each word
+ of the operand. The number of words is returned. In case the parameter
+ should be passed in memory, 0 is returned. As a special case for zero
+ sized containers, classes[0] will be NO_CLASS and 1 is returned.
+
+ BIT_OFFSET is used internally for handling records and specifies the
+ offset in bits modulo 256 to avoid overflow cases.
+
+ See the x86-64 PS ABI for details.
+*/
+
+static int
+classify_argument (enum machine_mode mode, tree type,
+ enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
+{
+ HOST_WIDE_INT bytes =
+ (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+ int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ /* Variable sized entities are always passed/returned in memory. */
+ if (bytes < 0)
+ return 0;
+
+ if (mode != VOIDmode
+ && targetm.calls.must_pass_in_stack (mode, type))
+ return 0;
+
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ int i;
+ tree field;
+ enum x86_64_reg_class subclasses[MAX_CLASSES];
+
+ /* On x86-64 we pass structures larger than 16 bytes on the stack. */
+ if (bytes > 16)
+ return 0;
+
+ for (i = 0; i < words; i++)
+ classes[i] = X86_64_NO_CLASS;
+
+ /* Zero sized arrays or structures are NO_CLASS. We return 0 to
+ signal the memory class, so handle it as a special case. */
+ if (!words)
+ {
+ classes[0] = X86_64_NO_CLASS;
+ return 1;
+ }
+
+ /* Classify each field of record and merge classes. */
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ /* For classes, first merge in the fields of the base classes. */
+ if (TYPE_BINFO (type))
+ {
+ tree binfo, base_binfo;
+ int basenum;
+
+ for (binfo = TYPE_BINFO (type), basenum = 0;
+ BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
+ {
+ int num;
+ int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
+ tree type = BINFO_TYPE (base_binfo);
+
+ num = classify_argument (TYPE_MODE (type),
+ type, subclasses,
+ (offset + bit_offset) % 256);
+ if (!num)
+ return 0;
+ for (i = 0; i < num; i++)
+ {
+ int pos = (offset + (bit_offset % 64)) / 8 / 8;
+ classes[i + pos] =
+ merge_classes (subclasses[i], classes[i + pos]);
+ }
+ }
+ }
+ /* And now merge the fields of the structure. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int num;
+
+ if (TREE_TYPE (field) == error_mark_node)
+ continue;
+
+ /* Bitfields are always classified as integer. Handle them
+ early, since later code would consider them to be
+ misaligned integers. */
+ if (DECL_BIT_FIELD (field))
+ {
+ for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
+ i < ((int_bit_position (field) + (bit_offset % 64))
+ + tree_low_cst (DECL_SIZE (field), 0)
+ + 63) / 8 / 8; i++)
+ classes[i] =
+ merge_classes (X86_64_INTEGER_CLASS,
+ classes[i]);
+ }
+ else
+ {
+ num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
+ TREE_TYPE (field), subclasses,
+ (int_bit_position (field)
+ + bit_offset) % 256);
+ if (!num)
+ return 0;
+ for (i = 0; i < num; i++)
+ {
+ int pos =
+ (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
+ classes[i + pos] =
+ merge_classes (subclasses[i], classes[i + pos]);
+ }
+ }
+ }
+ }
+ break;
+
+ case ARRAY_TYPE:
+ /* Arrays are handled as small records. */
+ {
+ int num;
+ num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
+ TREE_TYPE (type), subclasses, bit_offset);
+ if (!num)
+ return 0;
+
+ /* The partial classes are now full classes. */
+ if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
+ subclasses[0] = X86_64_SSE_CLASS;
+ if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
+ subclasses[0] = X86_64_INTEGER_CLASS;
+
+ for (i = 0; i < words; i++)
+ classes[i] = subclasses[i % num];
+
+ break;
+ }
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ /* Unions are similar to RECORD_TYPE but the offset is always 0. */
+
+ /* Unions are not derived. */
+ gcc_assert (!TYPE_BINFO (type)
+ || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int num;
+
+ if (TREE_TYPE (field) == error_mark_node)
+ continue;
+
+ num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
+ TREE_TYPE (field), subclasses,
+ bit_offset);
+ if (!num)
+ return 0;
+ for (i = 0; i < num; i++)
+ classes[i] = merge_classes (subclasses[i], classes[i]);
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Final merger cleanup. */
+ for (i = 0; i < words; i++)
+ {
+ /* If one class is MEMORY, everything should be passed in
+ memory. */
+ if (classes[i] == X86_64_MEMORY_CLASS)
+ return 0;
+
+ /* The X86_64_SSEUP_CLASS should be always preceded by
+ X86_64_SSE_CLASS. */
+ if (classes[i] == X86_64_SSEUP_CLASS
+ && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
+ classes[i] = X86_64_SSE_CLASS;
+
+ /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
+ if (classes[i] == X86_64_X87UP_CLASS
+ && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
+ classes[i] = X86_64_SSE_CLASS;
+ }
+ return words;
+ }
+
+ /* Compute the alignment needed. We align all types to their natural
+ boundaries, with the exception of XFmode, which is aligned to 128 bits. */
+ if (mode != VOIDmode && mode != BLKmode)
+ {
+ int mode_alignment = GET_MODE_BITSIZE (mode);
+
+ if (mode == XFmode)
+ mode_alignment = 128;
+ else if (mode == XCmode)
+ mode_alignment = 256;
+ if (COMPLEX_MODE_P (mode))
+ mode_alignment /= 2;
+ /* Misaligned fields are always returned in memory. */
+ if (bit_offset % mode_alignment)
+ return 0;
+ }
+
+ /* For V1xx modes, just use the base mode. */
+ if (VECTOR_MODE_P (mode)
+ && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
+ mode = GET_MODE_INNER (mode);
+
+ /* Classification of atomic types. */
+ switch (mode)
+ {
+ case SDmode:
+ case DDmode:
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case TDmode:
+ classes[0] = X86_64_SSE_CLASS;
+ classes[1] = X86_64_SSEUP_CLASS;
+ return 2;
+ case DImode:
+ case SImode:
+ case HImode:
+ case QImode:
+ case CSImode:
+ case CHImode:
+ case CQImode:
+ if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
+ classes[0] = X86_64_INTEGERSI_CLASS;
+ else
+ classes[0] = X86_64_INTEGER_CLASS;
+ return 1;
+ case CDImode:
+ case TImode:
+ classes[0] = classes[1] = X86_64_INTEGER_CLASS;
+ return 2;
+ case CTImode:
+ return 0;
+ case SFmode:
+ if (!(bit_offset % 64))
+ classes[0] = X86_64_SSESF_CLASS;
+ else
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case DFmode:
+ classes[0] = X86_64_SSEDF_CLASS;
+ return 1;
+ case XFmode:
+ classes[0] = X86_64_X87_CLASS;
+ classes[1] = X86_64_X87UP_CLASS;
+ return 2;
+ case TFmode:
+ classes[0] = X86_64_SSE_CLASS;
+ classes[1] = X86_64_SSEUP_CLASS;
+ return 2;
+ case SCmode:
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case DCmode:
+ classes[0] = X86_64_SSEDF_CLASS;
+ classes[1] = X86_64_SSEDF_CLASS;
+ return 2;
+ case XCmode:
+ classes[0] = X86_64_COMPLEX_X87_CLASS;
+ return 1;
+ case TCmode:
+ /* This mode is larger than 16 bytes. */
+ return 0;
+ case V4SFmode:
+ case V4SImode:
+ case V16QImode:
+ case V8HImode:
+ case V2DFmode:
+ case V2DImode:
+ classes[0] = X86_64_SSE_CLASS;
+ classes[1] = X86_64_SSEUP_CLASS;
+ return 2;
+ case V2SFmode:
+ case V2SImode:
+ case V4HImode:
+ case V8QImode:
+ classes[0] = X86_64_SSE_CLASS;
+ return 1;
+ case BLKmode:
+ case VOIDmode:
+ return 0;
+ default:
+ gcc_assert (VECTOR_MODE_P (mode));
+
+ if (bytes > 16)
+ return 0;
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
+
+ if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
+ classes[0] = X86_64_INTEGERSI_CLASS;
+ else
+ classes[0] = X86_64_INTEGER_CLASS;
+ classes[1] = X86_64_INTEGER_CLASS;
+ return 1 + (bytes > 8);
+ }
+}
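+
+/* Worked example (not part of the original source): for the hypothetical
+
+ struct s { double d; int a, b; };
+
+ (16 bytes, two eightbytes), the double classifies the first word as
+ SSEDF and the two ints classify the second word as INTEGER, so
+ classify_argument returns 2 with classes = { SSEDF, INTEGER }. */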
+
+/* Examine the argument and set the number of registers required in each
+ class. Return 0 iff the parameter should be passed in memory. */
+static int
+examine_argument (enum machine_mode mode, tree type, int in_return,
+ int *int_nregs, int *sse_nregs)
+{
+ enum x86_64_reg_class class[MAX_CLASSES];
+ int n = classify_argument (mode, type, class, 0);
+
+ *int_nregs = 0;
+ *sse_nregs = 0;
+ if (!n)
+ return 0;
+ for (n--; n >= 0; n--)
+ switch (class[n])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ (*int_nregs)++;
+ break;
+ case X86_64_SSE_CLASS:
+ case X86_64_SSESF_CLASS:
+ case X86_64_SSEDF_CLASS:
+ (*sse_nregs)++;
+ break;
+ case X86_64_NO_CLASS:
+ case X86_64_SSEUP_CLASS:
+ break;
+ case X86_64_X87_CLASS:
+ case X86_64_X87UP_CLASS:
+ if (!in_return)
+ return 0;
+ break;
+ case X86_64_COMPLEX_X87_CLASS:
+ return in_return ? 2 : 0;
+ case X86_64_MEMORY_CLASS:
+ gcc_unreachable ();
+ }
+ return 1;
+}
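+
+/* Usage note (not part of the original source): for the struct s example
+ above, examine_argument sets *int_nregs = 1 and *sse_nregs = 1 and
+ returns 1, i.e. the struct needs one integer and one SSE register. */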
+
+/* Construct the container for the argument used by the GCC interface. See
+ FUNCTION_ARG for the detailed description. */
+
+static rtx
+construct_container (enum machine_mode mode, enum machine_mode orig_mode,
+ tree type, int in_return, int nintregs, int nsseregs,
+ const int *intreg, int sse_regno)
+{
+ /* The following variables hold the static issued_error state. */
+ static bool issued_sse_arg_error;
+ static bool issued_sse_ret_error;
+ static bool issued_x87_ret_error;
+
+ enum machine_mode tmpmode;
+ int bytes =
+ (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+ enum x86_64_reg_class class[MAX_CLASSES];
+ int n;
+ int i;
+ int nexps = 0;
+ int needed_sseregs, needed_intregs;
+ rtx exp[MAX_CLASSES];
+ rtx ret;
+
+ n = classify_argument (mode, type, class, 0);
+ if (TARGET_DEBUG_ARG)
+ {
+ if (!n)
+ fprintf (stderr, "Memory class\n");
+ else
+ {
+ fprintf (stderr, "Classes:");
+ for (i = 0; i < n; i++)
+ {
+ fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
+ }
+ fprintf (stderr, "\n");
+ }
+ }
+ if (!n)
+ return NULL;
+ if (!examine_argument (mode, type, in_return, &needed_intregs,
+ &needed_sseregs))
+ return NULL;
+ if (needed_intregs > nintregs || needed_sseregs > nsseregs)
+ return NULL;
+
+ /* We allowed the user to turn off SSE for kernel mode. Don't crash if
+ some less clueful developer tries to use floating-point anyway. */
+ if (needed_sseregs && !TARGET_SSE)
+ {
+ if (in_return)
+ {
+ if (!issued_sse_ret_error)
+ {
+ error ("SSE register return with SSE disabled");
+ issued_sse_ret_error = true;
+ }
+ }
+ else if (!issued_sse_arg_error)
+ {
+ error ("SSE register argument with SSE disabled");
+ issued_sse_arg_error = true;
+ }
+ return NULL;
+ }
+
+ /* Likewise, error if the ABI requires us to return values in the
+ x87 registers and the user specified -mno-80387. */
+ if (!TARGET_80387 && in_return)
+ for (i = 0; i < n; i++)
+ if (class[i] == X86_64_X87_CLASS
+ || class[i] == X86_64_X87UP_CLASS
+ || class[i] == X86_64_COMPLEX_X87_CLASS)
+ {
+ if (!issued_x87_ret_error)
+ {
+ error ("x87 register return with x87 disabled");
+ issued_x87_ret_error = true;
+ }
+ return NULL;
+ }
+
+ /* First construct the simple cases. Avoid SCmode, since we want to use
+ a single register to pass this type. */
+ if (n == 1 && mode != SCmode)
+ switch (class[0])
+ {
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ return gen_rtx_REG (mode, intreg[0]);
+ case X86_64_SSE_CLASS:
+ case X86_64_SSESF_CLASS:
+ case X86_64_SSEDF_CLASS:
+ return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
+ case X86_64_X87_CLASS:
+ case X86_64_COMPLEX_X87_CLASS:
+ return gen_rtx_REG (mode, FIRST_STACK_REG);
+ case X86_64_NO_CLASS:
+ /* Zero sized array, struct or class. */
+ return NULL;
+ default:
+ gcc_unreachable ();
+ }
+ if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
+ && mode != BLKmode)
+ return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
+ if (n == 2
+ && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
+ return gen_rtx_REG (XFmode, FIRST_STACK_REG);
+ if (n == 2 && class[0] == X86_64_INTEGER_CLASS
+ && class[1] == X86_64_INTEGER_CLASS
+ && (mode == CDImode || mode == TImode || mode == TFmode)
+ && intreg[0] + 1 == intreg[1])
+ return gen_rtx_REG (mode, intreg[0]);
+
+ /* Otherwise figure out the entries of the PARALLEL. */
+ for (i = 0; i < n; i++)
+ {
+ switch (class[i])
+ {
+ case X86_64_NO_CLASS:
+ break;
+ case X86_64_INTEGER_CLASS:
+ case X86_64_INTEGERSI_CLASS:
+ /* Merge TImodes on aligned occasions here too. */
+ if (i * 8 + 8 > bytes)
+ tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
+ else if (class[i] == X86_64_INTEGERSI_CLASS)
+ tmpmode = SImode;
+ else
+ tmpmode = DImode;
+ /* We've requested 24 bytes we don't have a mode for. Use DImode. */
+ if (tmpmode == BLKmode)
+ tmpmode = DImode;
+ exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (tmpmode, *intreg),
+ GEN_INT (i*8));
+ intreg++;
+ break;
+ case X86_64_SSESF_CLASS:
+ exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SFmode,
+ SSE_REGNO (sse_regno)),
+ GEN_INT (i*8));
+ sse_regno++;
+ break;
+ case X86_64_SSEDF_CLASS:
+ exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DFmode,
+ SSE_REGNO (sse_regno)),
+ GEN_INT (i*8));
+ sse_regno++;
+ break;
+ case X86_64_SSE_CLASS:
+ if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
+ tmpmode = TImode;
+ else
+ tmpmode = DImode;
+ exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (tmpmode,
+ SSE_REGNO (sse_regno)),
+ GEN_INT (i*8));
+ if (tmpmode == TImode)
+ i++;
+ sse_regno++;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* Empty aligned struct, union or class. */
+ if (nexps == 0)
+ return NULL;
+
+ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
+ for (i = 0; i < nexps; i++)
+ XVECEXP (ret, 0, i) = exp [i];
+ return ret;
+}
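+
+/* Illustrative sketch (not part of the original source): for the struct s
+ example above, none of the simple cases match (classes SSEDF and
+ INTEGER), so the loop over the classes builds roughly
+
+ (parallel [(expr_list (reg:DF xmm0) (const_int 0))
+ (expr_list (reg:DI di) (const_int 8))])
+
+ i.e. the double travels in the first SSE register and the two packed
+ ints in the first integer register. */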
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+void
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named)
+{
+ int bytes =
+ (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+ int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if (type)
+ mode = type_natural_mode (type);
+
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
+ "mode=%s, named=%d)\n\n",
+ words, cum->words, cum->nregs, cum->sse_nregs,
+ GET_MODE_NAME (mode), named);
+
+ if (TARGET_64BIT)
+ {
+ int int_nregs, sse_nregs;
+ if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
+ cum->words += words;
+ else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
+ {
+ cum->nregs -= int_nregs;
+ cum->sse_nregs -= sse_nregs;
+ cum->regno += int_nregs;
+ cum->sse_regno += sse_nregs;
+ }
+ else
+ cum->words += words;
+ }
+ else
+ {
+ switch (mode)
+ {
+ default:
+ break;
+
+ case BLKmode:
+ if (bytes < 0)
+ break;
+ /* FALLTHRU */
+
+ case DImode:
+ case SImode:
+ case HImode:
+ case QImode:
+ cum->words += words;
+ cum->nregs -= words;
+ cum->regno += words;
+
+ if (cum->nregs <= 0)
+ {
+ cum->nregs = 0;
+ cum->regno = 0;
+ }
+ break;
+
+ case DFmode:
+ if (cum->float_in_sse < 2)
+ break;
+ case SFmode:
+ if (cum->float_in_sse < 1)
+ break;
+ /* FALLTHRU */
+
+ case TImode:
+ case V16QImode:
+ case V8HImode:
+ case V4SImode:
+ case V2DImode:
+ case V4SFmode:
+ case V2DFmode:
+ if (!type || !AGGREGATE_TYPE_P (type))
+ {
+ cum->sse_words += words;
+ cum->sse_nregs -= 1;
+ cum->sse_regno += 1;
+ if (cum->sse_nregs <= 0)
+ {
+ cum->sse_nregs = 0;
+ cum->sse_regno = 0;
+ }
+ }
+ break;
+
+ case V8QImode:
+ case V4HImode:
+ case V2SImode:
+ case V2SFmode:
+ if (!type || !AGGREGATE_TYPE_P (type))
+ {
+ cum->mmx_words += words;
+ cum->mmx_nregs -= 1;
+ cum->mmx_regno += 1;
+ if (cum->mmx_nregs <= 0)
+ {
+ cum->mmx_nregs = 0;
+ cum->mmx_regno = 0;
+ }
+ }
+ break;
+ }
+ }
+}
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+rtx
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
+ tree type, int named)
+{
+ enum machine_mode mode = orig_mode;
+ rtx ret = NULL_RTX;
+ int bytes =
+ (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+ int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ static bool warnedsse, warnedmmx;
+
+ /* To simplify the code below, represent vector types with a vector mode
+ even if MMX/SSE are not active. */
+ if (type && TREE_CODE (type) == VECTOR_TYPE)
+ mode = type_natural_mode (type);
+
+ /* Handle a hidden AL argument containing the number of SSE registers used
+ by a varargs x86-64 function. For the i386 ABI just return constm1_rtx
+ to avoid any AL settings. */
+ if (mode == VOIDmode)
+ {
+ if (TARGET_64BIT)
+ return GEN_INT (cum->maybe_vaarg
+ ? (cum->sse_nregs < 0
+ ? SSE_REGPARM_MAX
+ : cum->sse_regno)
+ : -1);
+ else
+ return constm1_rtx;
+ }
+ if (TARGET_64BIT)
+ ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
+ cum->sse_nregs,
+ &x86_64_int_parameter_registers [cum->regno],
+ cum->sse_regno);
+ else
+ switch (mode)
+ {
+ /* For now, pass fp/complex values on the stack. */
+ default:
+ break;
+
+ case BLKmode:
+ if (bytes < 0)
+ break;
+ /* FALLTHRU */
+ case DImode:
+ case SImode:
+ case HImode:
+ case QImode:
+ if (words <= cum->nregs)
+ {
+ int regno = cum->regno;
+
+ /* Fastcall allocates the first two DWORD (SImode) or
+ smaller arguments to ECX and EDX. */
+ if (cum->fastcall)
+ {
+ if (mode == BLKmode || mode == DImode)
+ break;
+
+ /* ECX, not EAX, is the first allocated register. */
+ if (regno == 0)
+ regno = 2;
+ }
+ ret = gen_rtx_REG (mode, regno);
+ }
+ break;
+ case DFmode:
+ if (cum->float_in_sse < 2)
+ break;
+ case SFmode:
+ if (cum->float_in_sse < 1)
+ break;
+ /* FALLTHRU */
+ case TImode:
+ case V16QImode:
+ case V8HImode:
+ case V4SImode:
+ case V2DImode:
+ case V4SFmode:
+ case V2DFmode:
+ if (!type || !AGGREGATE_TYPE_P (type))
+ {
+ if (!TARGET_SSE && !warnedsse && cum->warn_sse)
+ {
+ warnedsse = true;
+ warning (0, "SSE vector argument without SSE enabled "
+ "changes the ABI");
+ }
+ if (cum->sse_nregs)
+ ret = gen_reg_or_parallel (mode, orig_mode,
+ cum->sse_regno + FIRST_SSE_REG);
+ }
+ break;
+ case V8QImode:
+ case V4HImode:
+ case V2SImode:
+ case V2SFmode:
+ if (!type || !AGGREGATE_TYPE_P (type))
+ {
+ if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
+ {
+ warnedmmx = true;
+ warning (0, "MMX vector argument without MMX enabled "
+ "changes the ABI");
+ }
+ if (cum->mmx_nregs)
+ ret = gen_reg_or_parallel (mode, orig_mode,
+ cum->mmx_regno + FIRST_MMX_REG);
+ }
+ break;
+ }
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr,
+ "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
+ words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
+
+ if (ret)
+ print_simple_rtl (stderr, ret);
+ else
+ fprintf (stderr, ", stack");
+
+ fprintf (stderr, " )\n");
+ }
+
+ return ret;
+}
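+
+/* Illustrative note (not part of the original source): the hidden AL
+ handling above means that for a 64-bit varargs call such as
+ printf ("%f", x), the caller materializes the number of SSE registers
+ used (here 1) in AL before the call; the callee's prologue uses that
+ count to decide how many SSE registers to dump into the save area. */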
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type. */
+
+static bool
+ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ if (!TARGET_64BIT)
+ return 0;
+
+ if (type && int_size_in_bytes (type) == -1)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return true when TYPE should be 128-bit aligned for the 32-bit argument
+ passing ABI. Only called if TARGET_SSE. */
+static bool
+contains_128bit_aligned_vector_p (tree type)
+{
+ enum machine_mode mode = TYPE_MODE (type);
+ if (SSE_REG_MODE_P (mode)
+ && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
+ return true;
+ if (TYPE_ALIGN (type) < 128)
+ return false;
+
+ if (AGGREGATE_TYPE_P (type))
+ {
+ /* Walk the aggregates recursively. */
+ switch (TREE_CODE (type))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree field;
+
+ if (TYPE_BINFO (type))
+ {
+ tree binfo, base_binfo;
+ int i;
+
+ for (binfo = TYPE_BINFO (type), i = 0;
+ BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
+ if (contains_128bit_aligned_vector_p
+ (BINFO_TYPE (base_binfo)))
+ return true;
+ }
+ /* And now check the fields of the structure. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL
+ && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
+ return true;
+ }
+ break;
+ }
+
+ case ARRAY_TYPE:
+ /* Just in case some language passes arrays by value. */
+ if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
+ return true;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ return false;
+}
+
+/* Gives the alignment boundary, in bits, of an argument with the
+ specified mode and type. */
+
+int
+ix86_function_arg_boundary (enum machine_mode mode, tree type)
+{
+ int align;
+ if (type)
+ align = TYPE_ALIGN (type);
+ else
+ align = GET_MODE_ALIGNMENT (mode);
+ /* APPLE LOCAL begin unbreak ppc64 abi 5103220 */
+ if (type && integer_zerop (TYPE_SIZE (type)))
+ align = PARM_BOUNDARY;
+ /* APPLE LOCAL end unbreak ppc64 abi 5103220 */
+ if (align < PARM_BOUNDARY)
+ align = PARM_BOUNDARY;
+ if (!TARGET_64BIT)
+ {
+ /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
+ make an exception for SSE modes since these require 128-bit
+ alignment.
+
+ The handling here differs from field_alignment. ICC aligns MMX
+ arguments to 4 byte boundaries, while structure fields are aligned
+ to 8 byte boundaries. */
+ if (!TARGET_SSE)
+ align = PARM_BOUNDARY;
+ else if (!type)
+ {
+ if (!SSE_REG_MODE_P (mode))
+ align = PARM_BOUNDARY;
+ }
+ else
+ {
+ if (!contains_128bit_aligned_vector_p (type))
+ align = PARM_BOUNDARY;
+ }
+ }
+ if (align > 128)
+ align = 128;
+ return align;
+}
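+
+/* Worked example (not part of the original source): on i386 with SSE
+ enabled, an __m128 argument (or a struct containing one) reports a
+ 128-bit boundary here, while a plain double drops to PARM_BOUNDARY
+ (32 bits); the result is also capped at 128 bits. */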
+
+/* Return true if N is a possible register number of function value. */
+bool
+ix86_function_value_regno_p (int regno)
+{
+ if (TARGET_MACHO)
+ {
+ if (!TARGET_64BIT)
+ {
+ return ((regno) == 0
+ || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
+ || ((regno) == FIRST_SSE_REG && TARGET_SSE));
+ }
+ return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
+ || ((regno) == FIRST_SSE_REG && TARGET_SSE)
+ || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
+ }
+ else
+ {
+ if (regno == 0
+ || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
+ || (regno == FIRST_SSE_REG && TARGET_SSE))
+ return true;
+
+ if (!TARGET_64BIT
+ && (regno == FIRST_MMX_REG && TARGET_MMX))
+ return true;
+
+ return false;
+ }
+}
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+rtx
+ix86_function_value (tree valtype, tree fntype_or_decl,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode natmode = type_natural_mode (valtype);
+
+ if (TARGET_64BIT)
+ {
+ rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
+ 1, REGPARM_MAX, SSE_REGPARM_MAX,
+ x86_64_int_return_registers, 0);
+ /* For zero sized structures, construct_container returns NULL, but we
+ need to keep the rest of the compiler happy by returning a meaningful value. */
+ if (!ret)
+ ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
+ return ret;
+ }
+ else
+ {
+ tree fn = NULL_TREE, fntype;
+ if (fntype_or_decl
+ && DECL_P (fntype_or_decl))
+ fn = fntype_or_decl;
+ fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
+ return gen_rtx_REG (TYPE_MODE (valtype),
+ ix86_value_regno (natmode, fn, fntype));
+ }
+}
+
+/* APPLE LOCAL begin radar 4781080 */
+/* Return true iff we must generate a call to objcMsgSend for an
+ fp-returning method. */
+bool
+ix86_objc_fpreturn_msgcall (tree ret_type, bool no_long_double)
+{
+ if (no_long_double)
+ return TARGET_64BIT && SCALAR_FLOAT_TYPE_P (ret_type)
+ && TYPE_MODE (ret_type) != XFmode;
+ else
+ return SCALAR_FLOAT_TYPE_P (ret_type);
+}
+/* APPLE LOCAL end radar 4781080 */
+
+/* Return true iff type is returned in memory. */
+int
+ix86_return_in_memory (tree type)
+{
+ int needed_intregs, needed_sseregs, size;
+ enum machine_mode mode = type_natural_mode (type);
+
+ if (TARGET_64BIT)
+ return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
+
+ if (mode == BLKmode)
+ return 1;
+
+ size = int_size_in_bytes (type);
+
+ if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
+ return 0;
+
+ if (VECTOR_MODE_P (mode) || mode == TImode)
+ {
+ /* User-created vectors small enough to fit in EAX. */
+ if (size < 8)
+ return 0;
+
+ /* MMX/3dNow values are returned in MM0,
+ except when it doesn't exist. */
+ if (size == 8)
+ /* APPLE LOCAL begin radar 4875125. */
+ /* Undo the mainline patch which broke MACHO ABI compatibility. */
+ return (TARGET_MACHO) ? 1 : (TARGET_MMX ? 0 : 1);
+ /* APPLE LOCAL end radar 4875125. */
+
+ /* SSE values are returned in XMM0, except when it doesn't exist. */
+ if (size == 16)
+ return (TARGET_SSE ? 0 : 1);
+ }
+
+ if (mode == XFmode)
+ return 0;
+
+ if (mode == TDmode)
+ return 1;
+
+ if (size > 12)
+ return 1;
+ return 0;
+}
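+
+/* Worked example (not part of the original source): on 32-bit Darwin an
+ 8-byte vector return takes the TARGET_MACHO branch above and goes to
+ memory, a 16-byte vector with SSE enabled is returned in XMM0, and a
+ 16-byte BLKmode struct is returned in memory. */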
+
+/* When returning SSE vector types, we have a choice of either
+ (1) being ABI incompatible with a -march switch, or
+ (2) generating an error.
+ Given no good solution, I think the safest thing is one warning.
+ The user won't be able to use -Werror, but....
+
+ Choose the STRUCT_VALUE_RTX hook because that's (at present) only
+ called in response to actually generating a caller or callee that
+ uses such a type. As opposed to RETURN_IN_MEMORY, which is called
+ via aggregate_value_p for general type probing from tree-ssa. */
+
+static rtx
+ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
+{
+ static bool warnedsse, warnedmmx;
+
+ if (type)
+ {
+ /* Look at the return type of the function, not the function type. */
+ enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
+
+ if (!TARGET_SSE && !warnedsse)
+ {
+ if (mode == TImode
+ || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
+ {
+ warnedsse = true;
+ warning (0, "SSE vector return without SSE enabled "
+ "changes the ABI");
+ }
+ }
+
+ if (!TARGET_MMX && !warnedmmx)
+ {
+ if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
+ {
+ warnedmmx = true;
+ warning (0, "MMX vector return without MMX enabled "
+ "changes the ABI");
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+rtx
+ix86_libcall_value (enum machine_mode mode)
+{
+ if (TARGET_64BIT)
+ {
+ switch (mode)
+ {
+ case SFmode:
+ case SCmode:
+ case DFmode:
+ case DCmode:
+ case TFmode:
+ case SDmode:
+ case DDmode:
+ case TDmode:
+ return gen_rtx_REG (mode, FIRST_SSE_REG);
+ case XFmode:
+ case XCmode:
+ return gen_rtx_REG (mode, FIRST_FLOAT_REG);
+ case TCmode:
+ return NULL;
+ default:
+ return gen_rtx_REG (mode, 0);
+ }
+ }
+ else
+ return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
+}
+
+/* Given a mode, return the register to use for a return value. */
+
+static int
+ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
+{
+ gcc_assert (!TARGET_64BIT);
+
+ /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
+ we normally prevent this case when MMX is not available. However,
+ some ABIs may require the result to be returned as DImode. */
+ if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
+ return TARGET_MMX ? FIRST_MMX_REG : 0;
+
+ /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
+ we prevent this case when SSE is not available. However, some ABIs
+ may require the result to be returned as integer TImode. */
+ if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
+ return TARGET_SSE ? FIRST_SSE_REG : 0;
+
+ /* Decimal floating point values can go in %eax, unlike other float modes. */
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return 0;
+
+ /* APPLE LOCAL begin regparmandstackparm */
+ if (SSE_FLOAT_MODE_P(mode)
+ && fntype && lookup_attribute ("regparmandstackparmee", TYPE_ATTRIBUTES (fntype)))
+ return FIRST_SSE_REG;
+ /* APPLE LOCAL end regparmandstackparm */
+
+ /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
+ if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
+ return 0;
+
+ /* Floating point return values in %st(0), except for local functions when
+ SSE math is enabled or for functions with sseregparm attribute. */
+ if ((func || fntype)
+ && (mode == SFmode || mode == DFmode))
+ {
+ int sse_level = ix86_function_sseregparm (fntype, func);
+ if ((sse_level >= 1 && mode == SFmode)
+ || (sse_level == 2 && mode == DFmode))
+ return FIRST_SSE_REG;
+ }
+
+ return FIRST_FLOAT_REG;
+}
+
+/* Create the va_list data type. */
+
+static tree
+ix86_build_builtin_va_list (void)
+{
+ tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
+
+ /* For i386 we use a plain pointer to the argument area. */
+ if (!TARGET_64BIT)
+ return build_pointer_type (char_type_node);
+
+ record = (*lang_hooks.types.make_type) (RECORD_TYPE);
+ type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
+ unsigned_type_node);
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
+ unsigned_type_node);
+ f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
+ ptr_type_node);
+ f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
+ ptr_type_node);
+
+ va_list_gpr_counter_field = f_gpr;
+ va_list_fpr_counter_field = f_fpr;
+
+ DECL_FIELD_CONTEXT (f_gpr) = record;
+ DECL_FIELD_CONTEXT (f_fpr) = record;
+ DECL_FIELD_CONTEXT (f_ovf) = record;
+ DECL_FIELD_CONTEXT (f_sav) = record;
+
+ TREE_CHAIN (record) = type_decl;
+ TYPE_NAME (record) = type_decl;
+ TYPE_FIELDS (record) = f_gpr;
+ TREE_CHAIN (f_gpr) = f_fpr;
+ TREE_CHAIN (f_fpr) = f_ovf;
+ TREE_CHAIN (f_ovf) = f_sav;
+
+ layout_type (record);
+
+ /* The correct type is an array type of one element. */
+ return build_array_type (record, build_index_type (size_zero_node));
+}
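+
+/* Illustrative sketch (not part of the original source): the 64-bit record
+ built above corresponds to the familiar C declaration
+
+ typedef struct __va_list_tag {
+ unsigned int gp_offset;
+ unsigned int fp_offset;
+ void *overflow_arg_area;
+ void *reg_save_area;
+ } va_list[1];
+*/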
+
+/* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
+
+static void
+ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS next_cum;
+ rtx save_area = NULL_RTX, mem;
+ rtx label;
+ rtx label_ref;
+ rtx tmp_reg;
+ rtx nsse_reg;
+ int set;
+ tree fntype;
+ int stdarg_p;
+ int i;
+
+ if (!TARGET_64BIT)
+ return;
+
+ if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
+ return;
+
+ /* Indicate that we must allocate space on the stack for the varargs
+ save area. */
+ ix86_save_varrargs_registers = 1;
+
+ cfun->stack_alignment_needed = 128;
+
+ fntype = TREE_TYPE (current_function_decl);
+ stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ /* For varargs, we do not want to skip the dummy va_dcl argument.
+ For stdargs, we do want to skip the last named argument. */
+ next_cum = *cum;
+ if (stdarg_p)
+ function_arg_advance (&next_cum, mode, type, 1);
+
+ if (!no_rtl)
+ save_area = frame_pointer_rtx;
+
+ set = get_varargs_alias_set ();
+
+ for (i = next_cum.regno;
+ i < ix86_regparm
+ && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
+ i++)
+ {
+ mem = gen_rtx_MEM (Pmode,
+ plus_constant (save_area, i * UNITS_PER_WORD));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ emit_move_insn (mem, gen_rtx_REG (Pmode,
+ x86_64_int_parameter_registers[i]));
+ }
+
+ if (next_cum.sse_nregs && cfun->va_list_fpr_size)
+ {
+ /* Now emit code to save the SSE registers. The AX parameter contains the
+ number of SSE parameter registers used to call this function. We use
+ the sse_prologue_save insn template, which produces a computed jump
+ across the SSE saves. We need some preparation work to get this working. */
+
+ label = gen_label_rtx ();
+ label_ref = gen_rtx_LABEL_REF (Pmode, label);
+
+ /* Compute the address to jump to:
+ label - 4*eax + nnamed_sse_arguments*4 (each save insn is 4 bytes). */
+ tmp_reg = gen_reg_rtx (Pmode);
+ nsse_reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
+ emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
+ gen_rtx_MULT (Pmode, nsse_reg,
+ GEN_INT (4))));
+ if (next_cum.sse_regno)
+ emit_move_insn
+ (nsse_reg,
+ gen_rtx_CONST (DImode,
+ gen_rtx_PLUS (DImode,
+ label_ref,
+ GEN_INT (next_cum.sse_regno * 4))));
+ else
+ emit_move_insn (nsse_reg, label_ref);
+ emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
+
+ /* Compute the address of the memory block we save into. We always use a
+ pointer pointing 127 bytes past the first byte to store; this is needed
+ to keep each save instruction's size limited to 4 bytes. */
+ tmp_reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
+ plus_constant (save_area,
+ 8 * REGPARM_MAX + 127)));
+ mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, BITS_PER_WORD);
+
+ /* And finally do the dirty job! */
+ emit_insn (gen_sse_prologue_save (mem, nsse_reg,
+ GEN_INT (next_cum.sse_regno), label));
+ }
+
+}
+
+/* Implement va_start. */
+
+void
+ix86_va_start (tree valist, rtx nextarg)
+{
+ HOST_WIDE_INT words, n_gpr, n_fpr;
+ tree f_gpr, f_fpr, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, t;
+ tree type;
+
+ /* Only the 64-bit target needs something special. */
+ if (!TARGET_64BIT)
+ {
+ std_expand_builtin_va_start (valist, nextarg);
+ return;
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_ovf = TREE_CHAIN (f_fpr);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+
+ /* Count number of gp and fp argument registers used. */
+ words = current_function_args_info.words;
+ n_gpr = current_function_args_info.regno;
+ n_fpr = current_function_args_info.sse_regno;
+
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
+ (int) words, (int) n_gpr, (int) n_fpr);
+
+ if (cfun->va_list_gpr_size)
+ {
+ type = TREE_TYPE (gpr);
+ t = build2 (MODIFY_EXPR, type, gpr,
+ build_int_cst (type, n_gpr * 8));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ if (cfun->va_list_fpr_size)
+ {
+ type = TREE_TYPE (fpr);
+ t = build2 (MODIFY_EXPR, type, fpr,
+ build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ /* Find the overflow area. */
+ type = TREE_TYPE (ovf);
+ t = make_tree (type, virtual_incoming_args_rtx);
+ if (words != 0)
+ t = build2 (PLUS_EXPR, type, t,
+ build_int_cst (type, words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, type, ovf, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
+ {
+ /* Find the register save area.
+ The function prologue saves it right above the stack frame. */
+ type = TREE_TYPE (sav);
+ t = make_tree (type, frame_pointer_rtx);
+ t = build2 (MODIFY_EXPR, type, sav, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+}
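+
+/* Worked example (not part of the original source): for the hypothetical
+
+ void f (int a, double b, ...);
+
+ va_start after the two named arguments sets gp_offset = 1*8 = 8 and
+ fp_offset = 8*REGPARM_MAX + 1*16 = 64, so the first va_arg fetches
+ registers from the save area starting at those offsets. */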
+
+/* Implement va_arg. */
+
+tree
+ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+{
+ static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
+ tree f_gpr, f_fpr, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, t;
+ int size, rsize;
+ tree lab_false, lab_over = NULL_TREE;
+ tree addr, t2;
+ rtx container;
+ int indirect_p = 0;
+ tree ptrtype;
+ enum machine_mode nat_mode;
+
+ /* Only the 64-bit target needs something special. */
+ if (!TARGET_64BIT)
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_ovf = TREE_CHAIN (f_fpr);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+
+ indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect_p)
+ type = build_pointer_type (type);
+ size = int_size_in_bytes (type);
+ rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ nat_mode = type_natural_mode (type);
+ container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
+ REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
+
+ /* Pull the value out of the saved registers. */
+
+ addr = create_tmp_var (ptr_type_node, "addr");
+ DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
+
+ if (container)
+ {
+ int needed_intregs, needed_sseregs;
+ bool need_temp;
+ tree int_addr, sse_addr;
+
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
+
+ examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
+
+ need_temp = (!REG_P (container)
+ && ((needed_intregs && TYPE_ALIGN (type) > 64)
+ || TYPE_ALIGN (type) > 128));
+
+ /* If we are passing a structure, verify that it occupies a consecutive
+ block in the register save area. If not, we need to do moves. */
+ if (!need_temp && !REG_P (container))
+ {
+ /* Verify that all registers are strictly consecutive. */
+ if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
+ {
+ rtx slot = XVECEXP (container, 0, i);
+ if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
+ || INTVAL (XEXP (slot, 1)) != i * 16)
+ need_temp = 1;
+ }
+ }
+ else
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
+ {
+ rtx slot = XVECEXP (container, 0, i);
+ if (REGNO (XEXP (slot, 0)) != (unsigned int) i
+ || INTVAL (XEXP (slot, 1)) != i * 8)
+ need_temp = 1;
+ }
+ }
+ }
+ if (!need_temp)
+ {
+ int_addr = addr;
+ sse_addr = addr;
+ }
+ else
+ {
+ int_addr = create_tmp_var (ptr_type_node, "int_addr");
+ DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
+ sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
+ DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
+ }
+
+ /* First ensure that we fit completely in registers. */
+ if (needed_intregs)
+ {
+ t = build_int_cst (TREE_TYPE (gpr),
+ (REGPARM_MAX - needed_intregs + 1) * 8);
+ t = build2 (GE_EXPR, boolean_type_node, gpr, t);
+ t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+ }
+ if (needed_sseregs)
+ {
+ t = build_int_cst (TREE_TYPE (fpr),
+ (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
+ + REGPARM_MAX * 8);
+ t = build2 (GE_EXPR, boolean_type_node, fpr, t);
+ t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+ }
+
+ /* Compute index to start of area used for integer regs. */
+ if (needed_intregs)
+ {
+ /* int_addr = gpr + sav; */
+ t = fold_convert (ptr_type_node, gpr);
+ t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
+ t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
+ gimplify_and_add (t, pre_p);
+ }
+ if (needed_sseregs)
+ {
+ /* sse_addr = fpr + sav; */
+ t = fold_convert (ptr_type_node, fpr);
+ t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
+ t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
+ gimplify_and_add (t, pre_p);
+ }
+ if (need_temp)
+ {
+ int i;
+ tree temp = create_tmp_var (type, "va_arg_tmp");
+
+ /* addr = &temp; */
+ t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
+ t = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (t, pre_p);
+
+ for (i = 0; i < XVECLEN (container, 0); i++)
+ {
+ rtx slot = XVECEXP (container, 0, i);
+ rtx reg = XEXP (slot, 0);
+ enum machine_mode mode = GET_MODE (reg);
+ tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
+ tree addr_type = build_pointer_type (piece_type);
+ tree src_addr, src;
+ int src_offset;
+ tree dest_addr, dest;
+
+ if (SSE_REGNO_P (REGNO (reg)))
+ {
+ src_addr = sse_addr;
+ src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
+ }
+ else
+ {
+ src_addr = int_addr;
+ src_offset = REGNO (reg) * 8;
+ }
+ src_addr = fold_convert (addr_type, src_addr);
+ src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
+ size_int (src_offset)));
+ src = build_va_arg_indirect_ref (src_addr);
+
+ dest_addr = fold_convert (addr_type, addr);
+ dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
+ size_int (INTVAL (XEXP (slot, 1)))));
+ dest = build_va_arg_indirect_ref (dest_addr);
+
+ t = build2 (MODIFY_EXPR, void_type_node, dest, src);
+ gimplify_and_add (t, pre_p);
+ }
+ }
+
+ if (needed_intregs)
+ {
+ t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
+ gimplify_and_add (t, pre_p);
+ }
+ if (needed_sseregs)
+ {
+ t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
+ gimplify_and_add (t, pre_p);
+ }
+
+ t = build1 (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (t, pre_p);
+
+ t = build1 (LABEL_EXPR, void_type_node, lab_false);
+ append_to_statement_list (t, pre_p);
+ }
+
+ /* ... otherwise out of the overflow area. */
+
+ /* Care for on-stack alignment if needed. */
+ if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
+ || integer_zerop (TYPE_SIZE (type)))
+ t = ovf;
+ else
+ {
+ HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
+ build_int_cst (TREE_TYPE (ovf), align - 1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -align));
+ }
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
+
+ t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (t2, pre_p);
+
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ gimplify_and_add (t, pre_p);
+
+ if (container)
+ {
+ t = build1 (LABEL_EXPR, void_type_node, lab_over);
+ append_to_statement_list (t, pre_p);
+ }
+
+ ptrtype = build_pointer_type (type);
+ addr = fold_convert (ptrtype, addr);
+
+ if (indirect_p)
+ addr = build_va_arg_indirect_ref (addr);
+ return build_va_arg_indirect_ref (addr);
+}
+
+/* Return nonzero if OPNUM's MEM should be matched
+ in movabs* patterns. */
+
+int
+ix86_check_movabs (rtx insn, int opnum)
+{
+ rtx set, mem;
+
+ set = PATTERN (insn);
+ if (GET_CODE (set) == PARALLEL)
+ set = XVECEXP (set, 0, 0);
+ gcc_assert (GET_CODE (set) == SET);
+ mem = XEXP (set, opnum);
+ while (GET_CODE (mem) == SUBREG)
+ mem = SUBREG_REG (mem);
+ gcc_assert (GET_CODE (mem) == MEM);
+ return (volatile_ok || !MEM_VOLATILE_P (mem));
+}
+
+/* Initialize the table of extra 80387 mathematical constants. */
+
+static void
+init_ext_80387_constants (void)
+{
+ static const char * cst[5] =
+ {
+ "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
+ "0.6931471805599453094286904741849753009", /* 1: fldln2 */
+ "1.4426950408889634073876517827983434472", /* 2: fldl2e */
+ "3.3219280948873623478083405569094566090", /* 3: fldl2t */
+ "3.1415926535897932385128089594061862044", /* 4: fldpi */
+ };
+ int i;
+
+ for (i = 0; i < 5; i++)
+ {
+ real_from_string (&ext_80387_constants_table[i], cst[i]);
+ /* Ensure each constant is rounded to XFmode precision. */
+ real_convert (&ext_80387_constants_table[i],
+ XFmode, &ext_80387_constants_table[i]);
+ }
+
+ ext_80387_constants_init = 1;
+}
+
+/* Return a nonzero code identifying the special instruction that can load
+ the constant X, 0 if there is none, or -1 if X is not a float constant. */
+
+int
+standard_80387_constant_p (rtx x)
+{
+ if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
+ return -1;
+
+ if (x == CONST0_RTX (GET_MODE (x)))
+ return 1;
+ if (x == CONST1_RTX (GET_MODE (x)))
+ return 2;
+
+ /* For XFmode constants, try to find a special 80387 instruction when
+ optimizing for size or on those CPUs that benefit from them. */
+ if (GET_MODE (x) == XFmode
+ && (optimize_size || x86_ext_80387_constants & TUNEMASK))
+ {
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (! ext_80387_constants_init)
+ init_ext_80387_constants ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 5; i++)
+ if (real_identical (&r, &ext_80387_constants_table[i]))
+ return i + 3;
+ }
+
+ return 0;
+}
+
+/* Return the opcode of the special instruction to be used to load
+ the constant X. */
+
+const char *
+standard_80387_constant_opcode (rtx x)
+{
+ switch (standard_80387_constant_p (x))
+ {
+ case 1:
+ return "fldz";
+ case 2:
+ return "fld1";
+ case 3:
+ return "fldlg2";
+ case 4:
+ return "fldln2";
+ case 5:
+ return "fldl2e";
+ case 6:
+ return "fldl2t";
+ case 7:
+ return "fldpi";
+ default:
+ gcc_unreachable ();
+ }
+}
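+
+/* Usage note (not part of the original source): standard_80387_constant_p
+ maps 0.0 to 1 and 1.0 to 2, so the switch above emits "fldz" and "fld1"
+ for them; the XFmode constants log10(2), ln(2), log2(e), log2(10) and pi
+ map to codes 3..7 and thus to fldlg2, fldln2, fldl2e, fldl2t and fldpi. */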
+
+/* Return the CONST_DOUBLE representing the 80387 constant that is
+ loaded by the specified special instruction. The argument IDX
+ matches the return value from standard_80387_constant_p. */
+
+rtx
+standard_80387_constant_rtx (int idx)
+{
+ int i;
+
+ if (! ext_80387_constants_init)
+ init_ext_80387_constants ();
+
+ switch (idx)
+ {
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ i = idx - 3;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
+ XFmode);
+}
+
+/* Return 1 if MODE is a valid mode for SSE. */
+static int
+standard_sse_mode_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V16QImode:
+ case V8HImode:
+ case V4SImode:
+ case V2DImode:
+ case V4SFmode:
+ case V2DFmode:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return a nonzero code if X is an FP constant that we can load into an SSE
+ register without using memory. */
+int
+standard_sse_constant_p (rtx x)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
+ return 1;
+ if (vector_all_ones_operand (x, mode)
+ && standard_sse_mode_p (mode))
+ return TARGET_SSE2 ? 2 : -1;
+
+ return 0;
+}
+
+/* Return the opcode of the special instruction to be used to load
+ the constant X. */
+
+const char *
+standard_sse_constant_opcode (rtx insn, rtx x)
+{
+ switch (standard_sse_constant_p (x))
+ {
+ case 1:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "xorps\t%0, %0";
+ else if (get_attr_mode (insn) == MODE_V2DF)
+ return "xorpd\t%0, %0";
+ else
+ return "pxor\t%0, %0";
+ case 2:
+ return "pcmpeqd\t%0, %0";
+ }
+ gcc_unreachable ();
+}
+
+/* Return 1 if OP contains a symbol reference. */
+
+int
+symbolic_reference_mentioned_p (rtx op)
+{
+ const char *fmt;
+ int i;
+
+ if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (op));
+ for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (op, i) - 1; j >= 0; j--)
+ if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
+ return 1;
+ }
+
+ else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return 1 if it is appropriate to emit `ret' instructions in the
+ body of a function. Do this only if the epilogue is simple, needing a
+ couple of insns. Prior to reloading, we can't tell how many registers
+ must be saved, so return 0 then. Return 0 if there is no frame
+ marker to de-allocate. */
+
+int
+ix86_can_use_return_insn_p (void)
+{
+ struct ix86_frame frame;
+
+ if (! reload_completed || frame_pointer_needed)
+ return 0;
+
+ /* Don't allow more than 32K bytes of pop, since that's all we can do
+ with one instruction. */
+ if (current_function_pops_args
+ && current_function_args_size >= 32768)
+ return 0;
+
+ ix86_compute_frame_layout (&frame);
+ return frame.to_allocate == 0 && frame.nregs == 0;
+}
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may
+ be accessed via the stack pointer) in functions that seem suitable. */
+
+int
+ix86_frame_pointer_required (void)
+{
+ /* If we accessed previous frames, then the generated code expects
+ to be able to access the saved ebp value in our frame. */
+ if (cfun->machine->accesses_prev_frame)
+ return 1;
+
+ /* Several x86 OSes need a frame pointer for other reasons,
+ usually pertaining to setjmp. */
+ if (SUBTARGET_FRAME_POINTER_REQUIRED)
+ return 1;
+
+ /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
+ the frame pointer by default. Turn it back on now if we've not
+ got a leaf function. */
+ if (TARGET_OMIT_LEAF_FRAME_POINTER
+ && (!current_function_is_leaf
+ || ix86_current_function_calls_tls_descriptor))
+ return 1;
+
+ if (current_function_profile)
+ return 1;
+
+ return 0;
+}
+
+/* Record that the current function accesses previous call frames. */
+
+void
+ix86_setup_frame_addresses (void)
+{
+ cfun->machine->accesses_prev_frame = 1;
+}
+
+#if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
+# define USE_HIDDEN_LINKONCE 1
+#else
+# define USE_HIDDEN_LINKONCE 0
+#endif
+
+/* APPLE LOCAL 5695218 */
+static GTY(()) int pic_labels_used;
+
+/* Fills in the label name that should be used for a pc thunk for
+ the given register. */
+
+static void
+get_pc_thunk_name (char name[32], unsigned int regno)
+{
+ gcc_assert (!TARGET_64BIT);
+
+ /* APPLE LOCAL deep branch prediction pic-base. */
+ if (USE_HIDDEN_LINKONCE || TARGET_MACHO)
+ sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
+ else
+ ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
+}
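+
+/* Illustrative note (not part of the original source): with
+ USE_HIDDEN_LINKONCE, regno 3 (%ebx) yields the thunk name
+ "__i686.get_pc_thunk.bx", following the reg_names[] entry for that
+ register. */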
+
+
+/* This function generates the pc thunks used for -fpic: each thunk loads
+ its register with the return address of the caller and then returns. */
+
+void
+ix86_file_end (void)
+{
+ rtx xops[2];
+ int regno;
+
+ for (regno = 0; regno < 8; ++regno)
+ {
+ char name[32];
+
+ if (! ((pic_labels_used >> regno) & 1))
+ continue;
+
+ get_pc_thunk_name (name, regno);
+
+#if TARGET_MACHO
+ if (TARGET_MACHO)
+ {
+ switch_to_section (darwin_sections[text_coal_section]);
+ fputs ("\t.weak_definition\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n\t.private_extern\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n", asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+ }
+ else
+#endif
+ if (USE_HIDDEN_LINKONCE)
+ {
+ tree decl;
+
+ decl = build_decl (FUNCTION_DECL, get_identifier (name),
+ error_mark_node);
+ TREE_PUBLIC (decl) = 1;
+ TREE_STATIC (decl) = 1;
+ DECL_ONE_ONLY (decl) = 1;
+
+ (*targetm.asm_out.unique_section) (decl, 0);
+ switch_to_section (get_named_section (decl, NULL, 0));
+
+ (*targetm.asm_out.globalize_label) (asm_out_file, name);
+ fputs ("\t.hidden\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputc ('\n', asm_out_file);
+ ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
+ }
+ /* APPLE LOCAL begin deep branch prediction pic-base */
+#if TARGET_MACHO
+ else if (TARGET_MACHO)
+ {
+ switch_to_section (darwin_sections[text_coal_section]);
+ fputs (".weak_definition\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n.private_extern\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n", asm_out_file);
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+ }
+#endif
+ /* APPLE LOCAL end deep branch prediction pic-base */
+ else
+ {
+ switch_to_section (text_section);
+ ASM_OUTPUT_LABEL (asm_out_file, name);
+ }
+
+ xops[0] = gen_rtx_REG (SImode, regno);
+ xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
+ output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
+ output_asm_insn ("ret", xops);
+ }
+
+ if (NEED_INDICATE_EXEC_STACK)
+ file_end_indicate_exec_stack ();
+}
+
+/* Emit code for the SET_GOT patterns. */
+
+const char *
+output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
+{
+ rtx xops[3];
+
+ xops[0] = dest;
+ xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
+
+ if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
+ {
+ xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
+
+ if (!flag_pic)
+ output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
+ else
+ /* APPLE LOCAL begin dwarf call/pop 5221468 */
+ {
+ output_asm_insn ("call\t%a2", xops);
+
+ /* If necessary, report the effect that the instruction has on
+ the unwind info. */
+#if defined (DWARF2_UNWIND_INFO)
+ if (flag_asynchronous_unwind_tables
+#if !defined (HAVE_prologue)
+ && !ACCUMULATE_OUTGOING_ARGS
+#endif
+ && dwarf2out_do_frame ())
+ {
+ rtx insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (-4)));
+ insn = make_insn_raw (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ dwarf2out_frame_debug (insn, true);
+ }
+#endif
+ }
+ /* APPLE LOCAL end dwarf call/pop 5221468 */
+
+#if TARGET_MACHO
+ /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+ is what will be referenced by the Mach-O PIC subsystem. */
+ if (!label)
+ ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
+#endif
+
+ (*targetm.asm_out.internal_label) (asm_out_file, "L",
+ CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
+
+ if (flag_pic)
+ /* APPLE LOCAL begin dwarf call/pop 5221468 */
+ {
+ output_asm_insn ("pop{l}\t%0", xops);
+
+ /* If necessary, report the effect that the instruction has on
+ the unwind info. We've already done this for delay slots
+ and call instructions. */
+#if defined (DWARF2_UNWIND_INFO)
+ if (flag_asynchronous_unwind_tables
+#if !defined (HAVE_prologue)
+ && !ACCUMULATE_OUTGOING_ARGS
+#endif
+ && dwarf2out_do_frame ())
+ {
+ rtx insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (4)));
+ insn = make_insn_raw (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ dwarf2out_frame_debug (insn, true);
+ }
+#endif
+ }
+ /* APPLE LOCAL end dwarf call/pop 5221468 */
+ }
+ else
+ {
+ char name[32];
+ get_pc_thunk_name (name, REGNO (dest));
+ pic_labels_used |= 1 << REGNO (dest);
+
+ xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
+ xops[2] = gen_rtx_MEM (QImode, xops[2]);
+ output_asm_insn ("call\t%X2", xops);
+ /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+ is what will be referenced by the Mach-O PIC subsystem. */
+#if TARGET_MACHO
+ if (!label)
+ ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
+ else
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (label));
+#endif
+ }
+
+ if (TARGET_MACHO)
+ return "";
+
+ if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
+ output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
+ else
+ output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
+
+ return "";
+}
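+
+/* Illustrative sketch, not part of the original source (label names are
+ made up): with deep branch prediction the sequence above is expected
+ to look roughly like
+
+	call	__i686.get_pc_thunk.bx
+	L1$pb:	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+
+ while without it the pic base is materialized with a call/pop pair:
+
+	call	L2
+	L2:	popl	%ebx
+		addl	$_GLOBAL_OFFSET_TABLE_+[.-L2], %ebx
+
+ The DWARF2_UNWIND_INFO blocks above record the stack effect of that
+ call/pop pair so asynchronous unwind tables stay accurate. */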
+
+/* Generate an "push" pattern for input ARG. */
+
+static rtx
+gen_push (rtx arg)
+{
+ return gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (Pmode,
+ gen_rtx_PRE_DEC (Pmode,
+ stack_pointer_rtx)),
+ arg);
+}
+
+/* Return the number of an unused call-clobbered register if one is
+ available for the entire function, else INVALID_REGNUM. */
+
+static unsigned int
+ix86_select_alt_pic_regnum (void)
+{
+ if (current_function_is_leaf && !current_function_profile
+ && !ix86_current_function_calls_tls_descriptor)
+ {
+ int i;
+ for (i = 2; i >= 0; --i)
+ if (!regs_ever_live[i])
+ return i;
+ }
+
+ return INVALID_REGNUM;
+}
+
+/* APPLE LOCAL begin 5695218 */
+/* Reload may introduce references to the PIC base register
+ that do not directly reference pic_offset_table_rtx.
+ In the rare event we choose an alternate PIC register,
+ walk all the insns and rewrite every reference. */
+/* Run through the insns, changing references to the original
+ PIC_OFFSET_TABLE_REGNUM to our new one. */
+static void
+ix86_globally_replace_pic_reg (unsigned int new_pic_regno)
+{
+ rtx insn;
+ const int nregs = PIC_OFFSET_TABLE_REGNUM + 1;
+ rtx reg_map[FIRST_PSEUDO_REGISTER];
+ memset (reg_map, 0, nregs * sizeof (rtx));
+ pic_offset_table_rtx = gen_rtx_REG (SImode, new_pic_regno);
+ reg_map[REAL_PIC_OFFSET_TABLE_REGNUM] = pic_offset_table_rtx;
+
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ replace_regs (PATTERN (insn), reg_map, nregs, 1);
+ replace_regs (REG_NOTES (insn), reg_map, nregs, 1);
+ }
+#if defined (TARGET_TOC)
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if ( !SIBLING_CALL_P (insn))
+ abort ();
+ }
+#endif
+ }
+ pop_topmost_sequence ();
+
+ regs_ever_live[new_pic_regno] = 1;
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 0;
+#if defined (TARGET_TOC)
+ cfun->machine->substitute_pic_base_reg = new_pic_regno;
+#endif
+}
+/* APPLE LOCAL end 5695218 */
+
+/* Return 1 if we need to save REGNO. */
+static int
+ix86_save_reg (unsigned int regno, int maybe_eh_return)
+{
+ /* APPLE LOCAL begin CW asm blocks */
+ /* For an asm function, we don't save any registers, instead, the
+ user is responsible. */
+ if (cfun->iasm_asm_function)
+ return 0;
+ /* APPLE LOCAL end CW asm blocks */
+
+ if (pic_offset_table_rtx
+ && regno == REAL_PIC_OFFSET_TABLE_REGNUM
+ /* APPLE LOCAL begin 5695218 */
+ && (current_function_uses_pic_offset_table
+ || current_function_profile
+ || current_function_calls_eh_return
+ || current_function_uses_const_pool))
+ /* APPLE LOCAL end 5695218 */
+ {
+ if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
+ return 0;
+ return 1;
+ }
+
+ if (current_function_calls_eh_return && maybe_eh_return)
+ {
+ unsigned i;
+ for (i = 0; ; i++)
+ {
+ unsigned test = EH_RETURN_DATA_REGNO (i);
+ if (test == INVALID_REGNUM)
+ break;
+ if (test == regno)
+ return 1;
+ }
+ }
+
+ if (cfun->machine->force_align_arg_pointer
+ && regno == REGNO (cfun->machine->force_align_arg_pointer))
+ return 1;
+
+ /* APPLE LOCAL begin 5695218 */
+ /* In order to get accurate usage info for the PIC register, we've
+ been forced to break and un-break the call_used_regs and
+ fixed_regs vectors. Ignore them when considering the PIC
+ register. */
+ if (regno == REAL_PIC_OFFSET_TABLE_REGNUM
+ && regs_ever_live[regno])
+ return 1;
+ /* APPLE LOCAL end 5695218 */
+
+ return (regs_ever_live[regno]
+ && !call_used_regs[regno]
+ && !fixed_regs[regno]
+ && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
+}
+
+/* Return number of registers to be saved on the stack. */
+
+static int
+ix86_nsaved_regs (void)
+{
+ int nregs = 0;
+ int regno;
+
+ for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
+ if (ix86_save_reg (regno, true))
+ nregs++;
+ return nregs;
+}
+
+/* Return the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+HOST_WIDE_INT
+ix86_initial_elimination_offset (int from, int to)
+{
+ struct ix86_frame frame;
+ ix86_compute_frame_layout (&frame);
+
+ if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return frame.hard_frame_pointer_offset;
+ else if (from == FRAME_POINTER_REGNUM
+ && to == HARD_FRAME_POINTER_REGNUM)
+ return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
+ else
+ {
+ gcc_assert (to == STACK_POINTER_REGNUM);
+
+ if (from == ARG_POINTER_REGNUM)
+ return frame.stack_pointer_offset;
+
+ gcc_assert (from == FRAME_POINTER_REGNUM);
+ return frame.stack_pointer_offset - frame.frame_pointer_offset;
+ }
+}
+
+/* Fill in the ix86_frame structure describing the frame of the
+ currently compiled function. */
+
+static void
+ix86_compute_frame_layout (struct ix86_frame *frame)
+{
+ HOST_WIDE_INT total_size;
+ unsigned int stack_alignment_needed;
+ HOST_WIDE_INT offset;
+ unsigned int preferred_alignment;
+ HOST_WIDE_INT size = get_frame_size ();
+
+ frame->nregs = ix86_nsaved_regs ();
+ total_size = size;
+
+ stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
+ preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
+
+ /* During reload iterations the number of saved registers can change.
+ Recompute the value as needed. Do not recompute when the number of
+ registers didn't change, as reload makes multiple calls to this function
+ and does not expect the decision to change within a single iteration. */
+ if (!optimize_size
+ && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
+ {
+ int count = frame->nregs;
+
+ cfun->machine->use_fast_prologue_epilogue_nregs = count;
+ /* The fast prologue uses moves instead of pushes to save registers.
+ This is significantly longer, but also executes faster, as modern
+ hardware can execute the moves in parallel but can't do that for
+ push/pop.
+
+ Be careful about choosing which prologue to emit: when the function
+ takes many instructions to execute, we may as well use the slow
+ version, likewise when the function is known to be outside a hot
+ spot (this is known with feedback only). Weight the size of the
+ function by the number of registers to save, as it is cheap to use
+ one or two push instructions but very slow to use many of them. */
+ if (count)
+ count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
+ if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
+ || (flag_branch_probabilities
+ && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
+ cfun->machine->use_fast_prologue_epilogue = false;
+ else
+ cfun->machine->use_fast_prologue_epilogue
+ = !expensive_function_p (count);
+ }
+ if (TARGET_PROLOGUE_USING_MOVE
+ && cfun->machine->use_fast_prologue_epilogue)
+ frame->save_regs_using_mov = true;
+ else
+ frame->save_regs_using_mov = false;
+
+ /* Skip return address and saved base pointer. */
+ offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
+
+ frame->hard_frame_pointer_offset = offset;
+
+ /* Do some sanity checking of stack_alignment_needed and
+ preferred_alignment, since the i386 port is the only one using these
+ features, and they may break easily. */
+
+ gcc_assert (!size || stack_alignment_needed);
+ gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
+ gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
+ gcc_assert (stack_alignment_needed
+ <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
+ stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
+
+ /* Register save area */
+ offset += frame->nregs * UNITS_PER_WORD;
+
+ /* Va-arg area */
+ if (ix86_save_varrargs_registers)
+ {
+ offset += X86_64_VARARGS_SIZE;
+ frame->va_arg_size = X86_64_VARARGS_SIZE;
+ }
+ else
+ frame->va_arg_size = 0;
+
+ /* Align start of frame for local function. */
+ frame->padding1 = ((offset + stack_alignment_needed - 1)
+ & -stack_alignment_needed) - offset;
+
+ offset += frame->padding1;
+
+ /* Frame pointer points here. */
+ frame->frame_pointer_offset = offset;
+
+ offset += size;
+
+ /* Add the outgoing arguments area. It can be skipped if we eliminated
+ all the function calls as dead code.
+ Skipping is however impossible when the function calls alloca, as the
+ alloca expander assumes that the last current_function_outgoing_args_size
+ bytes of the stack frame are unused. */
+ if (ACCUMULATE_OUTGOING_ARGS
+ && (!current_function_is_leaf || current_function_calls_alloca
+ || ix86_current_function_calls_tls_descriptor))
+ {
+ offset += current_function_outgoing_args_size;
+ frame->outgoing_arguments_size = current_function_outgoing_args_size;
+ }
+ else
+ frame->outgoing_arguments_size = 0;
+
+ /* Align stack boundary. Only needed if we're calling another function
+ or using alloca. */
+ if (!current_function_is_leaf || current_function_calls_alloca
+ || ix86_current_function_calls_tls_descriptor)
+ frame->padding2 = ((offset + preferred_alignment - 1)
+ & -preferred_alignment) - offset;
+ else
+ frame->padding2 = 0;
+
+ offset += frame->padding2;
+
+ /* We've reached end of stack frame. */
+ frame->stack_pointer_offset = offset;
+
+ /* The size the prologue needs to allocate. */
+ frame->to_allocate =
+ (size + frame->padding1 + frame->padding2
+ + frame->outgoing_arguments_size + frame->va_arg_size);
+
+ if ((!frame->to_allocate && frame->nregs <= 1)
+ || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
+ frame->save_regs_using_mov = false;
+
+ if (TARGET_RED_ZONE && current_function_sp_is_unchanging
+ && current_function_is_leaf
+ && !ix86_current_function_calls_tls_descriptor)
+ {
+ frame->red_zone_size = frame->to_allocate;
+ if (frame->save_regs_using_mov)
+ frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
+ if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
+ frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
+ }
+ else
+ frame->red_zone_size = 0;
+ frame->to_allocate -= frame->red_zone_size;
+ frame->stack_pointer_offset -= frame->red_zone_size;
+#if 0
+ fprintf (stderr, "nregs: %i\n", frame->nregs);
+ fprintf (stderr, "size: %i\n", size);
+ fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
+ fprintf (stderr, "padding1: %i\n", frame->padding1);
+ fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
+ fprintf (stderr, "padding2: %i\n", frame->padding2);
+ fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
+ fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
+ fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
+ fprintf (stderr, "hard_frame_pointer_offset: %i\n",
+ frame->hard_frame_pointer_offset);
+ fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
+#endif
+}
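+
+/* Worked example, not part of the original source, of the round-up
+ idiom used for padding1 and padding2 above: with offset = 13 and
+ stack_alignment_needed = 8,
+
+	((13 + 8 - 1) & -8) - 13  =  (20 & ~7) - 13  =  16 - 13  =  3
+
+ i.e. three bytes of padding bring the offset to the next multiple of
+ the alignment; the mask trick works because the alignments involved
+ are powers of two. */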
+
+/* Emit code to save registers in the prologue. */
+
+static void
+ix86_emit_save_regs (void)
+{
+ unsigned int regno;
+ rtx insn;
+
+ for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
+ if (ix86_save_reg (regno, true))
+ {
+ insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
+
+/* Emit code to save registers using MOV insns. The first register
+ is saved at POINTER + OFFSET. */
+static void
+ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
+{
+ unsigned int regno;
+ rtx insn;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (ix86_save_reg (regno, true))
+ {
+ insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
+ Pmode, offset),
+ gen_rtx_REG (Pmode, regno));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ offset += UNITS_PER_WORD;
+ }
+}
+
+/* Expand prologue or epilogue stack adjustment.
+ The pattern exists to put a dependency on all ebp-based memory accesses.
+ STYLE should be negative if the instructions should be marked as frame
+ related, zero if the %r11 register is live and cannot be freely used, and
+ positive otherwise. */
+
+static void
+pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
+{
+ rtx insn;
+
+ if (! TARGET_64BIT)
+ insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
+ else if (x86_64_immediate_operand (offset, DImode))
+ insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
+ else
+ {
+ rtx r11;
+ /* r11 is used by indirect sibcall return as well, set before the
+ epilogue and used after the epilogue. ATM indirect sibcall
+ shouldn't be used together with huge frame sizes in one
+ function because of the frame_size check in sibcall.c. */
+ gcc_assert (style);
+ r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
+ insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
+ /* APPLE LOCAL async unwind info 5949469 */
+ if (style < 0 /* || flag_asynchronous_unwind_tables*/)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
+ offset));
+ }
+ if (style < 0)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* APPLE LOCAL begin async unwind info 5949350 5949469 */
+#if 0
+ else if (flag_asynchronous_unwind_tables
+ && (src == hard_frame_pointer_rtx
+ || src == stack_pointer_rtx))
+ RTX_FRAME_RELATED_P (insn) = 1;
+#endif
+ /* APPLE LOCAL end async unwind info 5949350 5949469 */
+}
+
+/* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
+
+static rtx
+ix86_internal_arg_pointer (void)
+{
+ bool has_force_align_arg_pointer =
+ (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
+ TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
+ if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
+ && DECL_NAME (current_function_decl)
+ && MAIN_NAME_P (DECL_NAME (current_function_decl))
+ && DECL_FILE_SCOPE_P (current_function_decl))
+ || ix86_force_align_arg_pointer
+ || has_force_align_arg_pointer)
+ {
+ /* Nested functions can't realign the stack due to a register
+ conflict. */
+ if (DECL_CONTEXT (current_function_decl)
+ && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
+ {
+ if (ix86_force_align_arg_pointer)
+ warning (0, "-mstackrealign ignored for nested functions");
+ if (has_force_align_arg_pointer)
+ error ("%s not supported for nested functions",
+ ix86_force_align_arg_pointer_string);
+ return virtual_incoming_args_rtx;
+ }
+ cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
+ return copy_to_reg (cfun->machine->force_align_arg_pointer);
+ }
+ else
+ return virtual_incoming_args_rtx;
+}
+
+/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
+ This is called from dwarf2out.c to emit call frame instructions
+ for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
+static void
+ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
+{
+ rtx unspec = SET_SRC (pattern);
+ gcc_assert (GET_CODE (unspec) == UNSPEC);
+
+ switch (index)
+ {
+ case UNSPEC_REG_SAVE:
+ dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
+ SET_DEST (pattern));
+ break;
+ case UNSPEC_DEF_CFA:
+ dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
+ INTVAL (XVECEXP (unspec, 0, 0)));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* APPLE LOCAL begin 3399553 */
+/* Calculate the value of FLT_ROUNDS into DEST.
+
+ The rounding mode is in bits 11:10 of FPSR, and has the following
+ settings:
+ 00 Round to nearest
+ 01 Round to -inf
+ 10 Round to +inf
+ 11 Round to 0
+
+ FLT_ROUNDS, on the other hand, expects the following:
+ -1 Undefined
+ 0 Round to 0
+ 1 Round to nearest
+ 2 Round to +inf
+ 3 Round to -inf
+
+ To perform the conversion, we do:
+ (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
+*/
+void
+ix86_expand_flt_rounds (rtx dest)
+{
+ rtx mem = assign_stack_temp (HImode, GET_MODE_SIZE (HImode), 0);
+ rtx temp = gen_reg_rtx (SImode);
+
+ /* Step #1: Read FPSR. Unfortunately, this can only be done into a
+ 16-bit memory location. */
+ emit_insn (gen_x86_fnstcw_1 (mem));
+
+ /* Step #2: Copy into a register. */
+ emit_insn (gen_zero_extendhisi2 (dest, mem));
+
+ /* Step #3: Perform conversion described above. */
+ emit_insn (gen_andsi3 (temp, dest, GEN_INT (0x400)));
+ emit_insn (gen_andsi3 (dest, dest, GEN_INT (0x800)));
+ emit_insn (gen_lshrsi3 (temp, temp, GEN_INT (9)));
+ emit_insn (gen_lshrsi3 (dest, dest, GEN_INT (11)));
+ emit_insn (gen_iorsi3 (dest, dest, temp));
+ emit_insn (gen_addsi3 (dest, dest, const1_rtx));
+ emit_insn (gen_andsi3 (dest, dest, GEN_INT (3)));
+}
+/* APPLE LOCAL end 3399553 */
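+
+/* Verification table, not part of the original source, for the
+ conversion above, where c = bit11 | (bit10 << 1):
+
+	FPSR 11:10    c    (c + 1) & 3    meaning
+	    00        0         1         Round to nearest
+	    01        2         3         Round to -inf
+	    10        1         2         Round to +inf
+	    11        3         0         Round to 0        */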
+
+/* APPLE LOCAL begin fix-and-continue x86 */
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+/* APPLE LOCAL end fix-and-continue x86 */
+
+/* Expand the prologue into a bunch of separate insns. */
+
+void
+ix86_expand_prologue (void)
+{
+ rtx insn;
+ bool pic_reg_used;
+ struct ix86_frame frame;
+ HOST_WIDE_INT allocate;
+
+ /* APPLE LOCAL begin fix-and-continue x86 */
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ /* gdb on darwin arranges to forward a function from the old
+ address by modifying the first 6 instructions of the function
+ to branch to the overriding function. This is necessary to
+ permit function pointers that point to the old function to
+ actually forward to the new function. */
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+ /* APPLE LOCAL end fix-and-continue x86 */
+
+ ix86_compute_frame_layout (&frame);
+
+ if (cfun->machine->force_align_arg_pointer)
+ {
+ rtx x, y;
+
+ /* Grab the argument pointer. */
+ x = plus_constant (stack_pointer_rtx, 4);
+ y = cfun->machine->force_align_arg_pointer;
+ insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* The unwind info consists of two parts: install the fafp as the cfa,
+ and record the fafp as the "save register" of the stack pointer.
+ The latter is there so that the unwinder can see where it should
+ restore the stack pointer across the "and" insn. */
+ x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
+ x = gen_rtx_SET (VOIDmode, y, x);
+ RTX_FRAME_RELATED_P (x) = 1;
+ y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
+ UNSPEC_REG_SAVE);
+ y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
+ RTX_FRAME_RELATED_P (y) = 1;
+ x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
+ x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
+ REG_NOTES (insn) = x;
+
+ /* Align the stack. */
+ emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-16)));
+
+ /* And here we cheat like madmen with the unwind info. We force the
+ cfa register back to sp+4, which is exactly what it was at the
+ start of the function. Re-pushing the return address results in
+ the return at the same spot relative to the cfa, and thus is
+ correct wrt the unwind info. */
+ x = cfun->machine->force_align_arg_pointer;
+ x = gen_frame_mem (Pmode, plus_constant (x, -4));
+ insn = emit_insn (gen_push (x));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ x = GEN_INT (4);
+ x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
+ x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
+ x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
+ REG_NOTES (insn) = x;
+ }
+
+ /* Note: AT&T enter does NOT have reversed args. Enter is probably
+ slower on all targets. Also sdb doesn't like it. */
+
+ if (frame_pointer_needed)
+ {
+ insn = emit_insn (gen_push (hard_frame_pointer_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ allocate = frame.to_allocate;
+
+ if (!frame.save_regs_using_mov)
+ ix86_emit_save_regs ();
+ else
+ allocate += frame.nregs * UNITS_PER_WORD;
+
+ /* When using the red zone we may start saving registers before allocating
+ the stack frame, saving one cycle of the prologue. */
+ if (TARGET_RED_ZONE && frame.save_regs_using_mov)
+ ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
+ : stack_pointer_rtx,
+ -frame.nregs * UNITS_PER_WORD);
+
+ if (allocate == 0)
+ ;
+ else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
+ /* APPLE LOCAL begin CW asm blocks */
+ {
+ if (! cfun->iasm_asm_function)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-allocate), -1);
+ }
+ /* APPLE LOCAL end CW asm blocks */
+ else
+ {
+ /* Only valid for Win32. */
+ rtx eax = gen_rtx_REG (SImode, 0);
+ bool eax_live = ix86_eax_live_at_start_p ();
+ rtx t;
+
+ gcc_assert (!TARGET_64BIT);
+
+ if (eax_live)
+ {
+ emit_insn (gen_push (eax));
+ allocate -= 4;
+ }
+
+ emit_move_insn (eax, GEN_INT (allocate));
+
+ insn = emit_insn (gen_allocate_stack_worker (eax));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
+ t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ t, REG_NOTES (insn));
+
+ if (eax_live)
+ {
+ if (frame_pointer_needed)
+ t = plus_constant (hard_frame_pointer_rtx,
+ allocate
+ - frame.to_allocate
+ - frame.nregs * UNITS_PER_WORD);
+ else
+ t = plus_constant (stack_pointer_rtx, allocate);
+ emit_move_insn (eax, gen_rtx_MEM (SImode, t));
+ }
+ }
+
+ if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
+ {
+ if (!frame_pointer_needed || !frame.to_allocate)
+ ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
+ else
+ ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
+ -frame.nregs * UNITS_PER_WORD);
+ }
+
+ pic_reg_used = false;
+ /* APPLE LOCAL begin 5695218 */
+ if (pic_offset_table_rtx && regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
+ && !TARGET_64BIT)
+ {
+ unsigned int alt_pic_reg_used;
+
+ alt_pic_reg_used = ix86_select_alt_pic_regnum ();
+ /* APPLE LOCAL end 5695218 */
+
+ if (alt_pic_reg_used != INVALID_REGNUM)
+ /* APPLE LOCAL begin 5695218 */
+ /* REGNO (pic_offset_table_rtx) = alt_pic_reg_used; */
+ ix86_globally_replace_pic_reg (alt_pic_reg_used);
+ /* APPLE LOCAL end 5695218 */
+
+ pic_reg_used = true;
+ }
+
+ if (pic_reg_used)
+ {
+ if (TARGET_64BIT)
+ insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
+ else
+ insn = emit_insn (gen_set_got (pic_offset_table_rtx));
+
+ /* Even with accurate pre-reload life analysis, we can wind up
+ deleting all references to the pic register after reload.
+ Consider if cross-jumping unifies two sides of a branch
+ controlled by a comparison vs the only read from a global.
+ In which case, allow the set_got to be deleted, though we're
+ too late to do anything about the ebx save in the prologue. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
+ }
+
+ /* Prevent function calls from being scheduled before the call to mcount.
+ In the pic_reg_used case, make sure that the got load isn't deleted. */
+ if (current_function_profile)
+ emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
+}
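+
+/* Illustrative sketch, not part of the original source: for a small
+ 32-bit function with a frame pointer and one call-saved register, the
+ expansion above typically yields something like
+
+	pushl	%ebp
+	movl	%esp, %ebp
+	pushl	%esi
+	subl	$N, %esp	# N = frame.to_allocate
+
+ with mov-based saves after the subtraction instead of the push when
+ save_regs_using_mov is set. */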
+
+/* Emit code to restore saved registers using MOV insns. First register
+ is restored from POINTER + OFFSET. */
+static void
+ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
+ int maybe_eh_return)
+{
+ int regno;
+ rtx base_address = gen_rtx_MEM (Pmode, pointer);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (ix86_save_reg (regno, maybe_eh_return))
+ {
+ /* Ensure that adjust_address won't be forced to produce pointer
+ out of range allowed by x86-64 instruction set. */
+ if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
+ {
+ rtx r11;
+
+ r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
+ emit_move_insn (r11, GEN_INT (offset));
+ emit_insn (gen_adddi3 (r11, r11, pointer));
+ base_address = gen_rtx_MEM (Pmode, r11);
+ offset = 0;
+ }
+ emit_move_insn (gen_rtx_REG (Pmode, regno),
+ adjust_address (base_address, Pmode, offset));
+ offset += UNITS_PER_WORD;
+ }
+}
+
+/* Restore function stack, frame, and registers. */
+
+void
+ix86_expand_epilogue (int style)
+{
+ int regno;
+ int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
+ struct ix86_frame frame;
+ HOST_WIDE_INT offset;
+
+ ix86_compute_frame_layout (&frame);
+
+ /* Calculate start of saved registers relative to ebp. Special care
+ must be taken for the normal return case of a function using
+ eh_return: the eax and edx registers are marked as saved, but not
+ restored along this path. */
+ offset = frame.nregs;
+ if (current_function_calls_eh_return && style != 2)
+ offset -= 2;
+ offset *= -UNITS_PER_WORD;
+
+ /* APPLE LOCAL begin CW asm blocks */
+ /* For an asm function, don't generate an epilogue. */
+ if (cfun->iasm_asm_function)
+ {
+ emit_jump_insn (gen_return_internal ());
+ return;
+ }
+ /* APPLE LOCAL end CW asm blocks */
+
+ /* If we're only restoring one register and sp is not valid then
+ use a move instruction to restore the register, since it's
+ less work than reloading sp and popping the register.
+
+ The default code results in a stack adjustment using an add/lea
+ instruction, while this code results in a LEAVE instruction (or
+ discrete equivalent), so it is profitable in some other cases as
+ well. Especially when there are no registers to restore. We also
+ use this code when TARGET_USE_LEAVE and there is exactly one register
+ to pop. This heuristic may need some tuning in the future. */
+ if ((!sp_valid && frame.nregs <= 1)
+ || (TARGET_EPILOGUE_USING_MOVE
+ && cfun->machine->use_fast_prologue_epilogue
+ && (frame.nregs > 1 || frame.to_allocate))
+ || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
+ || (frame_pointer_needed && TARGET_USE_LEAVE
+ && cfun->machine->use_fast_prologue_epilogue
+ && frame.nregs == 1)
+ || current_function_calls_eh_return)
+ {
+ /* Restore registers. We can use ebp or esp to address the memory
+ locations. If both are available, default to ebp, since offsets
+ are known to be small. The only exception is esp pointing directly to
+ the end of the block of saved registers, where we may simplify the
+ addressing mode. */
+
+ if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
+ ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
+ frame.to_allocate, style == 2);
+ else
+ ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
+ offset, style == 2);
+
+ /* eh_return epilogues need %ecx added to the stack pointer. */
+ if (style == 2)
+ {
+ rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
+
+ if (frame_pointer_needed)
+ {
+ tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
+ tmp = plus_constant (tmp, UNITS_PER_WORD);
+ emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
+
+ tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+ emit_move_insn (hard_frame_pointer_rtx, tmp);
+
+ pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
+ const0_rtx, style);
+ }
+ else
+ {
+ tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
+ tmp = plus_constant (tmp, (frame.to_allocate
+ + frame.nregs * UNITS_PER_WORD));
+ emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
+ }
+ }
+ else if (!frame_pointer_needed)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (frame.to_allocate
+ + frame.nregs * UNITS_PER_WORD),
+ style);
+ /* If not an i386, mov & pop is faster than "leave". */
+ else if (TARGET_USE_LEAVE || optimize_size
+ || !cfun->machine->use_fast_prologue_epilogue)
+ emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
+ else
+ {
+ pro_epilogue_adjust_stack (stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ const0_rtx, style);
+ if (TARGET_64BIT)
+ emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
+ else
+ emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
+ }
+ }
+ else
+ {
+ /* First step is to deallocate the stack frame so that we can
+ pop the registers. */
+ if (!sp_valid)
+ {
+ gcc_assert (frame_pointer_needed);
+ pro_epilogue_adjust_stack (stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ GEN_INT (offset), style);
+ }
+ else if (frame.to_allocate)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (frame.to_allocate), style);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (ix86_save_reg (regno, false))
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
+ else
+ emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
+ }
+ if (frame_pointer_needed)
+ {
+ /* Leave results in shorter dependency chains on CPUs that are
+ able to grok it fast. */
+ if (TARGET_USE_LEAVE)
+ emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
+ else if (TARGET_64BIT)
+ emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
+ else
+ emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
+ }
+ }
+
+ if (cfun->machine->force_align_arg_pointer)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ cfun->machine->force_align_arg_pointer,
+ GEN_INT (-4)));
+ }
+
+ /* Sibcall epilogues don't want a return instruction. */
+ if (style == 0)
+ return;
+
+ if (current_function_pops_args && current_function_args_size)
+ {
+ rtx popc = GEN_INT (current_function_pops_args);
+
+ /* i386 can only pop 64K bytes. If asked to pop more, pop
+ return address, do explicit add, and jump indirectly to the
+ caller. */
+
+ if (current_function_pops_args >= 65536)
+ {
+ rtx ecx = gen_rtx_REG (SImode, 2);
+
+ /* There is no "pascal" calling convention in 64bit ABI. */
+ gcc_assert (!TARGET_64BIT);
+
+ emit_insn (gen_popsi1 (ecx));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
+ emit_jump_insn (gen_return_indirect_internal (ecx));
+ }
+ else
+ emit_jump_insn (gen_return_pop_internal (popc));
+ }
+ else
+ emit_jump_insn (gen_return_internal ());
+}
+
+/* Reset state that compiling the function may have modified. */
+
+static void
+ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ if (pic_offset_table_rtx)
+ REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
+#if TARGET_MACHO
+ /* Mach-O doesn't support labels at the end of objects, so if
+ it looks like we might want one, insert a NOP. */
+ {
+ rtx insn = get_last_insn ();
+ while (insn
+ && NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
+ insn = PREV_INSN (insn);
+ if (insn
+ && (LABEL_P (insn)
+ || (NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
+ fputs ("\tnop\n", file);
+ }
+#endif
+
+}
+
+/* Extract the parts of an RTL expression that is a valid memory address
+ for an instruction. Return 0 if the structure of the address is
+ grossly off. Return -1 if the address contains ASHIFT, so it is not
+ strictly valid, but still used for computing the length of the lea
+ instruction. */
+
+int
+ix86_decompose_address (rtx addr, struct ix86_address *out)
+{
+ rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
+ rtx base_reg, index_reg;
+ HOST_WIDE_INT scale = 1;
+ rtx scale_rtx = NULL_RTX;
+ int retval = 1;
+ enum ix86_address_seg seg = SEG_DEFAULT;
+
+ if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
+ base = addr;
+ else if (GET_CODE (addr) == PLUS)
+ {
+ rtx addends[4], op;
+ int n = 0, i;
+
+ op = addr;
+ do
+ {
+ if (n >= 4)
+ return 0;
+ addends[n++] = XEXP (op, 1);
+ op = XEXP (op, 0);
+ }
+ while (GET_CODE (op) == PLUS);
+ if (n >= 4)
+ return 0;
+ addends[n] = op;
+
+ for (i = n; i >= 0; --i)
+ {
+ op = addends[i];
+ switch (GET_CODE (op))
+ {
+ case MULT:
+ if (index)
+ return 0;
+ index = XEXP (op, 0);
+ scale_rtx = XEXP (op, 1);
+ break;
+
+ case UNSPEC:
+ if (XINT (op, 1) == UNSPEC_TP
+ && TARGET_TLS_DIRECT_SEG_REFS
+ && seg == SEG_DEFAULT)
+ seg = TARGET_64BIT ? SEG_FS : SEG_GS;
+ else
+ return 0;
+ break;
+
+ case REG:
+ case SUBREG:
+ if (!base)
+ base = op;
+ else if (!index)
+ index = op;
+ else
+ return 0;
+ break;
+
+ case CONST:
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (disp)
+ return 0;
+ disp = op;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+ }
+ else if (GET_CODE (addr) == MULT)
+ {
+ index = XEXP (addr, 0); /* index*scale */
+ scale_rtx = XEXP (addr, 1);
+ }
+ else if (GET_CODE (addr) == ASHIFT)
+ {
+ rtx tmp;
+
+ /* We're called for lea too, which implements ashift on occasion. */
+ index = XEXP (addr, 0);
+ tmp = XEXP (addr, 1);
+ if (GET_CODE (tmp) != CONST_INT)
+ return 0;
+ scale = INTVAL (tmp);
+ if ((unsigned HOST_WIDE_INT) scale > 3)
+ return 0;
+ scale = 1 << scale;
+ retval = -1;
+ }
+ else
+ disp = addr; /* displacement */
+
+ /* Extract the integral value of scale. */
+ if (scale_rtx)
+ {
+ if (GET_CODE (scale_rtx) != CONST_INT)
+ return 0;
+ scale = INTVAL (scale_rtx);
+ }
+
+ base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
+ index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
+
+ /* Allow the arg pointer and the stack pointer as an index if there is
+ no scaling. */
+ if (base_reg && index_reg && scale == 1
+ && (index_reg == arg_pointer_rtx
+ || index_reg == frame_pointer_rtx
+ || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
+ {
+ rtx tmp;
+ tmp = base, base = index, index = tmp;
+ tmp = base_reg, base_reg = index_reg, index_reg = tmp;
+ }
+
+ /* Special case: %ebp cannot be encoded as a base without a displacement. */
+ if ((base_reg == hard_frame_pointer_rtx
+ || base_reg == frame_pointer_rtx
+ || base_reg == arg_pointer_rtx) && !disp)
+ disp = const0_rtx;
+
+ /* Special case: on K6, [%esi] forces the instruction to be vector
+ decoded. Avoid this by transforming it to [%esi+0]. */
+ if (ix86_tune == PROCESSOR_K6 && !optimize_size
+ && base_reg && !index_reg && !disp
+ && REG_P (base_reg)
+ && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
+ disp = const0_rtx;
+
+ /* Special case: encode reg+reg instead of reg*2. */
+ if (!base && index && scale && scale == 2)
+ base = index, base_reg = index_reg, scale = 1;
+
+ /* Special case: scaling cannot be encoded without base or displacement. */
+ if (!base && !disp && index && scale != 1)
+ disp = const0_rtx;
+
+ out->base = base;
+ out->index = index;
+ out->disp = disp;
+ out->scale = scale;
+ out->seg = seg;
+
+ return retval;
+}
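+
+/* Worked example, not part of the original source: the address
+
+	(plus (plus (mult (reg %eax) (const_int 4)) (reg %ebx))
+	      (const_int 12))
+
+ decomposes into base = %ebx, index = %eax, scale = 4, disp = 12,
+ i.e. the operand 12(%ebx,%eax,4) in AT&T syntax. */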
+
+/* Return the cost of the memory address x.
+ For i386, it is better to use a complex address than to let gcc copy
+ the address into a reg and make a new pseudo. But not if the address
+ requires two regs - that would mean more pseudos with longer
+ lifetimes. */
+static int
+ix86_address_cost (rtx x)
+{
+ struct ix86_address parts;
+ int cost = 1;
+ int ok = ix86_decompose_address (x, &parts);
+
+ gcc_assert (ok);
+
+ if (parts.base && GET_CODE (parts.base) == SUBREG)
+ parts.base = SUBREG_REG (parts.base);
+ if (parts.index && GET_CODE (parts.index) == SUBREG)
+ parts.index = SUBREG_REG (parts.index);
+
+ /* More complex memory references are better. */
+ if (parts.disp && parts.disp != const0_rtx)
+ cost--;
+ if (parts.seg != SEG_DEFAULT)
+ cost--;
+
+ /* Attempt to minimize number of registers in the address. */
+ if ((parts.base
+ && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
+ || (parts.index
+ && (!REG_P (parts.index)
+ || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
+ cost++;
+
+ if (parts.base
+ && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
+ && parts.index
+ && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
+ && parts.base != parts.index)
+ cost++;
+
+ /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
+ since its predecode logic can't detect the length of instructions
+ and it degenerates to vector decoding. Increase the cost of such
+ addresses here. The penalty is minimally 2 cycles. It may be
+ worthwhile to split such addresses or even refuse them at all.
+
+ The following addressing modes are affected:
+ [base+scale*index]
+ [scale*index+disp]
+ [base+index]
+
+ The first and last case may be avoidable by explicitly coding the zero
+ into the memory address, but I don't have an AMD-K6 machine handy to
+ check this theory. */
+
+ if (TARGET_K6
+ && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
+ || (parts.disp && !parts.base && parts.index && parts.scale != 1)
+ || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
+ cost += 10;
+
+ return cost;
+}
+
+/* If X is a machine specific address (i.e. a symbol or label being
+ referenced as a displacement from the GOT implemented using an
+ UNSPEC), then return the base term. Otherwise return X. */
+
+rtx
+ix86_find_base_term (rtx x)
+{
+ rtx term;
+
+ if (TARGET_64BIT)
+ {
+ if (GET_CODE (x) != CONST)
+ return x;
+ term = XEXP (x, 0);
+ if (GET_CODE (term) == PLUS
+ && (GET_CODE (XEXP (term, 1)) == CONST_INT
+ || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
+ term = XEXP (term, 0);
+ if (GET_CODE (term) != UNSPEC
+ || XINT (term, 1) != UNSPEC_GOTPCREL)
+ return x;
+
+ term = XVECEXP (term, 0, 0);
+
+ if (GET_CODE (term) != SYMBOL_REF
+ && GET_CODE (term) != LABEL_REF)
+ return x;
+
+ return term;
+ }
+
+ term = ix86_delegitimize_address (x);
+
+ if (GET_CODE (term) != SYMBOL_REF
+ && GET_CODE (term) != LABEL_REF)
+ return x;
+
+ return term;
+}
+
+/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
+ this is used to form addresses of local data when -fPIC is in
+ use. */
+
+static bool
+darwin_local_data_pic (rtx disp)
+{
+ if (GET_CODE (disp) == MINUS)
+ {
+ if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
+ || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
+ if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
+ {
+ const char *sym_name = XSTR (XEXP (disp, 1), 0);
+ if (! strcmp (sym_name, "<pic base>"))
+ return true;
+ }
+ }
+
+ return false;
+}
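+
+/* Illustrative example, not part of the original source: the shape
+ accepted above is
+
+	(minus (symbol_ref "_foo") (symbol_ref "<pic base>"))
+
+ i.e. a local symbol expressed as an offset from the function's Mach-O
+ pic base label. */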
+
+/* Determine if a given RTX is a valid constant. We already know this
+ satisfies CONSTANT_P. */
+
+bool
+legitimate_constant_p (rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return false;
+ x = XEXP (x, 0);
+ }
+
+ if (TARGET_MACHO && darwin_local_data_pic (x))
+ return true;
+
+ /* Only some unspecs are valid as "constants". */
+ if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_GOTOFF:
+ return TARGET_64BIT;
+ case UNSPEC_TPOFF:
+ case UNSPEC_NTPOFF:
+ x = XVECEXP (x, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
+ case UNSPEC_DTPOFF:
+ x = XVECEXP (x, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
+ default:
+ return false;
+ }
+
+ /* We must have drilled down to a symbol. */
+ if (GET_CODE (x) == LABEL_REF)
+ return true;
+ if (GET_CODE (x) != SYMBOL_REF)
+ return false;
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ /* TLS symbols are never valid. */
+ if (SYMBOL_REF_TLS_MODEL (x))
+ return false;
+ /* APPLE LOCAL begin dynamic-no-pic */
+#if TARGET_MACHO
+ if (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P)
+ return machopic_symbol_defined_p (x);
+#endif
+ break;
+
+ case PLUS:
+ {
+ rtx left = XEXP (x, 0);
+ rtx right = XEXP (x, 1);
+ bool left_is_constant = legitimate_constant_p (left);
+ bool right_is_constant = legitimate_constant_p (right);
+ return left_is_constant && right_is_constant;
+ }
+ break;
+ /* APPLE LOCAL end dynamic-no-pic */
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == TImode
+ && x != CONST0_RTX (TImode)
+ && !TARGET_64BIT)
+ return false;
+ break;
+
+ case CONST_VECTOR:
+ /* APPLE LOCAL begin radar 4874197 mainline candidate */
+ if (standard_sse_constant_p (x))
+ /* APPLE LOCAL end radar 4874197 mainline candidate */
+ return true;
+ return false;
+
+ default:
+ break;
+ }
+
+ /* Otherwise we handle everything else in the move patterns. */
+ return true;
+}
+
+/* Determine if it's legal to put X into the constant pool. This
+ is not possible for the address of thread-local symbols, which
+ is checked above. */
+
+static bool
+ix86_cannot_force_const_mem (rtx x)
+{
+ /* We can always put integral constants and vectors in memory. */
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST_VECTOR:
+ return false;
+
+ default:
+ break;
+ }
+ return !legitimate_constant_p (x);
+}
+
+/* Determine if a given RTX is a valid constant address. */
+
+bool
+constant_address_p (rtx x)
+{
+ return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
+}
+
+/* Nonzero if the constant value X is a legitimate general operand
+ when generating PIC code. It is given that flag_pic is on and
+ that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+bool
+legitimate_pic_operand_p (rtx x)
+{
+ rtx inner;
+
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ inner = XEXP (x, 0);
+ if (GET_CODE (inner) == PLUS
+ && GET_CODE (XEXP (inner, 1)) == CONST_INT)
+ inner = XEXP (inner, 0);
+
+ /* Only some unspecs are valid as "constants". */
+ if (GET_CODE (inner) == UNSPEC)
+ switch (XINT (inner, 1))
+ {
+ case UNSPEC_GOTOFF:
+ return TARGET_64BIT;
+ case UNSPEC_TPOFF:
+ x = XVECEXP (inner, 0, 0);
+ return (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
+ default:
+ return false;
+ }
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return legitimate_pic_address_disp_p (x);
+
+ default:
+ return true;
+ }
+}
+
+/* Determine if a given CONST RTX is a valid memory displacement
+ in PIC mode. */
+
+int
+legitimate_pic_address_disp_p (rtx disp)
+{
+ bool saw_plus;
+
+ /* In 64bit mode we can allow direct addresses of symbols and labels
+ when they are not dynamic symbols. */
+ if (TARGET_64BIT)
+ {
+ rtx op0 = disp, op1;
+
+ switch (GET_CODE (disp))
+ {
+ case LABEL_REF:
+ return true;
+
+ case CONST:
+ if (GET_CODE (XEXP (disp, 0)) != PLUS)
+ break;
+ op0 = XEXP (XEXP (disp, 0), 0);
+ op1 = XEXP (XEXP (disp, 0), 1);
+ if (GET_CODE (op1) != CONST_INT
+ || INTVAL (op1) >= 16*1024*1024
+ || INTVAL (op1) < -16*1024*1024)
+ break;
+ if (GET_CODE (op0) == LABEL_REF)
+ return true;
+ if (GET_CODE (op0) != SYMBOL_REF)
+ break;
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ /* TLS references should always be enclosed in UNSPEC. */
+ if (SYMBOL_REF_TLS_MODEL (op0))
+ return false;
+ /* APPLE LOCAL begin fix-and-continue 6227434 */
+#if TARGET_MACHO
+ if (machopic_data_defined_p (op0))
+ return true;
+
+ /* Under -mfix-and-continue, even local storage is
+ addressed via the GOT, so that the value of local
+ statics is preserved when a function is "fixed." */
+ if (indirect_data (op0))
+ return false;
+#endif
+ /* APPLE LOCAL end fix-and-continue 6227434 */
+ if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
+ return true;
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (GET_CODE (disp) != CONST)
+ return 0;
+ disp = XEXP (disp, 0);
+
+ if (TARGET_64BIT)
+ {
+ /* It is unsafe to allow PLUS expressions; this limits the allowed
+ distance of GOT references. We should not need these anyway. */
+ if (GET_CODE (disp) != UNSPEC
+ || (XINT (disp, 1) != UNSPEC_GOTPCREL
+ && XINT (disp, 1) != UNSPEC_GOTOFF))
+ return 0;
+
+ if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
+ && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
+ return 0;
+ return 1;
+ }
+
+ saw_plus = false;
+ if (GET_CODE (disp) == PLUS)
+ {
+ if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
+ return 0;
+ disp = XEXP (disp, 0);
+ saw_plus = true;
+ }
+
+ if (TARGET_MACHO && darwin_local_data_pic (disp))
+ return 1;
+
+ if (GET_CODE (disp) != UNSPEC)
+ return 0;
+
+ switch (XINT (disp, 1))
+ {
+ case UNSPEC_GOT:
+ if (saw_plus)
+ return false;
+ return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
+ case UNSPEC_GOTOFF:
+ /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
+ While the ABI also specifies a 32bit relocation, we don't produce
+ it in the small PIC model at all. */
+ if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
+ || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
+ && !TARGET_64BIT)
+ return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
+ return false;
+ case UNSPEC_GOTTPOFF:
+ case UNSPEC_GOTNTPOFF:
+ case UNSPEC_INDNTPOFF:
+ if (saw_plus)
+ return false;
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
+ case UNSPEC_NTPOFF:
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
+ case UNSPEC_DTPOFF:
+ disp = XVECEXP (disp, 0, 0);
+ return (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
+ }
+
+ return 0;
+}
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
+ memory address for an instruction. The MODE argument is the machine mode
+ for the MEM expression that wants to use this address.
+
+ It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
+ convert common non-canonical forms to canonical form so that they will
+ be recognized. */
+
+int
+legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
+{
+ struct ix86_address parts;
+ rtx base, index, disp;
+ HOST_WIDE_INT scale;
+ const char *reason = NULL;
+ rtx reason_rtx = NULL_RTX;
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr,
+ "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
+ GET_MODE_NAME (mode), strict);
+ debug_rtx (addr);
+ }
+
+ if (ix86_decompose_address (addr, &parts) <= 0)
+ {
+ reason = "decomposition failed";
+ goto report_error;
+ }
+
+ base = parts.base;
+ index = parts.index;
+ disp = parts.disp;
+ scale = parts.scale;
+
+ /* Validate base register.
+
+ Don't allow SUBREG's that span more than a word here. It can lead to spill
+ failures when the base is one word out of a two word structure, which is
+ represented internally as a DImode int. */
+
+ if (base)
+ {
+ rtx reg;
+ reason_rtx = base;
+
+ if (REG_P (base))
+ reg = base;
+ else if (GET_CODE (base) == SUBREG
+ && REG_P (SUBREG_REG (base))
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
+ <= UNITS_PER_WORD)
+ reg = SUBREG_REG (base);
+ else
+ {
+ reason = "base is not a register";
+ goto report_error;
+ }
+
+ if (GET_MODE (base) != Pmode)
+ {
+ reason = "base is not in Pmode";
+ goto report_error;
+ }
+
+ if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
+ || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
+ {
+ reason = "base is not valid";
+ goto report_error;
+ }
+ }
+
+ /* Validate index register.
+
+ Don't allow SUBREG's that span more than a word here -- same as above. */
+
+ if (index)
+ {
+ rtx reg;
+ reason_rtx = index;
+
+ if (REG_P (index))
+ reg = index;
+ else if (GET_CODE (index) == SUBREG
+ && REG_P (SUBREG_REG (index))
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
+ <= UNITS_PER_WORD)
+ reg = SUBREG_REG (index);
+ else
+ {
+ reason = "index is not a register";
+ goto report_error;
+ }
+
+ if (GET_MODE (index) != Pmode)
+ {
+ reason = "index is not in Pmode";
+ goto report_error;
+ }
+
+ if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
+ || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
+ {
+ reason = "index is not valid";
+ goto report_error;
+ }
+ }
+
+ /* Validate scale factor. */
+ if (scale != 1)
+ {
+ reason_rtx = GEN_INT (scale);
+ if (!index)
+ {
+ reason = "scale without index";
+ goto report_error;
+ }
+
+ if (scale != 2 && scale != 4 && scale != 8)
+ {
+ reason = "scale is not a valid multiplier";
+ goto report_error;
+ }
+ }
+
+ /* Validate displacement. */
+ if (disp)
+ {
+ reason_rtx = disp;
+
+ if (GET_CODE (disp) == CONST
+ && GET_CODE (XEXP (disp, 0)) == UNSPEC)
+ switch (XINT (XEXP (disp, 0), 1))
+ {
+ /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit
+ when used. While the ABI also specifies 32bit relocations, we don't
+ produce them at all and use IP-relative addressing instead. */
+ case UNSPEC_GOT:
+ case UNSPEC_GOTOFF:
+ gcc_assert (flag_pic);
+ if (!TARGET_64BIT)
+ goto is_legitimate_pic;
+ reason = "64bit address unspec";
+ goto report_error;
+
+ case UNSPEC_GOTPCREL:
+ gcc_assert (flag_pic);
+ goto is_legitimate_pic;
+
+ case UNSPEC_GOTTPOFF:
+ case UNSPEC_GOTNTPOFF:
+ case UNSPEC_INDNTPOFF:
+ case UNSPEC_NTPOFF:
+ case UNSPEC_DTPOFF:
+ break;
+
+ default:
+ reason = "invalid address unspec";
+ goto report_error;
+ }
+
+ else if (SYMBOLIC_CONST (disp)
+ && (flag_pic
+ || (TARGET_MACHO
+#if TARGET_MACHO
+ && MACHOPIC_INDIRECT
+ && !machopic_operand_p (disp)
+#endif
+ )))
+ {
+
+ is_legitimate_pic:
+ if (TARGET_64BIT && (index || base))
+ {
+ /* foo@dtpoff(%rX) is ok. */
+ if (GET_CODE (disp) != CONST
+ || GET_CODE (XEXP (disp, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
+ || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
+ || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
+ && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
+ {
+ reason = "non-constant pic memory reference";
+ goto report_error;
+ }
+ }
+ /* APPLE LOCAL begin dynamic-no-pic */
+ else if (flag_pic && ! legitimate_pic_address_disp_p (disp))
+ {
+ reason = "displacement is an invalid pic construct";
+ goto report_error;
+ }
+#if TARGET_MACHO
+ else if (MACHO_DYNAMIC_NO_PIC_P && !legitimate_constant_p (disp))
+ {
+ reason = "displacment must be referenced via non_lazy_pointer";
+ goto report_error;
+ }
+#endif
+ /* APPLE LOCAL end dynamic-no-pic */
+
+ /* This code used to verify that a symbolic pic displacement
+ includes the pic_offset_table_rtx register.
+
+ While this is a good idea, unfortunately these constructs may
+ be created by the "adds using lea" optimization for incorrect
+ code like:
+
+ int a;
+ int foo(int i)
+ {
+ return *(&a+i);
+ }
+
+ This code is nonsensical, but results in addressing the
+ GOT table with a pic_offset_table_rtx base. We can't
+ just refuse it easily, since it gets matched by the
+ "addsi3" pattern, which later gets split to lea in the
+ case the output register differs from the input. While this
+ could be handled by a separate addsi pattern for this case
+ that never results in lea, disabling this test seems to be the
+ easier and correct fix for the crash. */
+ }
+ else if (GET_CODE (disp) != LABEL_REF
+ && GET_CODE (disp) != CONST_INT
+ && (GET_CODE (disp) != CONST
+ || !legitimate_constant_p (disp))
+ && (GET_CODE (disp) != SYMBOL_REF
+ || !legitimate_constant_p (disp)))
+ {
+ reason = "displacement is not constant";
+ goto report_error;
+ }
+ else if (TARGET_64BIT
+ && !x86_64_immediate_operand (disp, VOIDmode))
+ {
+ reason = "displacement is out of range";
+ goto report_error;
+ }
+ }
+
+ /* Everything looks valid. */
+ if (TARGET_DEBUG_ADDR)
+ fprintf (stderr, "Success.\n");
+ return TRUE;
+
+ report_error:
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "Error: %s\n", reason);
+ debug_rtx (reason_rtx);
+ }
+ return FALSE;
+}
+
+/* Return a unique alias set for the GOT. */
+
+static HOST_WIDE_INT
+ix86_GOT_alias_set (void)
+{
+ static HOST_WIDE_INT set = -1;
+ if (set == -1)
+ set = new_alias_set ();
+ return set;
+}
+
+/* Return a legitimate reference for ORIG (an address) using the
+ register REG. If REG is 0, a new pseudo is generated.
+
+ There are two types of references that must be handled:
+
+ 1. Global data references must load the address from the GOT, via
+ the PIC reg. An insn is emitted to do this load, and the reg is
+ returned.
+
+ 2. Static data references, constant pool addresses, and code labels
+ compute the address as an offset from the GOT, whose base is in
+ the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
+ differentiate them from global data objects. The returned
+ address is the PIC reg + an unspec constant.
+
+ GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
+ reg also appears in the address. */
+
+static rtx
+legitimize_pic_address (rtx orig, rtx reg)
+{
+ rtx addr = orig;
+ rtx new = orig;
+ rtx base;
+
+#if TARGET_MACHO
+ if (TARGET_MACHO && !TARGET_64BIT)
+ {
+ if (reg == 0)
+ reg = gen_reg_rtx (Pmode);
+ /* Use the generic Mach-O PIC machinery. */
+ return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
+ }
+#endif
+
+ if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
+ new = addr;
+ else if (TARGET_64BIT
+ && ix86_cmodel != CM_SMALL_PIC
+ && local_symbolic_operand (addr, Pmode))
+ {
+ rtx tmpreg;
+ /* This symbol may be referenced via a displacement from the PIC
+ base address (@GOTOFF). */
+
+ if (reload_in_progress)
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ if (GET_CODE (addr) == CONST)
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) == PLUS)
+ {
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
+ new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
+ }
+ else
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
+ new = gen_rtx_CONST (Pmode, new);
+ if (!reg)
+ tmpreg = gen_reg_rtx (Pmode);
+ else
+ tmpreg = reg;
+ emit_move_insn (tmpreg, new);
+
+ if (reg != 0)
+ {
+ new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
+ tmpreg, 1, OPTAB_DIRECT);
+ new = reg;
+ }
+ else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
+ }
+ else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
+ {
+ /* This symbol may be referenced via a displacement from the PIC
+ base address (@GOTOFF). */
+
+ if (reload_in_progress)
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ if (GET_CODE (addr) == CONST)
+ addr = XEXP (addr, 0);
+ if (GET_CODE (addr) == PLUS)
+ {
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
+ new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
+ }
+ else
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
+ new = gen_rtx_CONST (Pmode, new);
+ new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
+
+ if (reg != 0)
+ {
+ emit_move_insn (reg, new);
+ new = reg;
+ }
+ }
+ else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
+ {
+ if (TARGET_64BIT)
+ {
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
+ new = gen_rtx_CONST (Pmode, new);
+ new = gen_const_mem (Pmode, new);
+ set_mem_alias_set (new, ix86_GOT_alias_set ());
+
+ if (reg == 0)
+ reg = gen_reg_rtx (Pmode);
+ /* Use gen_movsi directly, otherwise the address is loaded into a
+ register for CSE. We don't want to CSE these addresses; instead
+ we CSE addresses from the GOT table, so skip this. */
+ emit_insn (gen_movsi (reg, new));
+ new = reg;
+ }
+ else
+ {
+ /* This symbol must be referenced via a load from the
+ Global Offset Table (@GOT). */
+
+ if (reload_in_progress)
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
+ new = gen_rtx_CONST (Pmode, new);
+ new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
+ new = gen_const_mem (Pmode, new);
+ set_mem_alias_set (new, ix86_GOT_alias_set ());
+
+ if (reg == 0)
+ reg = gen_reg_rtx (Pmode);
+ emit_move_insn (reg, new);
+ new = reg;
+ }
+ }
+ else
+ {
+ if (GET_CODE (addr) == CONST_INT
+ && !x86_64_immediate_operand (addr, VOIDmode))
+ {
+ if (reg)
+ {
+ emit_move_insn (reg, addr);
+ new = reg;
+ }
+ else
+ new = force_reg (Pmode, addr);
+ }
+ else if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+
+ /* We must match stuff we generate before. Assume the only
+ unspecs that can get here are ours. Not that we could do
+ anything with them anyway.... */
+ if (GET_CODE (addr) == UNSPEC
+ || (GET_CODE (addr) == PLUS
+ && GET_CODE (XEXP (addr, 0)) == UNSPEC))
+ return orig;
+ gcc_assert (GET_CODE (addr) == PLUS);
+ }
+ if (GET_CODE (addr) == PLUS)
+ {
+ rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
+
+ /* Check first to see if this is a constant offset from a @GOTOFF
+ symbol reference. */
+ if (local_symbolic_operand (op0, Pmode)
+ && GET_CODE (op1) == CONST_INT)
+ {
+ if (!TARGET_64BIT)
+ {
+ if (reload_in_progress)
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
+ UNSPEC_GOTOFF);
+ new = gen_rtx_PLUS (Pmode, new, op1);
+ new = gen_rtx_CONST (Pmode, new);
+ new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
+
+ if (reg != 0)
+ {
+ emit_move_insn (reg, new);
+ new = reg;
+ }
+ }
+ else
+ {
+ if (INTVAL (op1) < -16*1024*1024
+ || INTVAL (op1) >= 16*1024*1024)
+ {
+ if (!x86_64_immediate_operand (op1, Pmode))
+ op1 = force_reg (Pmode, op1);
+ new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
+ }
+ }
+ }
+ else
+ {
+ base = legitimize_pic_address (XEXP (addr, 0), reg);
+ new = legitimize_pic_address (XEXP (addr, 1),
+ base == reg ? NULL_RTX : reg);
+
+ if (GET_CODE (new) == CONST_INT)
+ new = plus_constant (base, INTVAL (new));
+ else
+ {
+ if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
+ {
+ base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
+ new = XEXP (new, 1);
+ }
+ new = gen_rtx_PLUS (Pmode, base, new);
+ /* APPLE LOCAL begin fix-and-continue 6358507 */
+ if (!legitimate_address_p (Pmode, new, FALSE))
+ new = force_reg (Pmode, new);
+ /* APPLE LOCAL end fix-and-continue 6358507 */
+ }
+ }
+ }
+ }
+ return new;
+}
+
+/* Load the thread pointer. If TO_REG is true, force it into a register. */
+
+static rtx
+get_thread_pointer (int to_reg)
+{
+ rtx tp, reg, insn;
+
+ tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
+ if (!to_reg)
+ return tp;
+
+ reg = gen_reg_rtx (Pmode);
+ insn = gen_rtx_SET (VOIDmode, reg, tp);
+ insn = emit_insn (insn);
+
+ return reg;
+}
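+
+/* Illustrative only (a sketch, not code emitted verbatim here): on
+ typical ELF targets the UNSPEC_TP pattern expands to an access
+ through the thread-pointer segment register, e.g.
+
+ movl %gs:0, %eax # 32-bit
+ movq %fs:0, %rax # 64-bit
+
+ so get_thread_pointer (1) yields a pseudo holding that base. */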
+
+/* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
+ false if we expect this to be used for a memory address and true if
+ we expect to load the address into a register. */
+
+static rtx
+legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
+{
+ rtx dest, base, off, pic, tp;
+ int type;
+
+ switch (model)
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ dest = gen_reg_rtx (Pmode);
+ tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
+
+ if (TARGET_64BIT && ! TARGET_GNU2_TLS)
+ {
+ rtx rax = gen_rtx_REG (Pmode, 0), insns;
+
+ start_sequence ();
+ emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_libcall_block (insns, dest, rax, x);
+ }
+ else if (TARGET_64BIT && TARGET_GNU2_TLS)
+ emit_insn (gen_tls_global_dynamic_64 (dest, x));
+ else
+ emit_insn (gen_tls_global_dynamic_32 (dest, x));
+
+ if (TARGET_GNU2_TLS)
+ {
+ dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
+
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
+ }
+ break;
+
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ base = gen_reg_rtx (Pmode);
+ tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
+
+ if (TARGET_64BIT && ! TARGET_GNU2_TLS)
+ {
+ rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
+
+ start_sequence ();
+ emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
+ insns = get_insns ();
+ end_sequence ();
+
+ note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
+ note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
+ emit_libcall_block (insns, base, rax, note);
+ }
+ else if (TARGET_64BIT && TARGET_GNU2_TLS)
+ emit_insn (gen_tls_local_dynamic_base_64 (base));
+ else
+ emit_insn (gen_tls_local_dynamic_base_32 (base));
+
+ if (TARGET_GNU2_TLS)
+ {
+ rtx x = ix86_tls_module_base ();
+
+ set_unique_reg_note (get_last_insn (), REG_EQUIV,
+ gen_rtx_MINUS (Pmode, x, tp));
+ }
+
+ off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
+ off = gen_rtx_CONST (Pmode, off);
+
+ dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
+
+ if (TARGET_GNU2_TLS)
+ {
+ dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
+
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
+ }
+
+ break;
+
+ case TLS_MODEL_INITIAL_EXEC:
+ if (TARGET_64BIT)
+ {
+ pic = NULL;
+ type = UNSPEC_GOTNTPOFF;
+ }
+ else if (flag_pic)
+ {
+ if (reload_in_progress)
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+ pic = pic_offset_table_rtx;
+ type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
+ }
+ else if (!TARGET_ANY_GNU_TLS)
+ {
+ pic = gen_reg_rtx (Pmode);
+ emit_insn (gen_set_got (pic));
+ type = UNSPEC_GOTTPOFF;
+ }
+ else
+ {
+ pic = NULL;
+ type = UNSPEC_INDNTPOFF;
+ }
+
+ off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
+ off = gen_rtx_CONST (Pmode, off);
+ if (pic)
+ off = gen_rtx_PLUS (Pmode, pic, off);
+ off = gen_const_mem (Pmode, off);
+ set_mem_alias_set (off, ix86_GOT_alias_set ());
+
+ if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
+ {
+ base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
+ off = force_reg (Pmode, off);
+ return gen_rtx_PLUS (Pmode, base, off);
+ }
+ else
+ {
+ base = get_thread_pointer (true);
+ dest = gen_reg_rtx (Pmode);
+ emit_insn (gen_subsi3 (dest, base, off));
+ }
+ break;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
+ (TARGET_64BIT || TARGET_ANY_GNU_TLS)
+ ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
+ off = gen_rtx_CONST (Pmode, off);
+
+ if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
+ {
+ base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
+ return gen_rtx_PLUS (Pmode, base, off);
+ }
+ else
+ {
+ base = get_thread_pointer (true);
+ dest = gen_reg_rtx (Pmode);
+ emit_insn (gen_subsi3 (dest, base, off));
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return dest;
+}
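+
+/* As a worked example (a sketch of typical output, not a guarantee):
+ TLS_MODEL_INITIAL_EXEC on 64-bit computes the thread pointer plus a
+ GOT-resident offset, which the output patterns render roughly as
+
+ movq x@GOTTPOFF(%rip), %rax
+ movq %fs:(%rax), ...
+
+ while TLS_MODEL_LOCAL_EXEC folds the offset into %fs:x@TPOFF. */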
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the 80386, we handle X+REG by loading X into a register R and
+ using R+REG. R will go in a general reg and indexing will be used.
+ However, if REG is a broken-out memory address or multiplication,
+ nothing needs to be done because REG can certainly go in a general reg.
+
+ When -fpic is used, special handling is needed for symbolic references.
+ See comments by legitimize_pic_address in i386.c for details. */
+
+rtx
+legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
+{
+ int changed = 0;
+ unsigned log;
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
+ GET_MODE_NAME (mode));
+ debug_rtx (x);
+ }
+
+ log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
+ if (log)
+ return legitimize_tls_address (x, log, false);
+ if (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
+ {
+ rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
+ return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
+ }
+
+ if (flag_pic && SYMBOLIC_CONST (x))
+ return legitimize_pic_address (x, 0);
+ /* APPLE LOCAL begin dynamic-no-pic */
+#if TARGET_MACHO
+ if (MACHO_DYNAMIC_NO_PIC_P && SYMBOLIC_CONST (x))
+ return machopic_indirect_data_reference (x, 0);
+#endif
+ /* APPLE LOCAL end dynamic-no-pic */
+
+ /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
+ if (GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
+ {
+ changed = 1;
+ log = INTVAL (XEXP (x, 1));
+ x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
+ GEN_INT (1 << log));
+ }
+
+ if (GET_CODE (x) == PLUS)
+ {
+ /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
+
+ if (GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
+ {
+ changed = 1;
+ log = INTVAL (XEXP (XEXP (x, 0), 1));
+ XEXP (x, 0) = gen_rtx_MULT (Pmode,
+ force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
+ GEN_INT (1 << log));
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == ASHIFT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
+ {
+ changed = 1;
+ log = INTVAL (XEXP (XEXP (x, 1), 1));
+ XEXP (x, 1) = gen_rtx_MULT (Pmode,
+ force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
+ GEN_INT (1 << log));
+ }
+
+ /* Put multiply first if it isn't already. */
+ if (GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ rtx tmp = XEXP (x, 0);
+ XEXP (x, 0) = XEXP (x, 1);
+ XEXP (x, 1) = tmp;
+ changed = 1;
+ }
+
+ /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
+ into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
+ created by virtual register instantiation, register elimination, and
+ similar optimizations. */
+ if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
+ {
+ changed = 1;
+ x = gen_rtx_PLUS (Pmode,
+ gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ XEXP (XEXP (x, 1), 0)),
+ XEXP (XEXP (x, 1), 1));
+ }
+
+ /* Canonicalize
+ (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
+ into (plus (plus (mult (reg) (const)) (reg)) (const)). */
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ rtx constant;
+ rtx other = NULL_RTX;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ constant = XEXP (x, 1);
+ other = XEXP (XEXP (XEXP (x, 0), 1), 1);
+ }
+ else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
+ {
+ constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
+ other = XEXP (x, 1);
+ }
+ else
+ constant = 0;
+
+ if (constant)
+ {
+ changed = 1;
+ x = gen_rtx_PLUS (Pmode,
+ gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
+ XEXP (XEXP (XEXP (x, 0), 1), 0)),
+ plus_constant (other, INTVAL (constant)));
+ }
+ }
+
+ if (changed && legitimate_address_p (mode, x, FALSE))
+ return x;
+
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ changed = 1;
+ XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
+ }
+
+ if (GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ changed = 1;
+ XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
+ }
+
+ if (changed
+ && GET_CODE (XEXP (x, 1)) == REG
+ && GET_CODE (XEXP (x, 0)) == REG)
+ return x;
+
+ if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
+ {
+ changed = 1;
+ x = legitimize_pic_address (x, 0);
+ }
+
+ if (changed && legitimate_address_p (mode, x, FALSE))
+ return x;
+
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ {
+ rtx temp = gen_reg_rtx (Pmode);
+ rtx val = force_operand (XEXP (x, 1), temp);
+ if (val != temp)
+ emit_move_insn (temp, val);
+
+ XEXP (x, 1) = temp;
+ return x;
+ }
+
+ else if (GET_CODE (XEXP (x, 1)) == REG)
+ {
+ rtx temp = gen_reg_rtx (Pmode);
+ rtx val = force_operand (XEXP (x, 0), temp);
+ if (val != temp)
+ emit_move_insn (temp, val);
+
+ XEXP (x, 0) = temp;
+ return x;
+ }
+ }
+
+ return x;
+}
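+
+/* For instance (illustrative RTL; r1/r2 stand for arbitrary pseudos):
+
+ (plus (ashift (reg r1) (const_int 2)) (reg r2))
+ becomes
+ (plus (mult (reg r1) (const_int 4)) (reg r2))
+
+ which matches the scaled-index addressing mode, e.g. (%r2,%r1,4). */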
+
+/* Print an integer constant expression in assembler syntax. Addition
+ and subtraction are the only arithmetic that may appear in these
+ expressions. FILE is the stdio stream to write to, X is the rtx, and
+ CODE is the operand print code from the output string. */
+
+static void
+output_pic_addr_const (FILE *file, rtx x, int code)
+{
+ char buf[256];
+
+ switch (GET_CODE (x))
+ {
+ case PC:
+ gcc_assert (flag_pic);
+ putc ('.', file);
+ break;
+
+ case SYMBOL_REF:
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ if (! TARGET_MACHO ||
+#if TARGET_MACHO
+ ! darwin_stubs ||
+#endif
+ TARGET_64BIT)
+ /* APPLE LOCAL end axe stubs 5571540 */
+ output_addr_const (file, x);
+ else
+ {
+ const char *name = XSTR (x, 0);
+
+ /* Mark the decl as referenced so that cgraph will output the function. */
+ if (SYMBOL_REF_DECL (x))
+ mark_decl_referenced (SYMBOL_REF_DECL (x));
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
+ name = machopic_indirection_name (x, /*stub_p=*/true);
+#endif
+ assemble_name (file, name);
+ }
+ if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
+ fputs ("@PLT", file);
+ break;
+
+ case LABEL_REF:
+ x = XEXP (x, 0);
+ /* FALLTHRU */
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
+ assemble_name (asm_out_file, buf);
+ break;
+
+ case CONST_INT:
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ break;
+
+ case CONST:
+ /* This used to output parentheses around the expression,
+ but that does not work on the 386 (either ATT or BSD assembler). */
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ break;
+
+ case CONST_DOUBLE:
+ if (GET_MODE (x) == VOIDmode)
+ {
+ /* We can use %d if the number is <32 bits and positive. */
+ if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, "0x%lx%08lx",
+ (unsigned long) CONST_DOUBLE_HIGH (x),
+ (unsigned long) CONST_DOUBLE_LOW (x));
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
+ }
+ else
+ /* We can't handle floating point constants;
+ PRINT_OPERAND must handle them. */
+ output_operand_lossage ("floating constant misused");
+ break;
+
+ case PLUS:
+ /* Some assemblers need integer constants to appear first. */
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ putc ('+', file);
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ }
+ else
+ {
+ gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ putc ('+', file);
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ }
+ break;
+
+ case MINUS:
+ if (!TARGET_MACHO)
+ putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
+ output_pic_addr_const (file, XEXP (x, 0), code);
+ putc ('-', file);
+ output_pic_addr_const (file, XEXP (x, 1), code);
+ if (!TARGET_MACHO)
+ putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
+ break;
+
+ case UNSPEC:
+ gcc_assert (XVECLEN (x, 0) == 1);
+ output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_GOT:
+ fputs ("@GOT", file);
+ break;
+ case UNSPEC_GOTOFF:
+ fputs ("@GOTOFF", file);
+ break;
+ case UNSPEC_GOTPCREL:
+ fputs ("@GOTPCREL(%rip)", file);
+ break;
+ case UNSPEC_GOTTPOFF:
+ /* FIXME: This might be @TPOFF in Sun ld too. */
+ fputs ("@GOTTPOFF", file);
+ break;
+ case UNSPEC_TPOFF:
+ fputs ("@TPOFF", file);
+ break;
+ case UNSPEC_NTPOFF:
+ if (TARGET_64BIT)
+ fputs ("@TPOFF", file);
+ else
+ fputs ("@NTPOFF", file);
+ break;
+ case UNSPEC_DTPOFF:
+ fputs ("@DTPOFF", file);
+ break;
+ case UNSPEC_GOTNTPOFF:
+ if (TARGET_64BIT)
+ fputs ("@GOTTPOFF(%rip)", file);
+ else
+ fputs ("@GOTNTPOFF", file);
+ break;
+ case UNSPEC_INDNTPOFF:
+ fputs ("@INDNTPOFF", file);
+ break;
+ default:
+ output_operand_lossage ("invalid UNSPEC as operand");
+ break;
+ }
+ break;
+
+ default:
+ output_operand_lossage ("invalid expression as operand");
+ }
+}
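+
+/* E.g. for 32-bit PIC this prints operands such as foo@GOT,
+ foo@GOTOFF or x@TPOFF, and for 64-bit foo@GOTPCREL(%rip); the
+ suffix chosen follows directly from the UNSPEC codes above. */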
+
+/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
+ We need to emit DTP-relative relocations. */
+
+static void
+i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ fputs (ASM_LONG, file);
+ output_addr_const (file, x);
+ fputs ("@DTPOFF", file);
+ switch (size)
+ {
+ case 4:
+ break;
+ case 8:
+ fputs (", 0", file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* In the name of slightly smaller debug output, and to cater to
+ general assembler lossage, recognize PIC+GOTOFF and turn it back
+ into a direct symbol reference.
+
+ On Darwin, this is necessary to avoid a crash, because Darwin
+ has a different PIC label for each routine but the DWARF debugging
+ information is not associated with any particular routine, so it's
+ necessary to remove references to the PIC label from RTL stored by
+ the DWARF output code. */
+
+static rtx
+ix86_delegitimize_address (rtx orig_x)
+{
+ rtx x = orig_x;
+ /* reg_addend is NULL or a multiple of some register. */
+ rtx reg_addend = NULL_RTX;
+ /* const_addend is NULL or a const_int. */
+ rtx const_addend = NULL_RTX;
+ /* This is the result, or NULL. */
+ rtx result = NULL_RTX;
+
+ if (GET_CODE (x) == MEM)
+ x = XEXP (x, 0);
+
+ if (TARGET_64BIT)
+ {
+ if (GET_CODE (x) != CONST
+ || GET_CODE (XEXP (x, 0)) != UNSPEC
+ || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
+ || GET_CODE (orig_x) != MEM)
+ return orig_x;
+ return XVECEXP (XEXP (x, 0), 0, 0);
+ }
+
+ if (GET_CODE (x) != PLUS
+ || GET_CODE (XEXP (x, 1)) != CONST)
+ return orig_x;
+
+ if (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
+ /* %ebx + GOT/GOTOFF */
+ ;
+ else if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ /* %ebx + %reg * scale + GOT/GOTOFF */
+ reg_addend = XEXP (x, 0);
+ if (GET_CODE (XEXP (reg_addend, 0)) == REG
+ && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
+ reg_addend = XEXP (reg_addend, 1);
+ else if (GET_CODE (XEXP (reg_addend, 1)) == REG
+ && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
+ reg_addend = XEXP (reg_addend, 0);
+ else
+ return orig_x;
+ if (GET_CODE (reg_addend) != REG
+ && GET_CODE (reg_addend) != MULT
+ && GET_CODE (reg_addend) != ASHIFT)
+ return orig_x;
+ }
+ else
+ return orig_x;
+
+ x = XEXP (XEXP (x, 1), 0);
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ const_addend = XEXP (x, 1);
+ x = XEXP (x, 0);
+ }
+
+ if (GET_CODE (x) == UNSPEC
+ && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
+ || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
+ result = XVECEXP (x, 0, 0);
+
+ if (TARGET_MACHO && darwin_local_data_pic (x)
+ && GET_CODE (orig_x) != MEM)
+ result = XEXP (x, 0);
+
+ if (! result)
+ return orig_x;
+
+ if (const_addend)
+ result = gen_rtx_PLUS (Pmode, result, const_addend);
+ if (reg_addend)
+ result = gen_rtx_PLUS (Pmode, reg_addend, result);
+ return result;
+}
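+
+/* A sketch of the 32-bit case: an address such as
+
+ (plus (reg %ebx) (const (unspec [foo] UNSPEC_GOTOFF)))
+
+ is turned back into the bare symbol_ref foo, with any register or
+ constant addend re-applied around the result. */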
+
+static void
+put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
+ int fp, FILE *file)
+{
+ const char *suffix;
+
+ if (mode == CCFPmode || mode == CCFPUmode)
+ {
+ enum rtx_code second_code, bypass_code;
+ ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
+ gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
+ code = ix86_fp_compare_code_to_integer (code);
+ mode = CCmode;
+ }
+ if (reverse)
+ code = reverse_condition (code);
+
+ switch (code)
+ {
+ case EQ:
+ suffix = "e";
+ break;
+ case NE:
+ suffix = "ne";
+ break;
+ case GT:
+ gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
+ suffix = "g";
+ break;
+ case GTU:
+ /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
+ Those same assemblers have the same but opposite lossage on cmov. */
+ gcc_assert (mode == CCmode);
+ suffix = fp ? "nbe" : "a";
+ break;
+ case LT:
+ switch (mode)
+ {
+ case CCNOmode:
+ case CCGOCmode:
+ suffix = "s";
+ break;
+
+ case CCmode:
+ case CCGCmode:
+ suffix = "l";
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ case LTU:
+ gcc_assert (mode == CCmode);
+ suffix = "b";
+ break;
+ case GE:
+ switch (mode)
+ {
+ case CCNOmode:
+ case CCGOCmode:
+ suffix = "ns";
+ break;
+
+ case CCmode:
+ case CCGCmode:
+ suffix = "ge";
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ case GEU:
+ /* ??? As above. */
+ gcc_assert (mode == CCmode);
+ suffix = fp ? "nb" : "ae";
+ break;
+ case LE:
+ gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
+ suffix = "le";
+ break;
+ case LEU:
+ gcc_assert (mode == CCmode);
+ suffix = "be";
+ break;
+ case UNORDERED:
+ suffix = fp ? "u" : "p";
+ break;
+ case ORDERED:
+ suffix = fp ? "nu" : "np";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ fputs (suffix, file);
+}
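+
+/* For example, code EQ yields the suffix "e", so a template using
+ "set%C0" (hypothetical) prints sete, and the reversed form used by
+ the 'c' print code prints setne instead. */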
+
+/* Print the name of register X to FILE based on its machine mode and number.
+ If CODE is 'w', pretend the mode is HImode.
+ If CODE is 'b', pretend the mode is QImode.
+ If CODE is 'k', pretend the mode is SImode.
+ If CODE is 'q', pretend the mode is DImode.
+ If CODE is 'h', pretend the reg is the 'high' byte register.
+ If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
+
+void
+print_reg (rtx x, int code, FILE *file)
+{
+ gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
+ && REGNO (x) != FRAME_POINTER_REGNUM
+ && REGNO (x) != FLAGS_REG
+ && REGNO (x) != FPSR_REG);
+
+ if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
+ putc ('%', file);
+
+ if (code == 'w' || MMX_REG_P (x))
+ code = 2;
+ else if (code == 'b')
+ code = 1;
+ else if (code == 'k')
+ code = 4;
+ else if (code == 'q')
+ code = 8;
+ else if (code == 'y')
+ code = 3;
+ else if (code == 'h')
+ code = 0;
+ else
+ code = GET_MODE_SIZE (GET_MODE (x));
+
+ /* Irritatingly, the AMD extended registers use a different naming
+ convention from the normal registers. */
+ if (REX_INT_REG_P (x))
+ {
+ gcc_assert (TARGET_64BIT);
+ switch (code)
+ {
+ case 0:
+ error ("extended registers have no high halves");
+ break;
+ case 1:
+ fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
+ break;
+ case 2:
+ fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
+ break;
+ case 4:
+ fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
+ break;
+ case 8:
+ fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
+ break;
+ default:
+ error ("unsupported operand size for extended register");
+ break;
+ }
+ return;
+ }
+ switch (code)
+ {
+ case 3:
+ if (STACK_TOP_P (x))
+ {
+ fputs ("st(0)", file);
+ break;
+ }
+ /* FALLTHRU */
+ case 8:
+ case 4:
+ case 12:
+ if (! ANY_FP_REG_P (x))
+ putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
+ /* FALLTHRU */
+ case 16:
+ case 2:
+ normal:
+ fputs (hi_reg_name[REGNO (x)], file);
+ break;
+ case 1:
+ if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
+ goto normal;
+ fputs (qi_reg_name[REGNO (x)], file);
+ break;
+ case 0:
+ if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
+ goto normal;
+ fputs (qi_high_reg_name[REGNO (x)], file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
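+
+/* Examples (AT&T dialect), for hard register 0 (the a-register):
+ code 'b' prints %al, 'w' prints %ax, 'k' prints %eax and, in 64-bit
+ mode, 'q' prints %rax; for an AMD extended register the names come
+ out as r8b/r8w/r8d/r8 and so on. */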
+
+/* Locate some local-dynamic symbol still in use by this function
+ so that we can print its name in some tls_local_dynamic_base
+ pattern. */
+
+static const char *
+get_some_local_dynamic_name (void)
+{
+ rtx insn;
+
+ if (cfun->machine->some_ld_name)
+ return cfun->machine->some_ld_name;
+
+ for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
+ if (INSN_P (insn)
+ && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
+ return cfun->machine->some_ld_name;
+
+ gcc_unreachable ();
+}
+
+static int
+get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ if (GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ cfun->machine->some_ld_name = XSTR (x, 0);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Meaning of CODE:
+ L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
+ C -- print opcode suffix for set/cmov insn.
+ c -- like C, but print reversed condition
+ F,f -- likewise, but for floating-point.
+ O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
+ otherwise nothing
+ R -- print the prefix for register names.
+ z -- print the opcode suffix for the size of the current operand.
+ * -- print a star (in certain assembler syntax)
+ A -- print an absolute memory reference.
+ w -- print the operand as if it's a "word" (HImode) even if it isn't.
+ s -- print a shift double count, followed by the assembler's argument
+ delimiter.
+ b -- print the QImode name of the register for the indicated operand.
+ %b0 would print %al if operands[0] is reg 0.
+ w -- likewise, print the HImode name of the register.
+ k -- likewise, print the SImode name of the register.
+ q -- likewise, print the DImode name of the register.
+ h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
+ y -- print "st(0)" instead of "st" as a register.
+ D -- print condition for SSE cmp instruction.
+ P -- if PIC, print an @PLT suffix.
+ X -- don't print any sort of PIC '@' suffix for a symbol.
+ & -- print some in-use local-dynamic symbol name.
+ H -- print a memory address offset by 8; used for sse high-parts
+ */
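+
+/* Illustrative use of the codes above: a template such as
+ "fild%z1\t%1" (hypothetical) uses %z to pick the s/l/ll suffix from
+ operand 1's mode, so an HImode memory operand comes out as filds. */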
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '*':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('*', file);
+ return;
+
+ case '&':
+ assemble_name (file, get_some_local_dynamic_name ());
+ return;
+
+ case 'A':
+ switch (ASSEMBLER_DIALECT)
+ {
+ case ASM_ATT:
+ putc ('*', file);
+ break;
+
+ case ASM_INTEL:
+ /* Intel syntax. For absolute addresses, registers should not
+ be surrounded by braces. */
+ if (GET_CODE (x) != REG)
+ {
+ putc ('[', file);
+ PRINT_OPERAND (file, x, 0);
+ putc (']', file);
+ return;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ PRINT_OPERAND (file, x, 0);
+ return;
+
+ case 'L':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('l', file);
+ return;
+
+ case 'W':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('w', file);
+ return;
+
+ case 'B':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('b', file);
+ return;
+
+ case 'Q':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('l', file);
+ return;
+
+ case 'S':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('s', file);
+ return;
+
+ case 'T':
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('t', file);
+ return;
+
+ case 'z':
+ /* 387 opcodes don't get size suffixes if the operands are
+ registers. */
+ if (STACK_REG_P (x))
+ return;
+
+ /* Likewise if using Intel opcodes. */
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ return;
+
+ /* Derive the opcode suffix from the size of the operand. */
+ switch (GET_MODE_SIZE (GET_MODE (x)))
+ {
+ case 2:
+#ifdef HAVE_GAS_FILDS_FISTS
+ putc ('s', file);
+#endif
+ return;
+
+ case 4:
+ if (GET_MODE (x) == SFmode)
+ {
+ putc ('s', file);
+ return;
+ }
+ else
+ putc ('l', file);
+ return;
+
+ case 12:
+ case 16:
+ putc ('t', file);
+ return;
+
+ case 8:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+#ifdef GAS_MNEMONICS
+ putc ('q', file);
+#else
+ putc ('l', file);
+ putc ('l', file);
+#endif
+ }
+ else
+ putc ('l', file);
+ return;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ case 'b':
+ case 'w':
+ case 'k':
+ case 'q':
+ case 'h':
+ case 'y':
+ case 'X':
+ case 'P':
+ break;
+
+ case 's':
+ if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
+ {
+ PRINT_OPERAND (file, x, 0);
+ putc (',', file);
+ }
+ return;
+
+ case 'D':
+ /* Little bit of braindamage here. The SSE compare instructions
+ use completely different names for the comparisons than the
+ fp conditional moves do. */
+ switch (GET_CODE (x))
+ {
+ case EQ:
+ case UNEQ:
+ fputs ("eq", file);
+ break;
+ case LT:
+ case UNLT:
+ fputs ("lt", file);
+ break;
+ case LE:
+ case UNLE:
+ fputs ("le", file);
+ break;
+ case UNORDERED:
+ fputs ("unord", file);
+ break;
+ case NE:
+ case LTGT:
+ fputs ("neq", file);
+ break;
+ case UNGE:
+ case GE:
+ fputs ("nlt", file);
+ break;
+ case UNGT:
+ case GT:
+ fputs ("nle", file);
+ break;
+ case ORDERED:
+ fputs ("ord", file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return;
+ case 'O':
+#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ {
+ switch (GET_MODE (x))
+ {
+ case HImode: putc ('w', file); break;
+ case SImode:
+ case SFmode: putc ('l', file); break;
+ case DImode:
+ case DFmode: putc ('q', file); break;
+ default: gcc_unreachable ();
+ }
+ putc ('.', file);
+ }
+#endif
+ return;
+ case 'C':
+ put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
+ return;
+ case 'F':
+#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('.', file);
+#endif
+ put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
+ return;
+
+ /* Like above, but reverse condition */
+ case 'c':
+ /* Check to see if argument to %c is really a constant
+ and not a condition code which needs to be reversed. */
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
+ return;
+ }
+ put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
+ return;
+ case 'f':
+#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('.', file);
+#endif
+ put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
+ return;
+
+ case 'H':
+ /* It doesn't actually matter what mode we use here, as we're
+ only going to use this for printing. */
+ x = adjust_address_nv (x, DImode, 8);
+ break;
+
+ case '+':
+ {
+ rtx x;
+
+ if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
+ return;
+
+ x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
+ if (x)
+ {
+ int pred_val = INTVAL (XEXP (x, 0));
+
+ if (pred_val < REG_BR_PROB_BASE * 45 / 100
+ || pred_val > REG_BR_PROB_BASE * 55 / 100)
+ {
+ int taken = pred_val > REG_BR_PROB_BASE / 2;
+ int cputaken = final_forward_branch_p (current_output_insn) == 0;
+
+ /* Emit hints only in cases where the default branch-prediction
+ heuristics would fail. */
+ if (taken != cputaken)
+ {
+ /* We use 3e (DS) prefix for taken branches and
+ 2e (CS) prefix for not taken branches. */
+ if (taken)
+ fputs ("ds ; ", file);
+ else
+ fputs ("cs ; ", file);
+ }
+ }
+ }
+ return;
+ }
+ default:
+ output_operand_lossage ("invalid operand code '%c'", code);
+ }
+ }
+
+ if (GET_CODE (x) == REG)
+ print_reg (x, code, file);
+
+ else if (GET_CODE (x) == MEM)
+ {
+ /* No `byte ptr' prefix for call instructions. */
+ if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
+ {
+ const char * size;
+ switch (GET_MODE_SIZE (GET_MODE (x)))
+ {
+ case 1: size = "BYTE"; break;
+ case 2: size = "WORD"; break;
+ case 4: size = "DWORD"; break;
+ case 8: size = "QWORD"; break;
+ case 12: size = "XWORD"; break;
+ case 16: size = "XMMWORD"; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Check for explicit size override (codes 'b', 'w' and 'k') */
+ if (code == 'b')
+ size = "BYTE";
+ else if (code == 'w')
+ size = "WORD";
+ else if (code == 'k')
+ size = "DWORD";
+
+ fputs (size, file);
+ fputs (" PTR ", file);
+ }
+
+ x = XEXP (x, 0);
+ /* Avoid (%rip) for call operands. */
+ if (CONSTANT_ADDRESS_P (x) && code == 'P'
+ && GET_CODE (x) != CONST_INT)
+ output_addr_const (file, x);
+ else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
+ output_operand_lossage ("invalid constraints for operand");
+ else
+ output_address (x);
+ }
+
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
+ {
+ REAL_VALUE_TYPE r;
+ long l;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ REAL_VALUE_TO_TARGET_SINGLE (r, l);
+
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('$', file);
+ fprintf (file, "0x%08lx", l);
+ }
+
+ /* These float cases don't actually occur as immediate operands. */
+ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
+ {
+ char dstr[30];
+
+ real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
+ fprintf (file, "%s", dstr);
+ }
+
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && GET_MODE (x) == XFmode)
+ {
+ char dstr[30];
+
+ real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
+ fprintf (file, "%s", dstr);
+ }
+
+ else
+ {
+ /* We have patterns that allow zero sets of memory, for instance.
+ In 64-bit mode, we should probably support all 8-byte vectors,
+ since we can in fact encode that into an immediate. */
+ if (GET_CODE (x) == CONST_VECTOR)
+ {
+ gcc_assert (x == CONST0_RTX (GET_MODE (x)));
+ x = const0_rtx;
+ }
+
+ if (code != 'P')
+ {
+ if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ {
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('$', file);
+ }
+ else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
+ || GET_CODE (x) == LABEL_REF)
+ {
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('$', file);
+ else
+ fputs ("OFFSET FLAT:", file);
+ }
+ }
+ if (GET_CODE (x) == CONST_INT)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ /* APPLE LOCAL begin dynamic-no-pic */
+ else if (flag_pic || (TARGET_MACHO && MACHOPIC_INDIRECT))
+ /* APPLE LOCAL end dynamic-no-pic */
+ output_pic_addr_const (file, x, code);
+ else
+ output_addr_const (file, x);
+ }
+}
+
+/* Print a memory operand whose address is ADDR. */
+
+void
+print_operand_address (FILE *file, rtx addr)
+{
+ struct ix86_address parts;
+ rtx base, index, disp;
+ int scale;
+ int ok = ix86_decompose_address (addr, &parts);
+
+ gcc_assert (ok);
+
+ base = parts.base;
+ index = parts.index;
+ disp = parts.disp;
+ scale = parts.scale;
+
+ switch (parts.seg)
+ {
+ case SEG_DEFAULT:
+ break;
+ case SEG_FS:
+ case SEG_GS:
+ if (USER_LABEL_PREFIX[0] == 0)
+ putc ('%', file);
+ fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!base && !index)
+ {
+ /* A displacement-only address requires special attention. */
+
+ if (GET_CODE (disp) == CONST_INT)
+ {
+ if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
+ {
+ if (USER_LABEL_PREFIX[0] == 0)
+ putc ('%', file);
+ fputs ("ds:", file);
+ }
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
+ }
+ else if (flag_pic)
+ output_pic_addr_const (file, disp, 0);
+ else
+ output_addr_const (file, disp);
+
+ /* Use the one-byte-shorter RIP-relative addressing in 64-bit mode. */
+ if (TARGET_64BIT)
+ {
+ if (GET_CODE (disp) == CONST
+ && GET_CODE (XEXP (disp, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
+ disp = XEXP (XEXP (disp, 0), 0);
+ if (GET_CODE (disp) == LABEL_REF
+ || (GET_CODE (disp) == SYMBOL_REF
+ && SYMBOL_REF_TLS_MODEL (disp) == 0))
+ fputs ("(%rip)", file);
+ }
+ }
+ else
+ {
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ {
+ if (disp)
+ {
+ if (flag_pic)
+ output_pic_addr_const (file, disp, 0);
+ else if (GET_CODE (disp) == LABEL_REF)
+ output_asm_label (disp);
+ else
+ output_addr_const (file, disp);
+ }
+
+ putc ('(', file);
+ if (base)
+ print_reg (base, 0, file);
+ if (index)
+ {
+ putc (',', file);
+ print_reg (index, 0, file);
+ if (scale != 1)
+ fprintf (file, ",%d", scale);
+ }
+ putc (')', file);
+ }
+ else
+ {
+ rtx offset = NULL_RTX;
+
+ if (disp)
+ {
+ /* Pull out the offset of a symbol; print any symbol itself. */
+ if (GET_CODE (disp) == CONST
+ && GET_CODE (XEXP (disp, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
+ {
+ offset = XEXP (XEXP (disp, 0), 1);
+ disp = gen_rtx_CONST (VOIDmode,
+ XEXP (XEXP (disp, 0), 0));
+ }
+
+ if (flag_pic)
+ output_pic_addr_const (file, disp, 0);
+ else if (GET_CODE (disp) == LABEL_REF)
+ output_asm_label (disp);
+ else if (GET_CODE (disp) == CONST_INT)
+ offset = disp;
+ else
+ output_addr_const (file, disp);
+ }
+
+ putc ('[', file);
+ if (base)
+ {
+ print_reg (base, 0, file);
+ if (offset)
+ {
+ if (INTVAL (offset) >= 0)
+ putc ('+', file);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
+ }
+ }
+ else if (offset)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
+ else
+ putc ('0', file);
+
+ if (index)
+ {
+ putc ('+', file);
+ print_reg (index, 0, file);
+ if (scale != 1)
+ fprintf (file, "*%d", scale);
+ }
+ putc (']', file);
+ }
+ }
+}
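+
+/* The two dialects render the same address differently; e.g. a base,
+ index, scale and displacement come out as (a sketch)
+
+ AT&T: -4(%ebp,%eax,4)
+ Intel: [ebp-4+eax*4]
+
+ following exactly the decomposition performed above. */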
+
+bool
+output_addr_const_extra (FILE *file, rtx x)
+{
+ rtx op;
+
+ if (GET_CODE (x) != UNSPEC)
+ return false;
+
+ op = XVECEXP (x, 0, 0);
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_GOTTPOFF:
+ output_addr_const (file, op);
+ /* FIXME: This might be @TPOFF in Sun ld. */
+ fputs ("@GOTTPOFF", file);
+ break;
+ case UNSPEC_TPOFF:
+ output_addr_const (file, op);
+ fputs ("@TPOFF", file);
+ break;
+ case UNSPEC_NTPOFF:
+ output_addr_const (file, op);
+ if (TARGET_64BIT)
+ fputs ("@TPOFF", file);
+ else
+ fputs ("@NTPOFF", file);
+ break;
+ case UNSPEC_DTPOFF:
+ output_addr_const (file, op);
+ fputs ("@DTPOFF", file);
+ break;
+ case UNSPEC_GOTNTPOFF:
+ output_addr_const (file, op);
+ if (TARGET_64BIT)
+ fputs ("@GOTTPOFF(%rip)", file);
+ else
+ fputs ("@GOTNTPOFF", file);
+ break;
+ case UNSPEC_INDNTPOFF:
+ output_addr_const (file, op);
+ fputs ("@INDNTPOFF", file);
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/* Split one or more DImode RTL references into pairs of SImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
+{
+ while (num--)
+ {
+ rtx op = operands[num];
+
+ /* simplify_subreg refuses to split volatile memory addresses,
+ but we still have to handle them. */
+ if (GET_CODE (op) == MEM)
+ {
+ lo_half[num] = adjust_address (op, SImode, 0);
+ hi_half[num] = adjust_address (op, SImode, 4);
+ }
+ else
+ {
+ lo_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 0);
+ hi_half[num] = simplify_gen_subreg (SImode, op,
+ GET_MODE (op) == VOIDmode
+ ? DImode : GET_MODE (op), 4);
+ }
+ }
+}
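+
+/* Typical use (a sketch; the emitted insns depend on the caller):
+
+ rtx lo[2], hi[2];
+ split_di (operands, 2, lo, hi);
+ ... emit an add/adc pair on the lo/hi halves ...
+
+ where lo_half[i] and hi_half[i] stay parallel to operands[i]. */
+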
+/* Split one or more TImode RTL references into pairs of DImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
+{
+ while (num--)
+ {
+ rtx op = operands[num];
+
+ /* simplify_subreg refuses to split volatile memory addresses, but we
+ still have to handle them. */
+ if (GET_CODE (op) == MEM)
+ {
+ lo_half[num] = adjust_address (op, DImode, 0);
+ hi_half[num] = adjust_address (op, DImode, 8);
+ }
+ else
+ {
+ lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
+ hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
+ }
+ }
+}
+
+/* Output code to perform a 387 binary operation in INSN, one of PLUS,
+ MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
+ is the expression of the binary operation. The output may either be
+ emitted here, or returned to the caller, like all output_* functions.
+
+ There is no guarantee that the operands are the same mode, as they
+ might be within FLOAT or FLOAT_EXTEND expressions. */
+
+#ifndef SYSV386_COMPAT
+/* Set to 1 for compatibility with brain-damaged assemblers. No-one
+ wants to fix the assemblers because that causes incompatibility
+ with gcc. No-one wants to fix gcc because that causes
+ incompatibility with assemblers... You can use the option of
+ -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
+#define SYSV386_COMPAT 1
+#endif
+
+const char *
+output_387_binary_op (rtx insn, rtx *operands)
+{
+ static char buf[30];
+ const char *p;
+ const char *ssep;
+ int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
+
+#ifdef ENABLE_CHECKING
+ /* Even if we do not want to check the inputs, this documents the input
+ constraints, which helps in understanding the following code. */
+ if (STACK_REG_P (operands[0])
+ && ((REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1])
+ && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
+ || (REG_P (operands[2])
+ && REGNO (operands[0]) == REGNO (operands[2])
+ && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
+ && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
+ ; /* ok */
+ else
+ gcc_assert (is_sse);
+#endif
+
+ switch (GET_CODE (operands[3]))
+ {
+ case PLUS:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ p = "fiadd";
+ else
+ p = "fadd";
+ ssep = "add";
+ break;
+
+ case MINUS:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ p = "fisub";
+ else
+ p = "fsub";
+ ssep = "sub";
+ break;
+
+ case MULT:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ p = "fimul";
+ else
+ p = "fmul";
+ ssep = "mul";
+ break;
+
+ case DIV:
+ if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
+ || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
+ p = "fidiv";
+ else
+ p = "fdiv";
+ ssep = "div";
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (is_sse)
+ {
+ strcpy (buf, ssep);
+ if (GET_MODE (operands[0]) == SFmode)
+ strcat (buf, "ss\t{%2, %0|%0, %2}");
+ else
+ strcat (buf, "sd\t{%2, %0|%0, %2}");
+ return buf;
+ }
+ strcpy (buf, p);
+
+ switch (GET_CODE (operands[3]))
+ {
+ case MULT:
+ case PLUS:
+ if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
+ {
+ rtx temp = operands[2];
+ operands[2] = operands[1];
+ operands[1] = temp;
+ }
+
+ /* We know operands[0] == operands[1]. */
+
+ if (GET_CODE (operands[2]) == MEM)
+ {
+ p = "%z2\t%2";
+ break;
+ }
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
+ {
+ if (STACK_TOP_P (operands[0]))
+ /* How is it that we are storing to a dead operand[2]?
+ Well, presumably operands[1] is dead too. We can't
+ store the result to st(0) as st(0) gets popped on this
+ instruction. Instead store to operands[2] (which I
+ think has to be st(1)). st(1) will be popped later.
+ gcc <= 2.8.1 didn't have this check and generated
+ assembly code that the Unixware assembler rejected. */
+ p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
+ else
+ p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
+ break;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
+ else
+ p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
+ break;
+
+ case MINUS:
+ case DIV:
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ p = "r%z1\t%1";
+ break;
+ }
+
+ if (GET_CODE (operands[2]) == MEM)
+ {
+ p = "%z2\t%2";
+ break;
+ }
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
+ {
+#if SYSV386_COMPAT
+ /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
+ derived assemblers, confusingly reverse the direction of
+ the operation for fsub{r} and fdiv{r} when the
+ destination register is not st(0). The Intel assembler
+ doesn't have this brain damage. Read !SYSV386_COMPAT to
+ figure out what the hardware really does. */
+ if (STACK_TOP_P (operands[0]))
+ p = "{p\t%0, %2|rp\t%2, %0}";
+ else
+ p = "{rp\t%2, %0|p\t%0, %2}";
+#else
+ if (STACK_TOP_P (operands[0]))
+ /* As above for fmul/fadd, we can't store to st(0). */
+ p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
+ else
+ p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
+#endif
+ break;
+ }
+
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ {
+#if SYSV386_COMPAT
+ if (STACK_TOP_P (operands[0]))
+ p = "{rp\t%0, %1|p\t%1, %0}";
+ else
+ p = "{p\t%1, %0|rp\t%0, %1}";
+#else
+ if (STACK_TOP_P (operands[0]))
+ p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
+ else
+ p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
+#endif
+ break;
+ }
+
+ if (STACK_TOP_P (operands[0]))
+ {
+ if (STACK_TOP_P (operands[1]))
+ p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
+ else
+ p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
+ break;
+ }
+ else if (STACK_TOP_P (operands[1]))
+ {
+#if SYSV386_COMPAT
+ p = "{\t%1, %0|r\t%0, %1}";
+#else
+ p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
+#endif
+ }
+ else
+ {
+#if SYSV386_COMPAT
+ p = "{r\t%2, %0|\t%0, %2}";
+#else
+ p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
+#endif
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ strcat (buf, p);
+ return buf;
+}
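+
+/* For example (a sketch, AT&T dialect): a PLUS whose operands[2]
+ dies comes out as "faddp" storing to the surviving stack register,
+ while the SSE path for SFmode returns "addss\t{%2, %0|%0, %2}". */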
+
+/* Return needed mode for entity in optimize_mode_switching pass. */
+
+int
+ix86_mode_needed (int entity, rtx insn)
+{
+ enum attr_i387_cw mode;
+
+ /* The mode UNINITIALIZED is used for the control word after a
+ function call or ASM pattern. The mode ANY specifies that the
+ function has no requirements on the control word and makes no
+ changes to the bits we are interested in. */
+
+ if (CALL_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && (asm_noperands (PATTERN (insn)) >= 0
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
+ return I387_CW_UNINITIALIZED;
+
+ if (recog_memoized (insn) < 0)
+ return I387_CW_ANY;
+
+ mode = get_attr_i387_cw (insn);
+
+ switch (entity)
+ {
+ case I387_TRUNC:
+ if (mode == I387_CW_TRUNC)
+ return mode;
+ break;
+
+ case I387_FLOOR:
+ if (mode == I387_CW_FLOOR)
+ return mode;
+ break;
+
+ case I387_CEIL:
+ if (mode == I387_CW_CEIL)
+ return mode;
+ break;
+
+ case I387_MASK_PM:
+ if (mode == I387_CW_MASK_PM)
+ return mode;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return I387_CW_ANY;
+}
+
+/* Output code to initialize the control word copies used by the
+ trunc?f?i and rounding patterns. The control word as saved by
+ fnstcw lives in SLOT_CW_STORED; MODE selects which modified copy
+ to initialize. */
+
+void
+emit_i387_cw_initialization (int mode)
+{
+ rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ rtx reg = gen_reg_rtx (HImode);
+ rtx new_mode;
+ int slot;
+
+ emit_insn (gen_x86_fnstcw_1 (stored_mode));
+ emit_move_insn (reg, stored_mode);
+
+ if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
+ {
+ switch (mode)
+ {
+ case I387_CW_TRUNC:
+ /* round toward zero (truncate) */
+ emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
+ slot = SLOT_CW_TRUNC;
+ break;
+
+ case I387_CW_FLOOR:
+ /* round down toward -oo */
+ emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
+ emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
+ slot = SLOT_CW_FLOOR;
+ break;
+
+ case I387_CW_CEIL:
+ /* round up toward +oo */
+ emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
+ emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
+ slot = SLOT_CW_CEIL;
+ break;
+
+ case I387_CW_MASK_PM:
+ /* mask precision exception for nearbyint() */
+ emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
+ slot = SLOT_CW_MASK_PM;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ switch (mode)
+ {
+ case I387_CW_TRUNC:
+ /* round toward zero (truncate) */
+ emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
+ slot = SLOT_CW_TRUNC;
+ break;
+
+ case I387_CW_FLOOR:
+ /* round down toward -oo */
+ emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
+ slot = SLOT_CW_FLOOR;
+ break;
+
+ case I387_CW_CEIL:
+ /* round up toward +oo */
+ emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
+ slot = SLOT_CW_CEIL;
+ break;
+
+ case I387_CW_MASK_PM:
+ /* mask precision exception for nearbyint() */
+ emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
+ slot = SLOT_CW_MASK_PM;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ gcc_assert (slot < MAX_386_STACK_LOCALS);
+
+ new_mode = assign_386_stack_local (HImode, slot);
+ emit_move_insn (new_mode, reg);
+}
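+
+/* The bit twiddling above targets the x87 rounding-control field,
+ bits 10-11 of the control word: 0x0c00 selects truncation, 0x0400
+ rounds toward -inf and 0x0800 toward +inf, while 0x0020 masks the
+ precision exception (used for nearbyint). */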
+
+/* Output code for INSN to convert a float to a signed int. OPERANDS
+ are the insn operands. The output may be [HSD]Imode and the input
+ operand may be [SDX]Fmode. */
+
+const char *
+output_fix_trunc (rtx insn, rtx *operands, int fisttp)
+{
+ int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+ int dimode_p = GET_MODE (operands[0]) == DImode;
+ int round_mode = get_attr_i387_cw (insn);
+
+ /* Jump through a hoop or two for DImode, since the hardware has no
+ non-popping instruction. We used to do this a different way, but
+ that was somewhat fragile and broke with post-reload splitters. */
+ if ((dimode_p || fisttp) && !stack_top_dies)
+ output_asm_insn ("fld\t%y1", operands);
+
+ gcc_assert (STACK_TOP_P (operands[1]));
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+
+ if (fisttp)
+ output_asm_insn ("fisttp%z0\t%0", operands);
+ else
+ {
+ if (round_mode != I387_CW_ANY)
+ output_asm_insn ("fldcw\t%3", operands);
+ if (stack_top_dies || dimode_p)
+ output_asm_insn ("fistp%z0\t%0", operands);
+ else
+ output_asm_insn ("fist%z0\t%0", operands);
+ if (round_mode != I387_CW_ANY)
+ output_asm_insn ("fldcw\t%2", operands);
+ }
+
+ return "";
+}
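+
+/* A typical emitted sequence (a sketch) when fisttp is unavailable:
+
+ fldcw %3 # switch to the truncating control word
+ fistp%z0 %0 # convert, store and pop
+ fldcw %2 # restore the caller's control word
+
+ fisttp (SSE3) truncates directly and needs no control-word dance. */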
+
+/* Output code for x87 ffreep insn. The OPNO argument, which may only
+ have the values zero or one, indicates the ffreep insn's operand
+ from the OPERANDS array. */
+
+static const char *
+output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
+{
+ if (TARGET_USE_FFREEP)
+#if HAVE_AS_IX86_FFREEP
+ return opno ? "ffreep\t%y1" : "ffreep\t%y0";
+#else
+ switch (REGNO (operands[opno]))
+ {
+ case FIRST_STACK_REG + 0: return ".word\t0xc0df";
+ case FIRST_STACK_REG + 1: return ".word\t0xc1df";
+ case FIRST_STACK_REG + 2: return ".word\t0xc2df";
+ case FIRST_STACK_REG + 3: return ".word\t0xc3df";
+ case FIRST_STACK_REG + 4: return ".word\t0xc4df";
+ case FIRST_STACK_REG + 5: return ".word\t0xc5df";
+ case FIRST_STACK_REG + 6: return ".word\t0xc6df";
+ case FIRST_STACK_REG + 7: return ".word\t0xc7df";
+ }
+#endif
+
+ return opno ? "fstp\t%y1" : "fstp\t%y0";
+}
+
+
+/* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
+ should be used. UNORDERED_P is true when fucom should be used. */
+
+const char *
+output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
+{
+ int stack_top_dies;
+ rtx cmp_op0, cmp_op1;
+ int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
+
+ if (eflags_p)
+ {
+ cmp_op0 = operands[0];
+ cmp_op1 = operands[1];
+ }
+ else
+ {
+ cmp_op0 = operands[1];
+ cmp_op1 = operands[2];
+ }
+
+ if (is_sse)
+ {
+ if (GET_MODE (operands[0]) == SFmode)
+ if (unordered_p)
+ return "ucomiss\t{%1, %0|%0, %1}";
+ else
+ return "comiss\t{%1, %0|%0, %1}";
+ else
+ if (unordered_p)
+ return "ucomisd\t{%1, %0|%0, %1}";
+ else
+ return "comisd\t{%1, %0|%0, %1}";
+ }
+
+ gcc_assert (STACK_TOP_P (cmp_op0));
+
+ stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
+
+ if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
+ {
+ if (stack_top_dies)
+ {
+ output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
+ return output_387_ffreep (operands, 1);
+ }
+ else
+ return "ftst\n\tfnstsw\t%0";
+ }
+
+ if (STACK_REG_P (cmp_op1)
+ && stack_top_dies
+ && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
+ && REGNO (cmp_op1) != FIRST_STACK_REG)
+ {
+ /* If both the top of the 387 stack and the other operand (also a
+ stack register) die, then this must be a `fcompp' float compare. */
+
+ if (eflags_p)
+ {
+ /* There is no double-popping fcomi variant. Fortunately,
+ eflags is immune to the fstp's cc clobbering. */
+ if (unordered_p)
+ output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
+ else
+ output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
+ return output_387_ffreep (operands, 0);
+ }
+ else
+ {
+ if (unordered_p)
+ return "fucompp\n\tfnstsw\t%0";
+ else
+ return "fcompp\n\tfnstsw\t%0";
+ }
+ }
+ else
+ {
+ /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
+
+ static const char * const alt[16] =
+ {
+ "fcom%z2\t%y2\n\tfnstsw\t%0",
+ "fcomp%z2\t%y2\n\tfnstsw\t%0",
+ "fucom%z2\t%y2\n\tfnstsw\t%0",
+ "fucomp%z2\t%y2\n\tfnstsw\t%0",
+
+ "ficom%z2\t%y2\n\tfnstsw\t%0",
+ "ficomp%z2\t%y2\n\tfnstsw\t%0",
+ NULL,
+ NULL,
+
+ "fcomi\t{%y1, %0|%0, %y1}",
+ "fcomip\t{%y1, %0|%0, %y1}",
+ "fucomi\t{%y1, %0|%0, %y1}",
+ "fucomip\t{%y1, %0|%0, %y1}",
+
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+
+ int mask;
+ const char *ret;
+
+ mask = eflags_p << 3;
+ mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
+ mask |= unordered_p << 1;
+ mask |= stack_top_dies;
+
+ gcc_assert (mask < 16);
+ ret = alt[mask];
+ gcc_assert (ret);
+
+ return ret;
+ }
+}
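+
+/* As a worked example of the mask above: eflags_p = 1, an FP
+ operand (intmode bit 0), unordered_p = 1 and a surviving stack top
+ give mask 10, selecting "fucomi\t{%y1, %0|%0, %y1}" from alt[]. */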
+
+void
+ix86_output_addr_vec_elt (FILE *file, int value)
+{
+ const char *directive = ASM_LONG;
+
+#ifdef ASM_QUAD
+ if (TARGET_64BIT)
+ directive = ASM_QUAD;
+#else
+ gcc_assert (!TARGET_64BIT);
+#endif
+
+ fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
+}
+
+void
+ix86_output_addr_diff_elt (FILE *file, int value, int rel)
+{
+ if (TARGET_64BIT)
+ fprintf (file, "%s%s%d-%s%d\n",
+ ASM_LONG, LPREFIX, value, LPREFIX, rel);
+ else if (HAVE_AS_GOTOFF_IN_DATA)
+ fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
+#if TARGET_MACHO
+ else if (TARGET_MACHO)
+ {
+ fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
+ machopic_output_function_base_name (file);
+ fprintf(file, "\n");
+ }
+#endif
+ else
+ asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
+ ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
+}
+
+/* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
+ for the target. */
+
+void
+ix86_expand_clear (rtx dest)
+{
+ rtx tmp;
+
+ /* We play register width games, which are only valid after reload. */
+ gcc_assert (reload_completed);
+
+ /* Avoid HImode and its attendant prefix byte. */
+ if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
+ dest = gen_rtx_REG (SImode, REGNO (dest));
+
+ tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
+
+ /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
+ if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
+ {
+ rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
+ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
+ }
+
+ emit_insn (tmp);
+}
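+
+/* E.g. clearing %eax becomes "xorl %eax, %eax" (with the flags
+ clobber attached) rather than "movl $0, %eax", unless
+ TARGET_USE_MOV0 without -Os asks for the mov form; both leave the
+ full register zero. */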
+
+/* X is an unchanging MEM. If it is a constant pool reference, return
+ the constant pool rtx, else NULL. */
+
+rtx
+maybe_get_pool_constant (rtx x)
+{
+ x = ix86_delegitimize_address (XEXP (x, 0));
+
+ if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ return get_pool_constant (x);
+
+ return NULL_RTX;
+}
+
+void
+ix86_expand_move (enum machine_mode mode, rtx operands[])
+{
+ int strict = (reload_in_progress || reload_completed);
+ /* APPLE LOCAL dynamic-no-pic */
+ rtx insn, op0, op1;
+ enum tls_model model;
+
+ op0 = operands[0];
+ op1 = operands[1];
+
+ if (GET_CODE (op1) == SYMBOL_REF)
+ {
+ model = SYMBOL_REF_TLS_MODEL (op1);
+ if (model)
+ {
+ op1 = legitimize_tls_address (op1, model, true);
+ op1 = force_operand (op1, op0);
+ if (op1 == op0)
+ return;
+ }
+ }
+ else if (GET_CODE (op1) == CONST
+ && GET_CODE (XEXP (op1, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
+ {
+ model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
+ if (model)
+ {
+ rtx addend = XEXP (XEXP (op1, 0), 1);
+ op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
+ op1 = force_operand (op1, NULL);
+ op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
+ op0, 1, OPTAB_DIRECT);
+ if (op1 == op0)
+ return;
+ }
+ }
+
+ /* APPLE LOCAL begin dynamic-no-pic */
+ /* allow macho & macho for x86_64 to coexist */
+ if (((TARGET_MACHO && MACHOPIC_INDIRECT)
+ || flag_pic)
+ && mode == Pmode && symbolic_operand (op1, Pmode))
+ /* APPLE LOCAL end dynamic-no-pic */
+ {
+ if (TARGET_MACHO && !TARGET_64BIT)
+ {
+#if TARGET_MACHO
+ /* APPLE LOCAL begin dynamic-no-pic */
+ if (MACHOPIC_INDIRECT)
+ {
+ rtx temp = ((reload_in_progress
+ || ((op0 && GET_CODE (op0) == REG)
+ && mode == Pmode))
+ ? op0 : gen_reg_rtx (Pmode));
+ op1 = machopic_indirect_data_reference (op1, temp);
+ if (MACHOPIC_PURE)
+ op1 = machopic_legitimize_pic_address (op1, mode,
+ temp == op1 ? 0 : temp);
+ }
+ if (op0 != op1 && GET_CODE (op0) != MEM)
+ {
+ insn = gen_rtx_SET (VOIDmode, op0, op1);
+ emit_insn (insn);
+ return;
+ }
+ if (GET_CODE (op0) == MEM)
+ op1 = force_reg (Pmode, op1);
+ else
+ {
+ rtx temp = op0;
+ if (GET_CODE (temp) != REG)
+ temp = gen_reg_rtx (Pmode);
+ temp = legitimize_pic_address (op1, temp);
+ if (temp == op0)
+ return;
+ op1 = temp;
+ }
+ /* APPLE LOCAL end dynamic-no-pic */
+#endif
+ }
+ else
+ {
+ if (GET_CODE (op0) == MEM)
+ op1 = force_reg (Pmode, op1);
+ else
+ op1 = legitimize_address (op1, op1, Pmode);
+ }
+ }
+ else
+ {
+ if (GET_CODE (op0) == MEM
+ && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
+ || !push_operand (op0, mode))
+ && GET_CODE (op1) == MEM)
+ op1 = force_reg (mode, op1);
+
+ if (push_operand (op0, mode)
+ && ! general_no_elim_operand (op1, mode))
+ op1 = copy_to_mode_reg (mode, op1);
+
+ /* In 64-bit compilation, force large constants into a register
+ to get them CSEed. */
+ if (TARGET_64BIT && mode == DImode
+ && immediate_operand (op1, mode)
+ && !x86_64_zext_immediate_operand (op1, VOIDmode)
+ && !register_operand (op0, mode)
+ && optimize && !reload_completed && !reload_in_progress)
+ op1 = copy_to_mode_reg (mode, op1);
+
+ if (FLOAT_MODE_P (mode))
+ {
+ /* If we are loading a floating point constant to a register,
+ force the value to memory now, since we'll get better code
+ out the back end. */
+
+ if (strict)
+ ;
+ else if (GET_CODE (op1) == CONST_DOUBLE)
+ {
+ op1 = validize_mem (force_const_mem (mode, op1));
+ if (!register_operand (op0, mode))
+ {
+ rtx temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
+ emit_move_insn (op0, temp);
+ return;
+ }
+ }
+ }
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
+}
+
+void
+ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
+{
+ rtx op0 = operands[0], op1 = operands[1];
+ /* APPLE LOCAL begin radar 4614623 */
+ cfun->uses_vector = 1;
+ /* APPLE LOCAL end radar 4614623 */
+
+ /* Force constants other than zero into memory. We do not know how
+ the instructions used to build constants modify the upper 64 bits
+ of the register; once we have that information we may be able
+ to handle some of them more efficiently. */
+ if ((reload_in_progress | reload_completed) == 0
+ && register_operand (op0, mode)
+ && CONSTANT_P (op1)
+ && standard_sse_constant_p (op1) <= 0)
+ op1 = validize_mem (force_const_mem (mode, op1));
+
+ /* Make operand1 a register if it isn't already. */
+ if (!no_new_pseudos
+ && !register_operand (op0, mode)
+ && !register_operand (op1, mode))
+ {
+ emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
+ return;
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
+}
+
+/* Implement the movmisalign patterns for SSE. Non-SSE modes go
+ straight to ix86_expand_vector_move. */
+
+void
+ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
+{
+ rtx op0, op1, m;
+
+ op0 = operands[0];
+ op1 = operands[1];
+
+ if (MEM_P (op1))
+ {
+ /* If we're optimizing for size, movups is the smallest. */
+ if (optimize_size)
+ {
+ op0 = gen_lowpart (V4SFmode, op0);
+ op1 = gen_lowpart (V4SFmode, op1);
+ emit_insn (gen_sse_movups (op0, op1));
+ return;
+ }
+
+ /* ??? If we have typed data, then it would appear that using
+ movdqu is the only way to get unaligned data loaded with
+ integer type. */
+ if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ {
+ op0 = gen_lowpart (V16QImode, op0);
+ op1 = gen_lowpart (V16QImode, op1);
+ emit_insn (gen_sse2_movdqu (op0, op1));
+ return;
+ }
+
+ if (TARGET_SSE2 && mode == V2DFmode)
+ {
+ rtx zero;
+
+ /* When SSE registers are split into halves, we can avoid
+ writing to the top half twice. */
+ if (TARGET_SSE_SPLIT_REGS)
+ {
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
+ zero = op0;
+ }
+ else
+ {
+ /* ??? Not sure about the best option for the Intel chips.
+ The following would seem to satisfy; the register is
+ entirely cleared, breaking the dependency chain. We
+ then store to the upper half, with a dependency depth
+ of one. A rumor has it that Intel recommends two movsd
+ followed by an unpacklpd, but this is unconfirmed. And
+ given that the dependency depth of the unpacklpd would
+ still be one, I'm not sure why this would be better. */
+ zero = CONST0_RTX (V2DFmode);
+ }
+
+ m = adjust_address (op1, DFmode, 0);
+ emit_insn (gen_sse2_loadlpd (op0, zero, m));
+ m = adjust_address (op1, DFmode, 8);
+ emit_insn (gen_sse2_loadhpd (op0, op0, m));
+ }
+ else
+ {
+ if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
+ emit_move_insn (op0, CONST0_RTX (mode));
+ else
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
+
+ if (mode != V4SFmode)
+ op0 = gen_lowpart (V4SFmode, op0);
+ m = adjust_address (op1, V2SFmode, 0);
+ emit_insn (gen_sse_loadlps (op0, op0, m));
+ m = adjust_address (op1, V2SFmode, 8);
+ emit_insn (gen_sse_loadhps (op0, op0, m));
+ }
+ }
+ else if (MEM_P (op0))
+ {
+ /* If we're optimizing for size, movups is the smallest. */
+ if (optimize_size)
+ {
+ op0 = gen_lowpart (V4SFmode, op0);
+ op1 = gen_lowpart (V4SFmode, op1);
+ emit_insn (gen_sse_movups (op0, op1));
+ return;
+ }
+
+ /* ??? Similar to above, only less clear because of
+ "typeless stores". */
+ if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
+ && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
+ {
+ op0 = gen_lowpart (V16QImode, op0);
+ op1 = gen_lowpart (V16QImode, op1);
+ emit_insn (gen_sse2_movdqu (op0, op1));
+ return;
+ }
+
+ if (TARGET_SSE2 && mode == V2DFmode)
+ {
+ m = adjust_address (op0, DFmode, 0);
+ emit_insn (gen_sse2_storelpd (m, op1));
+ m = adjust_address (op0, DFmode, 8);
+ emit_insn (gen_sse2_storehpd (m, op1));
+ }
+ else
+ {
+ if (mode != V4SFmode)
+ op1 = gen_lowpart (V4SFmode, op1);
+ m = adjust_address (op0, V2SFmode, 0);
+ emit_insn (gen_sse_storelps (m, op1));
+ m = adjust_address (op0, V2SFmode, 8);
+ emit_insn (gen_sse_storehps (m, op1));
+ }
+ }
+ else
+ gcc_unreachable ();
+}
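+
+/* An illustrative intrinsics sketch (not part of this file) of the two
+   unaligned-load strategies the expander above chooses between: movupd
+   in one instruction, versus a movlpd/movhpd pair that writes each
+   64-bit half separately after breaking the dependency chain.  The
+   function names are hypothetical; the intrinsics are the standard
+   <emmintrin.h> ones.
+
+   #include <emmintrin.h>
+
+   static __m128d
+   load_unaligned_whole (const double *p)
+   {
+     return _mm_loadu_pd (p);           // movupd
+   }
+
+   static __m128d
+   load_unaligned_halves (const double *p)
+   {
+     __m128d x = _mm_setzero_pd ();     // break the dependency chain
+     x = _mm_loadl_pd (x, p);           // movlpd: low 64-bit half
+     x = _mm_loadh_pd (x, p + 1);       // movhpd: high 64-bit half
+     return x;
+   }  */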
+
+/* Expand a push in MODE. This is some mode for which we do not support
+ proper push instructions, at least from the registers that we expect
+ the value to live in. */
+
+void
+ix86_expand_push (enum machine_mode mode, rtx x)
+{
+ rtx tmp;
+
+ tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
+ GEN_INT (-GET_MODE_SIZE (mode)),
+ stack_pointer_rtx, 1, OPTAB_DIRECT);
+ if (tmp != stack_pointer_rtx)
+ emit_move_insn (stack_pointer_rtx, tmp);
+
+ tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
+ emit_move_insn (tmp, x);
+}
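+
+/* A minimal C model of the expansion above (illustrative only; the
+   name expand_push_model is hypothetical): decrement the stack
+   pointer by the mode size, then store through it, instead of using
+   a native push instruction.
+
+   #include <string.h>
+
+   static void
+   expand_push_model (unsigned char **sp, const void *val, size_t size)
+   {
+     *sp -= size;              // sub %esp, size
+     memcpy (*sp, val, size);  // mov value, (%esp)
+   }  */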
+
+/* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
+ destination to use for the operation. If different from the true
+ destination in operands[0], a copy operation will be required. */
+
+rtx
+ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
+ rtx operands[])
+{
+ int matching_memory;
+ rtx src1, src2, dst;
+
+ dst = operands[0];
+ src1 = operands[1];
+ src2 = operands[2];
+
+ /* Recognize <var1> = <value> <op> <var1> for commutative operators.  */
+ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && (rtx_equal_p (dst, src2)
+ || immediate_operand (src1, mode)))
+ {
+ rtx temp = src1;
+ src1 = src2;
+ src2 = temp;
+ }
+
+ /* If the destination is memory, and we do not have matching source
+ operands, do things in registers. */
+ matching_memory = 0;
+ if (GET_CODE (dst) == MEM)
+ {
+ if (rtx_equal_p (dst, src1))
+ matching_memory = 1;
+ else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && rtx_equal_p (dst, src2))
+ matching_memory = 2;
+ else
+ dst = gen_reg_rtx (mode);
+ }
+
+ /* Both source operands cannot be in memory. */
+ if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
+ {
+ if (matching_memory != 2)
+ src2 = force_reg (mode, src2);
+ else
+ src1 = force_reg (mode, src1);
+ }
+
+ /* If the operation is not commutative, source 1 cannot be a constant
+ or non-matching memory. */
+ if ((CONSTANT_P (src1)
+ || (!matching_memory && GET_CODE (src1) == MEM))
+ && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
+ src1 = force_reg (mode, src1);
+
+ operands[1] = src1;
+ operands[2] = src2;
+ return dst;
+}
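+
+/* Worked examples of the fixup above (illustrative only):
+
+     mem1 = mem1 + mem2   dst matches src1; src2 is forced to a register.
+     mem1 = mem2 + mem1   PLUS is commutative, so the sources are swapped
+                          to match dst, then src2 is forced to a register.
+     mem1 = mem2 - mem3   no source matches dst, so dst is replaced by a
+                          new pseudo (the caller copies the result back)
+                          and both memory sources are forced to registers.
+     reg1 = 5 - reg2      MINUS is not commutative, so the constant src1
+                          is forced into a register.  */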
+
+/* Similarly, but assume that the destination has already been
+ set up properly. */
+
+void
+ix86_fixup_binary_operands_no_copy (enum rtx_code code,
+ enum machine_mode mode, rtx operands[])
+{
+ rtx dst = ix86_fixup_binary_operands (code, mode, operands);
+ gcc_assert (dst == operands[0]);
+}
+
+/* Attempt to expand a binary operator.  Make the expansion closer to the
+ actual machine than just general_operand, which would allow 3 separate
+ memory references (one output, two input) in a single insn. */
+
+void
+ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
+ rtx operands[])
+{
+ rtx src1, src2, dst, op, clob;
+
+ dst = ix86_fixup_binary_operands (code, mode, operands);
+ src1 = operands[1];
+ src2 = operands[2];
+
+ /* Emit the instruction. */
+
+ op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
+ if (reload_in_progress)
+ {
+ /* Reload doesn't know about the flags register, and doesn't know that
+ it doesn't want to clobber it. We can only do this with PLUS. */
+ gcc_assert (code == PLUS);
+ emit_insn (op);
+ }
+ else
+ {
+ clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
+ }
+
+ /* Fix up the destination if needed. */
+ if (dst != operands[0])
+ emit_move_insn (operands[0], dst);
+}
+
+/* Return TRUE or FALSE depending on whether the binary operator meets the
+ appropriate constraints. */
+
+int
+ix86_binary_operator_ok (enum rtx_code code,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx operands[3])
+{
+ /* Both source operands cannot be in memory. */
+ if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
+ return 0;
+ /* If the operation is not commutative, source 1 cannot be a constant. */
+ if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
+ return 0;
+ /* If the destination is memory, we must have a matching source operand. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! (rtx_equal_p (operands[0], operands[1])
+ || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
+ && rtx_equal_p (operands[0], operands[2]))))
+ return 0;
+ /* If the operation is not commutative and source 1 is memory, we must
+ have a matching destination. */
+ if (GET_CODE (operands[1]) == MEM
+ && GET_RTX_CLASS (code) != RTX_COMM_ARITH
+ && ! rtx_equal_p (operands[0], operands[1]))
+ return 0;
+ return 1;
+}
+
+/* Attempt to expand a unary operator.  Make the expansion closer to the
+ actual machine than just general_operand, which would allow 2 separate
+ memory references (one output, one input) in a single insn. */
+
+void
+ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
+ rtx operands[])
+{
+ int matching_memory;
+ rtx src, dst, op, clob;
+
+ dst = operands[0];
+ src = operands[1];
+
+ /* If the destination is memory, and we do not have matching source
+ operands, do things in registers. */
+ matching_memory = 0;
+ if (MEM_P (dst))
+ {
+ if (rtx_equal_p (dst, src))
+ matching_memory = 1;
+ else
+ dst = gen_reg_rtx (mode);
+ }
+
+ /* When source operand is memory, destination must match. */
+ if (MEM_P (src) && !matching_memory)
+ src = force_reg (mode, src);
+
+ /* Emit the instruction. */
+
+ op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
+ if (reload_in_progress || code == NOT)
+ {
+ /* Reload doesn't know about the flags register, and doesn't know that
+ it doesn't want to clobber it. */
+ gcc_assert (code == NOT);
+ emit_insn (op);
+ }
+ else
+ {
+ clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
+ }
+
+ /* Fix up the destination if needed. */
+ if (dst != operands[0])
+ emit_move_insn (operands[0], dst);
+}
+
+/* Return TRUE or FALSE depending on whether the unary operator meets the
+ appropriate constraints. */
+
+int
+ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx operands[2] ATTRIBUTE_UNUSED)
+{
+ /* If one of operands is memory, source and destination must match. */
+ if ((GET_CODE (operands[0]) == MEM
+ || GET_CODE (operands[1]) == MEM)
+ && ! rtx_equal_p (operands[0], operands[1]))
+ return FALSE;
+ return TRUE;
+}
+
+/* APPLE LOCAL begin 4176531 4424891 */
+static void
+ix86_expand_vector_move2 (enum machine_mode mode, rtx op0, rtx op1)
+{
+ rtx operands[2];
+ operands[0] = op0;
+ operands[1] = op1;
+ ix86_expand_vector_move (mode, operands);
+}
+
+static rtvec
+gen_2_4_rtvec (int scalars_per_vector, rtx val, enum machine_mode mode)
+{
+ rtvec rval;
+ switch (scalars_per_vector)
+ {
+ case 2: rval = gen_rtvec (2, val, CONST0_RTX (mode));
+ break;
+ case 4: rval = gen_rtvec (4, val, CONST0_RTX (mode),
+ CONST0_RTX (mode), CONST0_RTX (mode));
+ break;
+ default: abort ();
+ }
+ return rval;
+}
+
+/* Convert a DFmode value in an SSE register into an unsigned SImode.
+ When -fpmath=387, this is done with an x87 st(0)_FP->signed-int-64
+ conversion, ignoring the upper 32 bits of the result. On
+ x86_64, there is an equivalent SSE %xmm->signed-int-64 conversion.
+ On x86_32, we don't have the instruction, nor the 64-bit
+ destination register it requires. Do the conversion inline in the
+ SSE registers. Requires SSE2. For x86_32, -mfpmath=sse,
+ !optimize_size only. */
+const char *
+ix86_expand_convert_uns_DF2SI_sse (rtx operands[])
+{
+ rtx int_zero_as_fp, int_maxval_as_fp, int_two31_as_fp;
+ REAL_VALUE_TYPE rvt_zero, rvt_int_maxval, rvt_int_two31;
+ rtx int_zero_as_xmm, int_maxval_as_xmm;
+ rtx fp_value = operands[1];
+ rtx target = operands[0];
+ rtx large_xmm;
+ rtx large_xmm_v2di;
+ rtx le_op;
+ rtx zero_or_two31_xmm;
+ rtx final_result_rtx;
+ rtx v_rtx;
+ rtx incoming_value;
+
+ cfun->uses_vector = 1;
+
+ real_from_integer (&rvt_zero, DFmode, 0ULL, 0ULL, 1);
+ int_zero_as_fp = const_double_from_real_value (rvt_zero, DFmode);
+
+ real_from_integer (&rvt_int_maxval, DFmode, 0xffffffffULL, 0ULL, 1);
+ int_maxval_as_fp = const_double_from_real_value (rvt_int_maxval, DFmode);
+
+ real_from_integer (&rvt_int_two31, DFmode, 0x80000000ULL, 0ULL, 1);
+ int_two31_as_fp = const_double_from_real_value (rvt_int_two31, DFmode);
+
+ incoming_value = force_reg (GET_MODE (operands[1]), operands[1]);
+
+ gcc_assert (ix86_preferred_stack_boundary >= 128);
+
+ fp_value = gen_reg_rtx (V2DFmode);
+ ix86_expand_vector_move2 (V2DFmode, fp_value,
+ gen_rtx_SUBREG (V2DFmode, incoming_value, 0));
+ large_xmm = gen_reg_rtx (V2DFmode);
+
+ v_rtx = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_two31_as_fp, DFmode));
+ ix86_expand_vector_move2 (DFmode, large_xmm, v_rtx);
+ le_op = gen_rtx_fmt_ee (LE, V2DFmode,
+ gen_rtx_SUBREG (V2DFmode, fp_value, 0), large_xmm);
+ /* large_xmm = (fp_value >= 2**31) ? -1 : 0 ; */
+ emit_insn (gen_sse2_vmmaskcmpv2df3 (large_xmm, large_xmm, fp_value, le_op));
+
+ int_maxval_as_xmm = gen_reg_rtx (V2DFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_maxval_as_fp, DFmode));
+ ix86_expand_vector_move2 (DFmode, int_maxval_as_xmm, v_rtx);
+
+ emit_insn (gen_sse2_vmsminv2df3 (fp_value, fp_value, int_maxval_as_xmm));
+
+ int_zero_as_xmm = gen_reg_rtx (V2DFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_zero_as_fp, DFmode));
+
+ ix86_expand_vector_move2 (DFmode, int_zero_as_xmm, v_rtx);
+
+ emit_insn (gen_sse2_vmsmaxv2df3 (fp_value, fp_value, int_zero_as_xmm));
+
+ zero_or_two31_xmm = gen_reg_rtx (V2DFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_two31_as_fp, DFmode));
+ ix86_expand_vector_move2 (DFmode, zero_or_two31_xmm, v_rtx);
+
+ /* zero_or_two31 = (large_xmm) ? 2**31 : 0; */
+ emit_insn (gen_andv2df3 (zero_or_two31_xmm, zero_or_two31_xmm, large_xmm));
+ /* if (large_xmm) fp_value -= 2**31; */
+ emit_insn (gen_subv2df3 (fp_value, fp_value, zero_or_two31_xmm));
+ /* assert (0 <= fp_value && fp_value < 2**31);
+ int_result = trunc (fp_value); */
+ final_result_rtx = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_cvttpd2dq (final_result_rtx, fp_value));
+
+ large_xmm_v2di = gen_reg_rtx (V2DImode);
+ emit_move_insn (large_xmm_v2di, gen_rtx_SUBREG (V2DImode, large_xmm, 0));
+ emit_insn (gen_ashlv2di3 (large_xmm_v2di, large_xmm_v2di,
+ gen_rtx_CONST_INT (SImode, 31)));
+
+ emit_insn (gen_xorv4si3 (final_result_rtx, final_result_rtx,
+ gen_rtx_SUBREG (V4SImode, large_xmm_v2di, 0)));
+ if (!rtx_equal_p (target, final_result_rtx))
+ emit_insn (gen_sse2_stored (target, final_result_rtx));
+ return "";
+}
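+
+/* A scalar C model of the branch-free sequence above (illustrative
+   only; uns_DF2SI_model is a hypothetical name, and the NaN case is
+   left to the hardware semantics of cvttpd2dq):
+
+   #include <stdint.h>
+
+   static uint32_t
+   uns_DF2SI_model (double x)
+   {
+     int large = (x >= 2147483648.0);         // vmmaskcmp mask
+     if (x > 4294967295.0) x = 4294967295.0;  // vmsmin: clamp high
+     if (x < 0.0) x = 0.0;                    // vmsmax: clamp low
+     if (large) x -= 2147483648.0;            // and/sub: debias by 2**31
+     uint32_t r = (uint32_t) (int32_t) x;     // cvttpd2dq (signed truncate)
+     return r ^ (large ? 0x80000000u : 0);    // shift/xor: restore the bias
+   }  */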
+
+/* Convert a SFmode value in an SSE register into an unsigned DImode.
+ When -fpmath=387, this is done with an x87 st(0)_FP->signed-int-64
+ conversion, and subsequently ignoring the upper 32 bits of the
+ result. On x86_64, there is an equivalent SSE %xmm->signed-int-64
+ conversion. On x86_32, we don't have the instruction, nor the
+ 64-bit destination register it requires. Do the conversion inline
+ in the SSE registers. Requires SSE2. For x86_32, -mfpmath=sse,
+ !optimize_size only. */
+const char *
+ix86_expand_convert_uns_SF2SI_sse (rtx operands[])
+{
+ rtx int_zero_as_fp, int_two31_as_fp, int_two32_as_fp;
+ REAL_VALUE_TYPE rvt_zero, rvt_int_two31, rvt_int_two32;
+ rtx int_zero_as_xmm;
+ rtx fp_value = operands[1];
+ rtx target = operands[0];
+ rtx large_xmm;
+ rtx two31_xmm, two32_xmm;
+ rtx above_two31_xmm, above_two32_xmm;
+ rtx zero_or_two31_SI_xmm;
+ rtx le_op;
+ rtx zero_or_two31_SF_xmm;
+ rtx int_result_xmm;
+ rtx v_rtx;
+ rtx incoming_value;
+
+ cfun->uses_vector = 1;
+
+ real_from_integer (&rvt_zero, SFmode, 0ULL, 0ULL, 1);
+ int_zero_as_fp = const_double_from_real_value (rvt_zero, SFmode);
+
+ real_from_integer (&rvt_int_two31, SFmode, 0x80000000ULL, 0ULL, 1);
+ int_two31_as_fp = const_double_from_real_value (rvt_int_two31, SFmode);
+
+ real_from_integer (&rvt_int_two32, SFmode, (HOST_WIDE_INT)0x100000000ULL,
+ 0ULL, 1);
+ int_two32_as_fp = const_double_from_real_value (rvt_int_two32, SFmode);
+
+ incoming_value = force_reg (GET_MODE (operands[1]), operands[1]);
+
+ gcc_assert (ix86_preferred_stack_boundary >= 128);
+
+ fp_value = gen_reg_rtx (V4SFmode);
+ ix86_expand_vector_move2 (V4SFmode, fp_value,
+ gen_rtx_SUBREG (V4SFmode, incoming_value, 0));
+ large_xmm = gen_reg_rtx (V4SFmode);
+
+ /* fp_value = MAX (fp_value, 0.0); */
+ /* Preclude negative values; truncate at zero. */
+ int_zero_as_xmm = gen_reg_rtx (V4SFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V4SFmode,
+ gen_2_4_rtvec (4, int_zero_as_fp, SFmode));
+ ix86_expand_vector_move2 (SFmode, int_zero_as_xmm, v_rtx);
+ emit_insn (gen_sse_vmsmaxv4sf3 (fp_value, fp_value, int_zero_as_xmm));
+
+ /* two31_xmm = 0x80000000; */
+ two31_xmm = gen_reg_rtx (V4SFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V4SFmode,
+ gen_2_4_rtvec (4, int_two31_as_fp, SFmode));
+ ix86_expand_vector_move2 (SFmode, two31_xmm, v_rtx);
+
+ /* zero_or_two31_SF_xmm = 0x80000000; */
+ zero_or_two31_SF_xmm = gen_reg_rtx (V4SFmode);
+ ix86_expand_vector_move2 (SFmode, zero_or_two31_SF_xmm, two31_xmm);
+
+ /* above_two31_xmm = (fp_value >= 2**31) ? 0xffff_ffff : 0 ; */
+ above_two31_xmm = gen_reg_rtx (V4SFmode);
+ ix86_expand_vector_move2 (SFmode, above_two31_xmm, two31_xmm);
+ le_op = gen_rtx_fmt_ee (LE, V4SFmode, above_two31_xmm,
+ gen_rtx_SUBREG (V4SFmode, two31_xmm, 0));
+ emit_insn (gen_sse_vmmaskcmpv4sf3 (above_two31_xmm, above_two31_xmm,
+ fp_value, le_op));
+
+ /* two32_xmm = 0x1_0000_0000; */
+ two32_xmm = gen_reg_rtx (V4SFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V4SFmode,
+ gen_2_4_rtvec (4, int_two32_as_fp, SFmode));
+ ix86_expand_vector_move2 (SFmode, two32_xmm, v_rtx);
+
+ /* above_two32_xmm = (fp_value >= 2**32) ? 0xffff_ffff : 0 ; */
+ above_two32_xmm = gen_reg_rtx (V4SFmode);
+ ix86_expand_vector_move2 (SFmode, above_two32_xmm, two32_xmm);
+ le_op = gen_rtx_fmt_ee (LE, V4SFmode, above_two32_xmm,
+ gen_rtx_SUBREG (V4SFmode, two32_xmm, 0));
+ emit_insn (gen_sse_vmmaskcmpv4sf3 (above_two32_xmm, above_two32_xmm,
+ fp_value, le_op));
+
+ /* zero_or_two31_SF_xmm = (above_two31_xmm) ? 2**31 : 0; */
+ emit_insn (gen_andv4sf3 (zero_or_two31_SF_xmm, zero_or_two31_SF_xmm,
+ above_two31_xmm));
+
+ /* zero_or_two31_SI_xmm = (above_two31_xmm << 31),
+ i.e. 0x8000_0000 where fp_value >= 2**31, else 0; */
+ zero_or_two31_SI_xmm = gen_reg_rtx (V4SImode);
+ emit_move_insn (zero_or_two31_SI_xmm,
+ gen_rtx_SUBREG (V4SImode, above_two31_xmm, 0));
+ emit_insn (gen_ashlv4si3 (zero_or_two31_SI_xmm, zero_or_two31_SI_xmm,
+ gen_rtx_CONST_INT (SImode, 31)));
+
+ /* if (above_two31_xmm) fp_value -= 2**31; */
+ /* If the input FP value is greater than 2**31, subtract that amount
+ from the FP value before conversion. We'll re-add that amount as
+ an integer after the conversion. */
+ emit_insn (gen_subv4sf3 (fp_value, fp_value, zero_or_two31_SF_xmm));
+
+ /* assert (0.0 <= fp_value && fp_value < 2**31);
+ int_result_xmm = trunc (fp_value); */
+ /* Apply the SSE double -> signed_int32 conversion to our biased,
+ clamped SF value. */
+ int_result_xmm = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_cvttps2dq (int_result_xmm, fp_value));
+
+ /* int_result_xmm += zero_or_two31_SI_xmm; */
+ /* Restore the 2**31 bias we may have subtracted earlier. If the
+ input FP value was between 2**31 and 2**32, this will unbias the
+ result.
+
+ input_fp_value < 2**31: this won't change the value
+ 2**31 <= input_fp_value < 2**32:
+ this will restore the 2**31 bias we subtracted earlier
+ input_fp_value >= 2**32: this insn doesn't matter;
+ the next insn will clobber this result
+ */
+ emit_insn (gen_addv4si3 (int_result_xmm, int_result_xmm,
+ zero_or_two31_SI_xmm));
+
+ /* int_result_xmm |= above_two32_xmm; */
+ /* If the input value was greater than 2**32, force the integral
+ result to 0xffff_ffff. */
+ emit_insn (gen_iorv4si3 (int_result_xmm, int_result_xmm,
+ gen_rtx_SUBREG (V4SImode, above_two32_xmm, 0)));
+
+ if (!rtx_equal_p (target, int_result_xmm))
+ emit_insn (gen_sse2_stored (target, int_result_xmm));
+ return "";
+}
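+
+/* The SFmode variant above, as a scalar C model (illustrative only;
+   values at or beyond 2**32 saturate to 0xffffffff via the final OR):
+
+   #include <stdint.h>
+
+   static uint32_t
+   uns_SF2SI_model (float x)
+   {
+     if (x < 0.0f) x = 0.0f;                           // clamp negatives
+     uint32_t above31 = x >= 2147483648.0f ? ~0u : 0;  // >= 2**31 mask
+     uint32_t above32 = x >= 4294967296.0f ? ~0u : 0;  // >= 2**32 mask
+     if (above31) x -= 2147483648.0f;                  // debias by 2**31
+     uint32_t r = (uint32_t) (int32_t) x;              // cvttps2dq
+     r += above31 & 0x80000000u;                       // restore the bias
+     return r | above32;                               // >= 2**32 saturates
+   }  */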
+
+/* Convert an unsigned DImode value into a DFmode, using only SSE.
+ Expects the 64-bit DImode to be supplied as two 32-bit parts in two
+ SSE %xmm registers; result returned in an %xmm register. Requires
+ SSE2; will use SSE3 if available. For x86_32, -mfpmath=sse,
+ !optimize_size only. */
+const char *
+ix86_expand_convert_uns_DI2DF_sse (rtx operands[])
+{
+ REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
+ rtx bias_lo_rtx, bias_hi_rtx;
+ rtx target = operands[0];
+ rtx fp_value = operands[1];
+ rtx fp_value_hi, fp_value_lo;
+ rtx fp_value_hi_xmm, fp_value_lo_xmm;
+ rtx int_xmm;
+ rtx final_result_xmm, result_lo_xmm;
+ rtx biases, exponents;
+ rtvec biases_rtvec, exponents_rtvec;
+
+ cfun->uses_vector = 1;
+
+ gcc_assert (ix86_preferred_stack_boundary >= 128);
+
+ int_xmm = gen_reg_rtx (V4SImode);
+
+ fp_value = force_reg (GET_MODE (operands[1]), operands[1]);
+
+ fp_value_lo = gen_rtx_SUBREG (SImode, fp_value, 0);
+ fp_value_lo_xmm = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_loadld (fp_value_lo_xmm, CONST0_RTX (V4SImode),
+ fp_value_lo));
+
+ fp_value_hi = gen_rtx_SUBREG (SImode, fp_value, 4);
+ fp_value_hi_xmm = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_loadld (fp_value_hi_xmm, CONST0_RTX (V4SImode),
+ fp_value_hi));
+
+ ix86_expand_vector_move2 (V4SImode, int_xmm, fp_value_hi_xmm);
+ emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, fp_value_lo_xmm));
+
+ exponents_rtvec = gen_rtvec (4, GEN_INT (0x45300000UL),
+ GEN_INT (0x43300000UL),
+ CONST0_RTX (SImode), CONST0_RTX (SImode));
+ exponents = validize_mem (
+ force_const_mem (V4SImode, gen_rtx_CONST_VECTOR (V4SImode,
+ exponents_rtvec)));
+ emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
+
+ final_result_xmm = gen_reg_rtx (V2DFmode);
+ ix86_expand_vector_move2 (V2DFmode, final_result_xmm,
+ gen_rtx_SUBREG (V2DFmode, int_xmm, 0));
+
+ /* Integral versions of the DFmode 'exponents' above. */
+ REAL_VALUE_FROM_INT (bias_lo_rvt, 0x00000000000000ULL, 0x100000ULL, DFmode);
+ REAL_VALUE_FROM_INT (bias_hi_rvt, 0x10000000000000ULL, 0x000000ULL, DFmode);
+ bias_lo_rtx = CONST_DOUBLE_FROM_REAL_VALUE (bias_lo_rvt, DFmode);
+ bias_hi_rtx = CONST_DOUBLE_FROM_REAL_VALUE (bias_hi_rvt, DFmode);
+ biases_rtvec = gen_rtvec (2, bias_lo_rtx, bias_hi_rtx);
+ biases = validize_mem (force_const_mem (V2DFmode,
+ gen_rtx_CONST_VECTOR (V2DFmode,
+ biases_rtvec)));
+ emit_insn (gen_subv2df3 (final_result_xmm, final_result_xmm, biases));
+
+ if (TARGET_SSE3)
+ {
+ emit_insn (gen_sse3_haddv2df3 (final_result_xmm, final_result_xmm,
+ final_result_xmm));
+ }
+ else
+ {
+ result_lo_xmm = gen_reg_rtx (V2DFmode);
+ ix86_expand_vector_move2 (V2DFmode, result_lo_xmm, final_result_xmm);
+ emit_insn (gen_sse2_unpckhpd (final_result_xmm, final_result_xmm,
+ final_result_xmm));
+ emit_insn (gen_addv2df3 (final_result_xmm, final_result_xmm,
+ result_lo_xmm));
+ }
+
+ if (!rtx_equal_p (target, final_result_xmm))
+ emit_move_insn (target, gen_rtx_SUBREG (DFmode, final_result_xmm, 0));
+
+ return "";
+}
+/* APPLE LOCAL end 4176531 4424891 */
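+
+/* The magic exponents above are the high words of 2**52 (0x43300000)
+   and 2**84 (0x45300000).  A scalar C99 model of the trick
+   (illustrative only; uns_DI2DF_model is a hypothetical name):
+
+   #include <stdint.h>
+   #include <string.h>
+
+   static double
+   uns_DI2DF_model (uint64_t v)
+   {
+     uint64_t lo_bits = 0x4330000000000000ull | (uint32_t) v;
+     uint64_t hi_bits = 0x4530000000000000ull | (uint32_t) (v >> 32);
+     double lo, hi;                 // lo == 2**52 + low word
+     memcpy (&lo, &lo_bits, 8);     // hi == 2**84 + high word * 2**32
+     memcpy (&hi, &hi_bits, 8);
+     // Subtract the 'biases' vector, then sum the two lanes
+     // (haddpd with SSE3, or unpckhpd + addpd without):
+     return (hi - 0x1p84) + (lo - 0x1p52);
+   }  */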
+
+/* APPLE LOCAL begin 4424891 */
+/* Convert an unsigned SImode value into a DFmode, using only SSE.
+ Result returned in an %xmm register. For x86_32, -mfpmath=sse,
+ !optimize_size only. */
+const char *
+ix86_expand_convert_uns_SI2DF_sse (rtx operands[])
+{
+ REAL_VALUE_TYPE rvt_int_two31;
+ rtx int_value_reg;
+ rtx fp_value_xmm, fp_value_as_int_xmm;
+ rtx final_result_xmm;
+ rtx int_two31_as_fp, int_two31_as_fp_vec;
+ rtx v_rtx;
+ rtx target = operands[0];
+
+ gcc_assert (ix86_preferred_stack_boundary >= 128);
+ gcc_assert (GET_MODE (operands[1]) == SImode);
+
+ cfun->uses_vector = 1;
+
+ int_value_reg = gen_reg_rtx (SImode);
+ emit_move_insn (int_value_reg, operands[1]);
+ emit_insn (gen_addsi3 (int_value_reg, int_value_reg,
+ GEN_INT (-2147483648LL /* INT_MIN */)));
+
+ fp_value_as_int_xmm = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_loadld (fp_value_as_int_xmm, CONST0_RTX (V4SImode),
+ int_value_reg));
+
+ fp_value_xmm = gen_reg_rtx (V2DFmode);
+ emit_insn (gen_sse2_cvtdq2pd (fp_value_xmm,
+ gen_rtx_SUBREG (V4SImode,
+ fp_value_as_int_xmm, 0)));
+
+ real_from_integer (&rvt_int_two31, DFmode, 0x80000000ULL, 0ULL, 1);
+ int_two31_as_fp = const_double_from_real_value (rvt_int_two31, DFmode);
+ v_rtx = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_two31_as_fp, DFmode));
+
+ int_two31_as_fp_vec = validize_mem (force_const_mem (V2DFmode, v_rtx));
+
+ final_result_xmm = gen_reg_rtx (V2DFmode);
+ emit_move_insn (final_result_xmm, fp_value_xmm);
+ emit_insn (gen_sse2_vmaddv2df3 (final_result_xmm, final_result_xmm,
+ int_two31_as_fp_vec));
+
+ if (!rtx_equal_p (target, final_result_xmm))
+ emit_move_insn (target, gen_rtx_SUBREG (DFmode, final_result_xmm, 0));
+
+ return "";
+}
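+
+/* A scalar C model of the bias trick above (illustrative only, and
+   assuming the usual two's-complement wrap on the signed conversion):
+   adding INT_MIN maps [0, 2**32) onto the signed range cvtdq2pd
+   accepts, and adding 2**31 back afterwards is exact in double.
+
+   #include <stdint.h>
+
+   static double
+   uns_SI2DF_model (uint32_t v)
+   {
+     int32_t biased = (int32_t) (v + 0x80000000u);  // addsi3 with -2**31
+     return (double) biased + 2147483648.0;         // cvtdq2pd, then add
+   }  */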
+
+/* Convert a signed DImode value into a DFmode, using only SSE.
+ Result returned in an %xmm register. For x86_32, -mfpmath=sse,
+ !optimize_size only. */
+const char *
+ix86_expand_convert_sign_DI2DF_sse (rtx operands[])
+{
+ rtx my_operands[2];
+ REAL_VALUE_TYPE rvt_int_two32;
+ rtx rvt_int_two32_vec;
+ rtx fp_value_hi_xmm, fp_value_hi_shifted_xmm;
+ rtx final_result_xmm;
+ rtx int_two32_as_fp, int_two32_as_fp_vec;
+ rtx target = operands[0];
+ rtx input = force_reg (DImode, operands[1]);
+
+ gcc_assert (ix86_preferred_stack_boundary >= 128);
+ gcc_assert (GET_MODE (input) == DImode);
+
+ cfun->uses_vector = 1;
+
+ fp_value_hi_xmm = gen_reg_rtx (V2DFmode);
+ emit_insn (gen_sse2_cvtsi2sd (fp_value_hi_xmm, fp_value_hi_xmm,
+ gen_rtx_SUBREG (SImode, input, 4)));
+
+ real_from_integer (&rvt_int_two32, DFmode, 0x100000000ULL, 0ULL, 1);
+ int_two32_as_fp = const_double_from_real_value (rvt_int_two32, DFmode);
+ rvt_int_two32_vec = gen_rtx_CONST_VECTOR (V2DFmode,
+ gen_2_4_rtvec (2, int_two32_as_fp, DFmode));
+
+ int_two32_as_fp_vec = validize_mem (force_const_mem (V2DFmode,
+ rvt_int_two32_vec));
+
+ fp_value_hi_shifted_xmm = gen_reg_rtx (V2DFmode);
+ emit_move_insn (fp_value_hi_shifted_xmm, fp_value_hi_xmm);
+ emit_insn (gen_sse2_vmmulv2df3 (fp_value_hi_shifted_xmm,
+ fp_value_hi_shifted_xmm,
+ int_two32_as_fp_vec));
+
+ my_operands[0] = gen_reg_rtx (DFmode);
+ my_operands[1] = gen_rtx_SUBREG (SImode, input, 0);
+ (void) ix86_expand_convert_uns_SI2DF_sse (my_operands);
+
+ final_result_xmm = REG_P (target) && GET_MODE (target) == V2DFmode
+ ? target : gen_reg_rtx (V2DFmode);
+ emit_move_insn (final_result_xmm, gen_rtx_SUBREG (V2DFmode,
+ my_operands[0], 0));
+ emit_insn (gen_sse2_vmaddv2df3 (final_result_xmm, final_result_xmm,
+ fp_value_hi_shifted_xmm));
+
+ if (!rtx_equal_p (target, final_result_xmm))
+ emit_move_insn (target, gen_rtx_SUBREG (DFmode, final_result_xmm, 0));
+
+ return "";
+}
+/* APPLE LOCAL end 4424891 */
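+
+/* A scalar C model of the signed variant above (illustrative only;
+   assumes an arithmetic right shift for negative values): the high
+   half is converted as a signed 32-bit quantity scaled by 2**32
+   (cvtsi2sd + mulsd), the low half as an unsigned 32-bit quantity,
+   and the two are summed.
+
+   #include <stdint.h>
+
+   static double
+   sign_DI2DF_model (int64_t v)
+   {
+     double hi = (double) (int32_t) (v >> 32) * 4294967296.0;
+     double lo = (double) (uint32_t) v;  // the unsigned SI2DF step above
+     return hi + lo;
+   }  */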
+
+/* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
+ Create a mask for the sign bit in MODE for an SSE register. If VECT is
+ true, then replicate the mask for all elements of the vector register.
+ If INVERT is true, then create a mask excluding the sign bit. */
+
+rtx
+ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
+{
+ enum machine_mode vec_mode;
+ HOST_WIDE_INT hi, lo;
+ int shift = 63;
+ rtvec v;
+ rtx mask;
+
+ /* Find the sign bit, sign extended to 2*HWI. */
+ if (mode == SFmode)
+ lo = 0x80000000, hi = lo < 0;
+ else if (HOST_BITS_PER_WIDE_INT >= 64)
+ lo = (HOST_WIDE_INT)1 << shift, hi = -1;
+ else
+ lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
+
+ if (invert)
+ lo = ~lo, hi = ~hi;
+
+ /* Force this value into the low part of a fp vector constant. */
+ mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
+ mask = gen_lowpart (mode, mask);
+
+ if (mode == SFmode)
+ {
+ if (vect)
+ v = gen_rtvec (4, mask, mask, mask, mask);
+ else
+ v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
+ CONST0_RTX (SFmode), CONST0_RTX (SFmode));
+ vec_mode = V4SFmode;
+ }
+ else
+ {
+ if (vect)
+ v = gen_rtvec (2, mask, mask);
+ else
+ v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
+ vec_mode = V2DFmode;
+ }
+
+ return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
+}
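+
+/* How the masks built above get used (see ix86_expand_fp_absneg_operator
+   below), as an illustrative intrinsics sketch with hypothetical names:
+   ABS clears the sign bit by ANDing with the inverted mask, NEG flips
+   it by XORing with the mask itself.
+
+   #include <emmintrin.h>
+
+   static __m128d
+   fabs_model (__m128d x)          // invert == true
+   {
+     __m128d m = _mm_castsi128_pd
+       (_mm_set1_epi64x (0x7fffffffffffffffll));
+     return _mm_and_pd (x, m);     // andpd
+   }
+
+   static __m128d
+   fneg_model (__m128d x)          // invert == false
+   {
+     __m128d m = _mm_castsi128_pd
+       (_mm_set1_epi64x ((long long) 0x8000000000000000ull));
+     return _mm_xor_pd (x, m);     // xorpd
+   }  */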
+
+/* Generate code for floating point ABS or NEG. */
+
+void
+ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
+ rtx operands[])
+{
+ rtx mask, set, use, clob, dst, src;
+ bool matching_memory;
+ bool use_sse = false;
+ bool vector_mode = VECTOR_MODE_P (mode);
+ enum machine_mode elt_mode = mode;
+
+ if (vector_mode)
+ {
+ elt_mode = GET_MODE_INNER (mode);
+ use_sse = true;
+ }
+ else if (TARGET_SSE_MATH)
+ use_sse = SSE_FLOAT_MODE_P (mode);
+
+ /* NEG and ABS performed with SSE use bitwise mask operations.
+ Create the appropriate mask now. */
+ if (use_sse)
+ mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
+ else
+ mask = NULL_RTX;
+
+ dst = operands[0];
+ src = operands[1];
+
+ /* If the destination is memory, and we don't have matching source
+ operands or we're using the x87, do things in registers. */
+ matching_memory = false;
+ if (MEM_P (dst))
+ {
+ if (use_sse && rtx_equal_p (dst, src))
+ matching_memory = true;
+ else
+ dst = gen_reg_rtx (mode);
+ }
+ if (MEM_P (src) && !matching_memory)
+ src = force_reg (mode, src);
+
+ if (vector_mode)
+ {
+ set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
+ set = gen_rtx_SET (VOIDmode, dst, set);
+ emit_insn (set);
+ }
+ else
+ {
+ set = gen_rtx_fmt_e (code, mode, src);
+ set = gen_rtx_SET (VOIDmode, dst, set);
+ if (mask)
+ {
+ use = gen_rtx_USE (VOIDmode, mask);
+ clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (3, set, use, clob)));
+ }
+ else
+ emit_insn (set);
+ }
+
+ if (dst != operands[0])
+ emit_move_insn (operands[0], dst);
+}
+
+/* Expand a copysign operation. Special case operand 0 being a constant. */
+
+void
+ix86_expand_copysign (rtx operands[])
+{
+ enum machine_mode mode, vmode;
+ rtx dest, op0, op1, mask, nmask;
+
+ dest = operands[0];
+ op0 = operands[1];
+ op1 = operands[2];
+
+ mode = GET_MODE (dest);
+ vmode = mode == SFmode ? V4SFmode : V2DFmode;
+
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ {
+ rtvec v;
+
+ if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
+ op0 = simplify_unary_operation (ABS, mode, op0, mode);
+
+ if (op0 == CONST0_RTX (mode))
+ op0 = CONST0_RTX (vmode);
+ else
+ {
+ if (mode == SFmode)
+ v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
+ CONST0_RTX (SFmode), CONST0_RTX (SFmode));
+ else
+ v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
+ op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
+ }
+
+ mask = ix86_build_signbit_mask (mode, 0, 0);
+
+ if (mode == SFmode)
+ emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
+ else
+ emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
+ }
+ else
+ {
+ nmask = ix86_build_signbit_mask (mode, 0, 1);
+ mask = ix86_build_signbit_mask (mode, 0, 0);
+
+ if (mode == SFmode)
+ emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
+ else
+ emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
+ }
+}
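+
+/* A scalar C model of the mask-based copysign that the split routines
+   below implement with AND/ANDN/OR (illustrative only):
+   result = (magnitude & ~SIGNBIT) | (sign_source & SIGNBIT).
+
+   #include <stdint.h>
+   #include <string.h>
+
+   static double
+   copysign_model (double mag, double sgn)
+   {
+     uint64_t m, s;
+     memcpy (&m, &mag, 8);
+     memcpy (&s, &sgn, 8);
+     m = (m & ~0x8000000000000000ull)   // nmask: all but the sign bit
+       | (s &  0x8000000000000000ull);  // mask: the sign bit alone
+     memcpy (&mag, &m, 8);
+     return mag;
+   }  */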
+
+/* Deconstruct a copysign operation into bit masks. Operand 0 is known to
+ be a constant, and so has already been expanded into a vector constant. */
+
+void
+ix86_split_copysign_const (rtx operands[])
+{
+ enum machine_mode mode, vmode;
+ rtx dest, op0, op1, mask, x;
+
+ dest = operands[0];
+ op0 = operands[1];
+ op1 = operands[2];
+ mask = operands[3];
+
+ mode = GET_MODE (dest);
+ vmode = GET_MODE (mask);
+
+ dest = simplify_gen_subreg (vmode, dest, mode, 0);
+ x = gen_rtx_AND (vmode, dest, mask);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+
+ if (op0 != CONST0_RTX (vmode))
+ {
+ x = gen_rtx_IOR (vmode, dest, op0);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+}
+
+/* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
+ so we have to do two masks. */
+
+void
+ix86_split_copysign_var (rtx operands[])
+{
+ enum machine_mode mode, vmode;
+ rtx dest, scratch, op0, op1, mask, nmask, x;
+
+ dest = operands[0];
+ scratch = operands[1];
+ op0 = operands[2];
+ op1 = operands[3];
+ nmask = operands[4];
+ mask = operands[5];
+
+ mode = GET_MODE (dest);
+ vmode = GET_MODE (mask);
+
+ if (rtx_equal_p (op0, op1))
+ {
+ /* Shouldn't happen often (it's useless, obviously), but when it does
+ we'd generate incorrect code if we continue below. */
+ emit_move_insn (dest, op0);
+ return;
+ }
+
+ if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
+ {
+ gcc_assert (REGNO (op1) == REGNO (scratch));
+
+ x = gen_rtx_AND (vmode, scratch, mask);
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ dest = mask;
+ op0 = simplify_gen_subreg (vmode, op0, mode, 0);
+ x = gen_rtx_NOT (vmode, dest);
+ x = gen_rtx_AND (vmode, x, op0);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+ else
+ {
+ if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
+ {
+ x = gen_rtx_AND (vmode, scratch, mask);
+ }
+ else /* alternative 2,4 */
+ {
+ gcc_assert (REGNO (mask) == REGNO (scratch));
+ op1 = simplify_gen_subreg (vmode, op1, mode, 0);
+ x = gen_rtx_AND (vmode, scratch, op1);
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
+ {
+ dest = simplify_gen_subreg (vmode, op0, mode, 0);
+ x = gen_rtx_AND (vmode, dest, nmask);
+ }
+ else /* alternative 3,4 */
+ {
+ gcc_assert (REGNO (nmask) == REGNO (dest));
+ dest = nmask;
+ op0 = simplify_gen_subreg (vmode, op0, mode, 0);
+ x = gen_rtx_AND (vmode, dest, op0);
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+
+ x = gen_rtx_IOR (vmode, dest, scratch);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+}
+
+/* Return TRUE or FALSE depending on whether the first SET in INSN
+ has source and destination with matching CC modes, and whether the
+ CC mode is at least as constrained as REQ_MODE. */
+
+int
+ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
+{
+ rtx set;
+ enum machine_mode set_mode;
+
+ set = PATTERN (insn);
+ if (GET_CODE (set) == PARALLEL)
+ set = XVECEXP (set, 0, 0);
+ gcc_assert (GET_CODE (set) == SET);
+ gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
+
+ set_mode = GET_MODE (SET_DEST (set));
+ switch (set_mode)
+ {
+ case CCNOmode:
+ if (req_mode != CCNOmode
+ && (req_mode != CCmode
+ || XEXP (SET_SRC (set), 1) != const0_rtx))
+ return 0;
+ break;
+ case CCmode:
+ if (req_mode == CCGCmode)
+ return 0;
+ /* FALLTHRU */
+ case CCGCmode:
+ if (req_mode == CCGOCmode || req_mode == CCNOmode)
+ return 0;
+ /* FALLTHRU */
+ case CCGOCmode:
+ if (req_mode == CCZmode)
+ return 0;
+ /* FALLTHRU */
+ case CCZmode:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return (GET_MODE (SET_SRC (set)) == set_mode);
+}
+
+/* Generate insn patterns to do an integer compare of OPERANDS. */
+
+static rtx
+ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
+{
+ enum machine_mode cmpmode;
+ rtx tmp, flags;
+
+ cmpmode = SELECT_CC_MODE (code, op0, op1);
+ flags = gen_rtx_REG (cmpmode, FLAGS_REG);
+
+ /* This is very simple, but making the interface the same as in the
+ FP case makes the rest of the code easier. */
+ tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
+
+ /* Return the test that should be put into the flags user, i.e.
+ the bcc, scc, or cmov instruction. */
+ return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
+}
+
+/* Figure out whether to use ordered or unordered fp comparisons.
+ Return the appropriate mode to use. */
+
+enum machine_mode
+ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
+{
+ /* ??? In order to make all comparisons reversible, we do all comparisons
+ non-trapping when compiling for IEEE. Once gcc is able to distinguish
+ all forms of trapping and nontrapping comparisons, we can make inequality
+ comparisons trapping again, since it results in better code when using
+ FCOM based compares. */
+ return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
+}
+
+enum machine_mode
+ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
+{
+ if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
+ return ix86_fp_compare_mode (code);
+ switch (code)
+ {
+ /* Only zero flag is needed. */
+ case EQ: /* ZF=0 */
+ case NE: /* ZF!=0 */
+ return CCZmode;
+ /* Codes needing carry flag. */
+ case GEU: /* CF=0 */
+ case GTU: /* CF=0 & ZF=0 */
+ case LTU: /* CF=1 */
+ case LEU: /* CF=1 | ZF=1 */
+ return CCmode;
+ /* Codes possibly doable only with sign flag when
+ comparing against zero. */
+ case GE: /* SF=OF or SF=0 */
+ case LT: /* SF<>OF or SF=1 */
+ if (op1 == const0_rtx)
+ return CCGOCmode;
+ else
+ /* For other cases Carry flag is not required. */
+ return CCGCmode;
+ /* Codes doable only with sign flag when comparing
+ against zero, but we miss jump instruction for it
+ so we need to use relational tests against overflow
+ that thus needs to be zero. */
+ case GT: /* ZF=0 & SF=OF */
+ case LE: /* ZF=1 | SF<>OF */
+ if (op1 == const0_rtx)
+ return CCNOmode;
+ else
+ return CCGCmode;
+ /* The strcmp pattern does (use flags), and combine may ask us for the
+ proper mode. */
+ case USE:
+ return CCmode;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return the fixed registers used for condition codes. */
+
+static bool
+ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+{
+ *p1 = FLAGS_REG;
+ *p2 = FPSR_REG;
+ return true;
+}
+
+/* If two condition code modes are compatible, return a condition code
+ mode which is compatible with both. Otherwise, return
+ VOIDmode. */
+
+static enum machine_mode
+ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
+{
+ if (m1 == m2)
+ return m1;
+
+ if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
+ return VOIDmode;
+
+ if ((m1 == CCGCmode && m2 == CCGOCmode)
+ || (m1 == CCGOCmode && m2 == CCGCmode))
+ return CCGCmode;
+
+ switch (m1)
+ {
+ default:
+ gcc_unreachable ();
+
+ case CCmode:
+ case CCGCmode:
+ case CCGOCmode:
+ case CCNOmode:
+ case CCZmode:
+ switch (m2)
+ {
+ default:
+ return VOIDmode;
+
+ case CCmode:
+ case CCGCmode:
+ case CCGOCmode:
+ case CCNOmode:
+ case CCZmode:
+ return CCmode;
+ }
+
+ case CCFPmode:
+ case CCFPUmode:
+ /* These are only compatible with themselves, which we already
+ checked above. */
+ return VOIDmode;
+ }
+}
+
+/* Return true if we should use an FCOMI instruction for this fp comparison. */
+
+int
+ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
+{
+ enum rtx_code swapped_code = swap_condition (code);
+ return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
+ || (ix86_fp_comparison_cost (swapped_code)
+ == ix86_fp_comparison_fcomi_cost (swapped_code)));
+}
+
+/* Swap, force into registers, or otherwise massage the two operands
+ to an FP comparison. The operands are updated in place; the new
+ comparison code is returned. */
+
+static enum rtx_code
+ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
+{
+ enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
+ rtx op0 = *pop0, op1 = *pop1;
+ enum machine_mode op_mode = GET_MODE (op0);
+ int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
+
+ /* All of the unordered compare instructions only work on registers.
+ The same is true of the fcomi compare instructions. The XFmode
+ compare instructions require registers except when comparing
+ against zero or when converting operand 1 from fixed point to
+ floating point. */
+
+ if (!is_sse
+ && (fpcmp_mode == CCFPUmode
+ || (op_mode == XFmode
+ && ! (standard_80387_constant_p (op0) == 1
+ || standard_80387_constant_p (op1) == 1)
+ && GET_CODE (op1) != FLOAT)
+ || ix86_use_fcomi_compare (code)))
+ {
+ op0 = force_reg (op_mode, op0);
+ op1 = force_reg (op_mode, op1);
+ }
+ else
+ {
+ /* %%% We only allow op1 in memory; op0 must be st(0). So swap
+ things around if they appear profitable, otherwise force op0
+ into a register. */
+
+ if (standard_80387_constant_p (op0) == 0
+ || (GET_CODE (op0) == MEM
+ && ! (standard_80387_constant_p (op1) == 0
+ || GET_CODE (op1) == MEM)))
+ {
+ rtx tmp;
+ tmp = op0, op0 = op1, op1 = tmp;
+ code = swap_condition (code);
+ }
+
+ if (GET_CODE (op0) != REG)
+ op0 = force_reg (op_mode, op0);
+
+ if (CONSTANT_P (op1))
+ {
+ int tmp = standard_80387_constant_p (op1);
+ if (tmp == 0)
+ op1 = validize_mem (force_const_mem (op_mode, op1));
+ else if (tmp == 1)
+ {
+ if (TARGET_CMOVE)
+ op1 = force_reg (op_mode, op1);
+ }
+ else
+ op1 = force_reg (op_mode, op1);
+ }
+ }
+
+ /* Try to rearrange the comparison to make it cheaper. */
+ if (ix86_fp_comparison_cost (code)
+ > ix86_fp_comparison_cost (swap_condition (code))
+ && (GET_CODE (op1) == REG || !no_new_pseudos))
+ {
+ rtx tmp;
+ tmp = op0, op0 = op1, op1 = tmp;
+ code = swap_condition (code);
+ if (GET_CODE (op0) != REG)
+ op0 = force_reg (op_mode, op0);
+ }
+
+ *pop0 = op0;
+ *pop1 = op1;
+ return code;
+}
+
+/* Convert the comparison codes we use to represent FP comparisons into
+ the integer codes that will result in a proper branch. Return UNKNOWN
+ if no such code is available. */
+
+enum rtx_code
+ix86_fp_compare_code_to_integer (enum rtx_code code)
+{
+ switch (code)
+ {
+ case GT:
+ return GTU;
+ case GE:
+ return GEU;
+ case ORDERED:
+ case UNORDERED:
+ return code;
+ case UNEQ:
+ return EQ;
+ case UNLT:
+ return LTU;
+ case UNLE:
+ return LEU;
+ case LTGT:
+ return NE;
+ default:
+ return UNKNOWN;
+ }
+}
+
+/* Split comparison code CODE into comparisons we can do using branch
+ instructions. BYPASS_CODE is the comparison code for the branch that
+ will branch around FIRST_CODE and SECOND_CODE. If one of the branches
+ is not required, its code is set to UNKNOWN.
+ We never require more than two branches. */
+
+void
+ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
+ enum rtx_code *first_code,
+ enum rtx_code *second_code)
+{
+ *first_code = code;
+ *bypass_code = UNKNOWN;
+ *second_code = UNKNOWN;
+
+ /* The fcomi comparison sets flags as follows:
+
+ cmp ZF PF CF
+ > 0 0 0
+ < 0 0 1
+ = 1 0 0
+ un 1 1 1 */
+
+ switch (code)
+ {
+ case GT: /* GTU - CF=0 & ZF=0 */
+ case GE: /* GEU - CF=0 */
+ case ORDERED: /* PF=0 */
+ case UNORDERED: /* PF=1 */
+ case UNEQ: /* EQ - ZF=1 */
+ case UNLT: /* LTU - CF=1 */
+ case UNLE: /* LEU - CF=1 | ZF=1 */
+ case LTGT: /* EQ - ZF=0 */
+ break;
+ case LT: /* LTU - CF=1 - fails on unordered */
+ *first_code = UNLT;
+ *bypass_code = UNORDERED;
+ break;
+ case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
+ *first_code = UNLE;
+ *bypass_code = UNORDERED;
+ break;
+ case EQ: /* EQ - ZF=1 - fails on unordered */
+ *first_code = UNEQ;
+ *bypass_code = UNORDERED;
+ break;
+ case NE: /* NE - ZF=0 - fails on unordered */
+ *first_code = LTGT;
+ *second_code = UNORDERED;
+ break;
+ case UNGE: /* GEU - CF=0 - fails on unordered */
+ *first_code = GE;
+ *second_code = UNORDERED;
+ break;
+ case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
+ *first_code = GT;
+ *second_code = UNORDERED;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (!TARGET_IEEE_FP)
+ {
+ *second_code = UNKNOWN;
+ *bypass_code = UNKNOWN;
+ }
+}
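+
+/* A control-flow model of the splitting above for IEEE math, using the
+   C99 <math.h> classification macros (illustrative only): LT fails on
+   unordered, so a bypass branch jumps around the test; NE holds on
+   unordered, so a second branch is taken in addition to the first.
+
+   static int fp_lt_model (double a, double b)  // code == LT
+   {
+     if (isunordered (a, b)) return 0;          // bypass_code = UNORDERED
+     return isless (a, b);                      // first_code = UNLT
+   }
+
+   static int fp_ne_model (double a, double b)  // code == NE
+   {
+     if (islessgreater (a, b)) return 1;        // first_code = LTGT
+     return isunordered (a, b);                 // second_code = UNORDERED
+   }  */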
+
+/* Return the cost of a comparison done with fcom + arithmetic operations
+ on AX. All following functions use the number of instructions as the
+ cost metric. In the future this should be tweaked to compute bytes for
+ optimize_size and take into account the performance of various
+ instructions on various CPUs. */
+static int
+ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
+{
+ if (!TARGET_IEEE_FP)
+ return 4;
+ /* The cost of code output by ix86_expand_fp_compare. */
+ switch (code)
+ {
+ case UNLE:
+ case UNLT:
+ case LTGT:
+ case GT:
+ case GE:
+ case UNORDERED:
+ case ORDERED:
+ case UNEQ:
+ return 4;
+ case LT:
+ case NE:
+ case EQ:
+ case UNGE:
+ return 5;
+ case LE:
+ case UNGT:
+ return 6;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return cost of comparison done using fcomi operation.
+ See ix86_fp_comparison_arithmetics_cost for the metrics. */
+static int
+ix86_fp_comparison_fcomi_cost (enum rtx_code code)
+{
+ enum rtx_code bypass_code, first_code, second_code;
+ /* Return an arbitrarily high cost when the instruction is not supported;
+ this prevents gcc from using it. */
+ if (!TARGET_CMOVE)
+ return 1024;
+ ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
+ return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
+}
+
+/* Return cost of comparison done using sahf operation.
+ See ix86_fp_comparison_arithmetics_cost for the metrics. */
+static int
+ix86_fp_comparison_sahf_cost (enum rtx_code code)
+{
+ enum rtx_code bypass_code, first_code, second_code;
+ /* Return an arbitrarily high cost when the instruction is not preferred;
+ this keeps gcc from using it. */
+ if (!TARGET_USE_SAHF && !optimize_size)
+ return 1024;
+ ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
+ return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
+}
+
+/* Compute cost of the comparison done using any method.
+ See ix86_fp_comparison_arithmetics_cost for the metrics. */
+static int
+ix86_fp_comparison_cost (enum rtx_code code)
+{
+ int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
+ int min;
+
+ fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
+ sahf_cost = ix86_fp_comparison_sahf_cost (code);
+
+ min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
+ if (min > sahf_cost)
+ min = sahf_cost;
+ if (min > fcomi_cost)
+ min = fcomi_cost;
+ return min;
+}
+
+/* Generate insn patterns to do a floating point compare of OPERANDS. */
+
+static rtx
+ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
+ rtx *second_test, rtx *bypass_test)
+{
+ enum machine_mode fpcmp_mode, intcmp_mode;
+ rtx tmp, tmp2;
+ int cost = ix86_fp_comparison_cost (code);
+ enum rtx_code bypass_code, first_code, second_code;
+
+ fpcmp_mode = ix86_fp_compare_mode (code);
+ code = ix86_prepare_fp_compare_args (code, &op0, &op1);
+
+ if (second_test)
+ *second_test = NULL_RTX;
+ if (bypass_test)
+ *bypass_test = NULL_RTX;
+
+ ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
+
+ /* Do fcomi/sahf based test when profitable. */
+ if ((bypass_code == UNKNOWN || bypass_test)
+ && (second_code == UNKNOWN || second_test)
+ && ix86_fp_comparison_arithmetics_cost (code) > cost)
+ {
+ if (TARGET_CMOVE)
+ {
+ tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
+ tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
+ tmp);
+ emit_insn (tmp);
+ }
+ else
+ {
+ tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
+ tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
+ if (!scratch)
+ scratch = gen_reg_rtx (HImode);
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
+ emit_insn (gen_x86_sahf_1 (scratch));
+ }
+
+ /* The FP codes work out to act like unsigned. */
+ intcmp_mode = fpcmp_mode;
+ code = first_code;
+ if (bypass_code != UNKNOWN)
+ *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
+ gen_rtx_REG (intcmp_mode, FLAGS_REG),
+ const0_rtx);
+ if (second_code != UNKNOWN)
+ *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
+ gen_rtx_REG (intcmp_mode, FLAGS_REG),
+ const0_rtx);
+ }
+ else
+ {
+ /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
+ tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
+ tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
+ if (!scratch)
+ scratch = gen_reg_rtx (HImode);
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
+
+ /* In the unordered case, we have to check C2 for NaN's, which
+ doesn't happen to work out to anything nice combination-wise.
+ So do some bit twiddling on the value we've got in AH to come
+ up with an appropriate set of condition codes. */
+
+ intcmp_mode = CCNOmode;
+ switch (code)
+ {
+ case GT:
+ case UNGT:
+ if (code == GT || !TARGET_IEEE_FP)
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
+ code = EQ;
+ }
+ else
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
+ emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
+ intcmp_mode = CCmode;
+ code = GEU;
+ }
+ break;
+ case LT:
+ case UNLT:
+ if (code == LT && TARGET_IEEE_FP)
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
+ intcmp_mode = CCmode;
+ code = EQ;
+ }
+ else
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
+ code = NE;
+ }
+ break;
+ case GE:
+ case UNGE:
+ if (code == GE || !TARGET_IEEE_FP)
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
+ code = EQ;
+ }
+ else
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
+ GEN_INT (0x01)));
+ code = NE;
+ }
+ break;
+ case LE:
+ case UNLE:
+ if (code == LE && TARGET_IEEE_FP)
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
+ emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
+ intcmp_mode = CCmode;
+ code = LTU;
+ }
+ else
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
+ code = NE;
+ }
+ break;
+ case EQ:
+ case UNEQ:
+ if (code == EQ && TARGET_IEEE_FP)
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
+ intcmp_mode = CCmode;
+ code = EQ;
+ }
+ else
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
+ code = NE;
+ }
+ break;
+ case NE:
+ case LTGT:
+ if (code == NE && TARGET_IEEE_FP)
+ {
+ emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
+ emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
+ GEN_INT (0x40)));
+ code = NE;
+ }
+ else
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
+ code = EQ;
+ }
+ break;
+
+ case UNORDERED:
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
+ code = NE;
+ break;
+ case ORDERED:
+ emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
+ code = EQ;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* Return the test that should be put into the flags user, i.e.
+ the bcc, scc, or cmov instruction. */
+ return gen_rtx_fmt_ee (code, VOIDmode,
+ gen_rtx_REG (intcmp_mode, FLAGS_REG),
+ const0_rtx);
+}
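+
+/* Key to the magic constants above, as an illustrative model: after
+   fnstsw the x87 condition bits land in AH as C0 = 0x01, C2 = 0x04,
+   C3 = 0x40, so 0x45 masks all three and 0x05 masks C0|C3.  Per the
+   fcomi table earlier, '=' yields C3C2C0 = 100 and unordered 111, so
+   the IEEE EQ sequence tests:
+
+   #include <stdint.h>
+
+   static int
+   fp_eq_model (uint8_t ah)       // high byte of the status word
+   {
+     return (ah & 0x45) == 0x40;  // and $0x45; cmp $0x40; sete
+   }  */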
+
+rtx
+ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
+{
+ rtx op0, op1, ret;
+ op0 = ix86_compare_op0;
+ op1 = ix86_compare_op1;
+
+ if (second_test)
+ *second_test = NULL_RTX;
+ if (bypass_test)
+ *bypass_test = NULL_RTX;
+
+ if (ix86_compare_emitted)
+ {
+ ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
+ ix86_compare_emitted = NULL_RTX;
+ }
+ else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
+ ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
+ second_test, bypass_test);
+ else
+ ret = ix86_expand_int_compare (code, op0, op1);
+
+ return ret;
+}
+
+/* Return true if the CODE will result in a nontrivial jump sequence. */
+bool
+ix86_fp_jump_nontrivial_p (enum rtx_code code)
+{
+ enum rtx_code bypass_code, first_code, second_code;
+ if (!TARGET_CMOVE)
+ return true;
+ ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
+ return bypass_code != UNKNOWN || second_code != UNKNOWN;
+}
+
+void
+ix86_expand_branch (enum rtx_code code, rtx label)
+{
+ rtx tmp;
+
+ /* If we have emitted a compare insn, go straight to simple.
+ ix86_expand_compare won't emit anything if ix86_compare_emitted
+ is non-NULL. */
+ if (ix86_compare_emitted)
+ goto simple;
+
+ switch (GET_MODE (ix86_compare_op0))
+ {
+ case QImode:
+ case HImode:
+ case SImode:
+ simple:
+ tmp = ix86_expand_compare (code, NULL, NULL);
+ tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx);
+ emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
+ return;
+
+ case SFmode:
+ case DFmode:
+ case XFmode:
+ {
+ rtvec vec;
+ int use_fcomi;
+ enum rtx_code bypass_code, first_code, second_code;
+
+ code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
+ &ix86_compare_op1);
+
+ ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
+
+ /* Check whether we will use the natural sequence with one jump. If
+ so, we can expand the jump early. Otherwise delay expansion by
+ creating a compound insn so as not to confuse optimizers. */
+ if (bypass_code == UNKNOWN && second_code == UNKNOWN
+ && TARGET_CMOVE)
+ {
+ ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx, NULL_RTX, NULL_RTX);
+ }
+ else
+ {
+ tmp = gen_rtx_fmt_ee (code, VOIDmode,
+ ix86_compare_op0, ix86_compare_op1);
+ tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx);
+ tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
+
+ use_fcomi = ix86_use_fcomi_compare (code);
+ vec = rtvec_alloc (3 + !use_fcomi);
+ RTVEC_ELT (vec, 0) = tmp;
+ RTVEC_ELT (vec, 1)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
+ RTVEC_ELT (vec, 2)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
+ if (! use_fcomi)
+ RTVEC_ELT (vec, 3)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
+
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+ }
+ return;
+ }
+
+ case DImode:
+ if (TARGET_64BIT)
+ goto simple;
+ case TImode:
+ /* Expand DImode branch into multiple compare+branch. */
+ {
+ rtx lo[2], hi[2], label2;
+ enum rtx_code code1, code2, code3;
+ enum machine_mode submode;
+
+ if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
+ {
+ tmp = ix86_compare_op0;
+ ix86_compare_op0 = ix86_compare_op1;
+ ix86_compare_op1 = tmp;
+ code = swap_condition (code);
+ }
+ if (GET_MODE (ix86_compare_op0) == DImode)
+ {
+ split_di (&ix86_compare_op0, 1, lo+0, hi+0);
+ split_di (&ix86_compare_op1, 1, lo+1, hi+1);
+ submode = SImode;
+ }
+ else
+ {
+ split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
+ split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
+ submode = DImode;
+ }
+
+ /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
+ avoid two branches. This costs one extra insn, so disable when
+ optimizing for size. */
+
+ if ((code == EQ || code == NE)
+ && (!optimize_size
+ || hi[1] == const0_rtx || lo[1] == const0_rtx))
+ {
+ rtx xor0, xor1;
+
+ xor1 = hi[0];
+ if (hi[1] != const0_rtx)
+ xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
+ NULL_RTX, 0, OPTAB_WIDEN);
+
+ xor0 = lo[0];
+ if (lo[1] != const0_rtx)
+ xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
+ NULL_RTX, 0, OPTAB_WIDEN);
+
+ tmp = expand_binop (submode, ior_optab, xor1, xor0,
+ NULL_RTX, 0, OPTAB_WIDEN);
+
+ ix86_compare_op0 = tmp;
+ ix86_compare_op1 = const0_rtx;
+ ix86_expand_branch (code, label);
+ return;
+ }
+
+ /* Otherwise, if we are doing less-than or greater-or-equal-than,
+ op1 is a constant, and the low word is zero, then we can just
+ examine the high word. */
+
+ if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
+ switch (code)
+ {
+ case LT: case LTU: case GE: case GEU:
+ ix86_compare_op0 = hi[0];
+ ix86_compare_op1 = hi[1];
+ ix86_expand_branch (code, label);
+ return;
+ default:
+ break;
+ }
+
+ /* Otherwise, we need two or three jumps. */
+
+ label2 = gen_label_rtx ();
+
+ code1 = code;
+ code2 = swap_condition (code);
+ code3 = unsigned_condition (code);
+
+ switch (code)
+ {
+ case LT: case GT: case LTU: case GTU:
+ break;
+
+ case LE: code1 = LT; code2 = GT; break;
+ case GE: code1 = GT; code2 = LT; break;
+ case LEU: code1 = LTU; code2 = GTU; break;
+ case GEU: code1 = GTU; code2 = LTU; break;
+
+ case EQ: code1 = UNKNOWN; code2 = NE; break;
+ case NE: code2 = UNKNOWN; break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /*
+ * a < b =>
+ * if (hi(a) < hi(b)) goto true;
+ * if (hi(a) > hi(b)) goto false;
+ * if (lo(a) < lo(b)) goto true;
+ * false:
+ */
+
+ ix86_compare_op0 = hi[0];
+ ix86_compare_op1 = hi[1];
+
+ if (code1 != UNKNOWN)
+ ix86_expand_branch (code1, label);
+ if (code2 != UNKNOWN)
+ ix86_expand_branch (code2, label2);
+
+ ix86_compare_op0 = lo[0];
+ ix86_compare_op1 = lo[1];
+ ix86_expand_branch (code3, label);
+
+ if (code2 != UNKNOWN)
+ emit_label (label2);
+ return;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
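+
+/* A scalar model of the double-word EQ trick above (illustrative
+   only): (hi0 ^ hi1) | (lo0 ^ lo1) is zero iff both words match, so
+   one compare against zero replaces two compare-and-branch pairs.
+
+   #include <stdint.h>
+
+   static int
+   di_eq_model (uint32_t lo0, uint32_t hi0, uint32_t lo1, uint32_t hi1)
+   {
+     return ((hi0 ^ hi1) | (lo0 ^ lo1)) == 0;
+   }  */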
+
+/* Split branch based on floating point condition. */
+void
+ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
+ rtx target1, rtx target2, rtx tmp, rtx pushed)
+{
+ rtx second, bypass;
+ rtx label = NULL_RTX;
+ rtx condition;
+ int bypass_probability = -1, second_probability = -1, probability = -1;
+ rtx i;
+
+ if (target2 != pc_rtx)
+ {
+ rtx tmp = target2;
+ code = reverse_condition_maybe_unordered (code);
+ target2 = target1;
+ target1 = tmp;
+ }
+
+ condition = ix86_expand_fp_compare (code, op1, op2,
+ tmp, &second, &bypass);
+
+ /* Remove pushed operand from stack. */
+ if (pushed)
+ ix86_free_from_memory (GET_MODE (pushed));
+
+ if (split_branch_probability >= 0)
+ {
+ /* Distribute the probabilities across the jumps.
+ Assume that BYPASS and SECOND always test
+ for UNORDERED. */
+ probability = split_branch_probability;
+
+ /* A value of 1 is low enough that the probability need not
+ be updated. Later we may run some experiments and see
+ if unordered values are more frequent in practice. */
+ if (bypass)
+ bypass_probability = 1;
+ if (second)
+ second_probability = 1;
+ }
+ if (bypass != NULL_RTX)
+ {
+ label = gen_label_rtx ();
+ i = emit_jump_insn (gen_rtx_SET
+ (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ bypass,
+ gen_rtx_LABEL_REF (VOIDmode,
+ label),
+ pc_rtx)));
+ if (bypass_probability >= 0)
+ REG_NOTES (i)
+ = gen_rtx_EXPR_LIST (REG_BR_PROB,
+ GEN_INT (bypass_probability),
+ REG_NOTES (i));
+ }
+ i = emit_jump_insn (gen_rtx_SET
+ (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ condition, target1, target2)));
+ if (probability >= 0)
+ REG_NOTES (i)
+ = gen_rtx_EXPR_LIST (REG_BR_PROB,
+ GEN_INT (probability),
+ REG_NOTES (i));
+ if (second != NULL_RTX)
+ {
+ i = emit_jump_insn (gen_rtx_SET
+ (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
+ target2)));
+ if (second_probability >= 0)
+ REG_NOTES (i)
+ = gen_rtx_EXPR_LIST (REG_BR_PROB,
+ GEN_INT (second_probability),
+ REG_NOTES (i));
+ }
+ if (label != NULL_RTX)
+ emit_label (label);
+}
+
+int
+ix86_expand_setcc (enum rtx_code code, rtx dest)
+{
+ rtx ret, tmp, tmpreg, equiv;
+ rtx second_test, bypass_test;
+
+ if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
+ return 0; /* FAIL */
+
+ gcc_assert (GET_MODE (dest) == QImode);
+
+ ret = ix86_expand_compare (code, &second_test, &bypass_test);
+ PUT_MODE (ret, QImode);
+
+ tmp = dest;
+ tmpreg = dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
+ if (bypass_test || second_test)
+ {
+ rtx test = second_test;
+ int bypass = 0;
+ rtx tmp2 = gen_reg_rtx (QImode);
+ if (bypass_test)
+ {
+ gcc_assert (!second_test);
+ test = bypass_test;
+ bypass = 1;
+ PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
+ }
+ PUT_MODE (test, QImode);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
+
+ if (bypass)
+ emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
+ else
+ emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
+ }
+
+ /* Attach a REG_EQUAL note describing the comparison result. */
+ if (ix86_compare_op0 && ix86_compare_op1)
+ {
+ equiv = simplify_gen_relational (code, QImode,
+ GET_MODE (ix86_compare_op0),
+ ix86_compare_op0, ix86_compare_op1);
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
+ }
+
+ return 1; /* DONE */
+}
+
+ /* Expand a comparison setting or clearing the carry flag. Return true
+ when successful, setting *POP to the comparison operation. */
+static bool
+ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
+{
+ enum machine_mode mode =
+ GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
+
+ /* Do not handle double-word compares, which go through a special path.
+ Also we can't deal with FP compares yet; it would be possible to
+ add that. */
+ if (mode == (TARGET_64BIT ? TImode : DImode))
+ return false;
+ if (FLOAT_MODE_P (mode))
+ {
+ rtx second_test = NULL, bypass_test = NULL;
+ rtx compare_op, compare_seq;
+
+ /* Shortcut: the following common codes never translate into carry
+ flag compares. */
+ if (code == EQ || code == NE || code == UNEQ || code == LTGT
+ || code == ORDERED || code == UNORDERED)
+ return false;
+
+ /* These comparisons would require the zero flag; swap the operands
+ so they don't. */
+ if ((code == GT || code == UNLE || code == LE || code == UNGT)
+ && !TARGET_IEEE_FP)
+ {
+ rtx tmp = op0;
+ op0 = op1;
+ op1 = tmp;
+ code = swap_condition (code);
+ }
+
+ /* Try to expand the comparison and verify that we end up with a carry
+ flag based comparison. This fails only when we decide to expand the
+ comparison using arithmetic, which is not a common scenario. */
+ start_sequence ();
+ compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
+ &second_test, &bypass_test);
+ compare_seq = get_insns ();
+ end_sequence ();
+
+ if (second_test || bypass_test)
+ return false;
+ if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
+ || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
+ code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
+ else
+ code = GET_CODE (compare_op);
+ if (code != LTU && code != GEU)
+ return false;
+ emit_insn (compare_seq);
+ *pop = compare_op;
+ return true;
+ }
+ if (!INTEGRAL_MODE_P (mode))
+ return false;
+ switch (code)
+ {
+ case LTU:
+ case GEU:
+ break;
+
+ /* Convert a==0 into (unsigned)a<1. */
+ case EQ:
+ case NE:
+ if (op1 != const0_rtx)
+ return false;
+ op1 = const1_rtx;
+ code = (code == EQ ? LTU : GEU);
+ break;
+
+ /* Convert a>b into b<a or a>=b+1. */
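+ /* E.g. a >u 9 becomes a >=u 10, and a <=u 9 becomes a <u 10. */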
+ case GTU:
+ case LEU:
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
+ /* Bail out on overflow. We could still swap the operands, but
+ that would force loading the constant into a register. */
+ if (op1 == const0_rtx
+ || !x86_64_immediate_operand (op1, GET_MODE (op1)))
+ return false;
+ code = (code == GTU ? GEU : LTU);
+ }
+ else
+ {
+ rtx tmp = op1;
+ op1 = op0;
+ op0 = tmp;
+ code = (code == GTU ? LTU : GEU);
+ }
+ break;
+
+ /* Convert a>=0 into (unsigned)a<0x80000000. */
+ case LT:
+ case GE:
+ if (mode == DImode || op1 != const0_rtx)
+ return false;
+ op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
+ code = (code == LT ? GEU : LTU);
+ break;
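+ /* Likewise convert a > -1, i.e. a >= 0, into (unsigned)a < 0x80000000. */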
+ case LE:
+ case GT:
+ if (mode == DImode || op1 != constm1_rtx)
+ return false;
+ op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
+ code = (code == LE ? GEU : LTU);
+ break;
+
+ default:
+ return false;
+ }
+ /* Swapping operands may cause a constant to appear as the first operand. */
+ if (!nonimmediate_operand (op0, VOIDmode))
+ {
+ if (no_new_pseudos)
+ return false;
+ op0 = force_reg (mode, op0);
+ }
+ ix86_compare_op0 = op0;
+ ix86_compare_op1 = op1;
+ *pop = ix86_expand_compare (code, NULL, NULL);
+ gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
+ return true;
+}
+
+int
+ix86_expand_int_movcc (rtx operands[])
+{
+ enum rtx_code code = GET_CODE (operands[1]), compare_code;
+ rtx compare_seq, compare_op;
+ rtx second_test, bypass_test;
+ enum machine_mode mode = GET_MODE (operands[0]);
+ bool sign_bit_compare_p = false;
+
+ start_sequence ();
+ compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
+ compare_seq = get_insns ();
+ end_sequence ();
+
+ compare_code = GET_CODE (compare_op);
+
+ if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
+ || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
+ sign_bit_compare_p = true;
+
+ /* Don't attempt mode expansion here -- if we had to expand 5 or 6
+ HImode insns, we'd be swallowed in word prefix ops. */
+
+ if ((mode != HImode || TARGET_FAST_PREFIX)
+ && (mode != (TARGET_64BIT ? TImode : DImode))
+ && GET_CODE (operands[2]) == CONST_INT
+ && GET_CODE (operands[3]) == CONST_INT)
+ {
+ rtx out = operands[0];
+ HOST_WIDE_INT ct = INTVAL (operands[2]);
+ HOST_WIDE_INT cf = INTVAL (operands[3]);
+ HOST_WIDE_INT diff;
+
+ diff = ct - cf;
+ /* Sign bit compares are better done using shifts than using sbb. */
+ if (sign_bit_compare_p
+ || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
+ ix86_compare_op1, &compare_op))
+ {
+ /* Detect overlap between destination and compare sources. */
+ rtx tmp = out;
+
+ if (!sign_bit_compare_p)
+ {
+ bool fpcmp = false;
+
+ compare_code = GET_CODE (compare_op);
+
+ if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
+ || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
+ {
+ fpcmp = true;
+ compare_code = ix86_fp_compare_code_to_integer (compare_code);
+ }
+
+ /* To simplify the rest of the code, restrict to the GEU case. */
+ if (compare_code == LTU)
+ {
+ HOST_WIDE_INT tmp = ct;
+ ct = cf;
+ cf = tmp;
+ compare_code = reverse_condition (compare_code);
+ code = reverse_condition (code);
+ }
+ else
+ {
+ if (fpcmp)
+ PUT_CODE (compare_op,
+ reverse_condition_maybe_unordered
+ (GET_CODE (compare_op)));
+ else
+ PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
+ }
+ diff = ct - cf;
+
+ if (reg_overlap_mentioned_p (out, ix86_compare_op0)
+ || reg_overlap_mentioned_p (out, ix86_compare_op1))
+ tmp = gen_reg_rtx (mode);
+
+ if (mode == DImode)
+ emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
+ else
+ emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
+ }
+ else
+ {
+ if (code == GT || code == GE)
+ code = reverse_condition (code);
+ else
+ {
+ HOST_WIDE_INT tmp = ct;
+ ct = cf;
+ cf = tmp;
+ diff = ct - cf;
+ }
+ tmp = emit_store_flag (tmp, code, ix86_compare_op0,
+ ix86_compare_op1, VOIDmode, 0, -1);
+ }
+
+ if (diff == 1)
+ {
+ /*
+ * cmpl op0,op1
+ * sbbl dest,dest
+ * [addl dest, ct]
+ *
+ * Size 5 - 8.
+ */
+ if (ct)
+ tmp = expand_simple_binop (mode, PLUS,
+ tmp, GEN_INT (ct),
+ copy_rtx (tmp), 1, OPTAB_DIRECT);
+ }
+ else if (cf == -1)
+ {
+ /*
+ * cmpl op0,op1
+ * sbbl dest,dest
+ * orl $ct, dest
+ *
+ * Size 8.
+ */
+ tmp = expand_simple_binop (mode, IOR,
+ tmp, GEN_INT (ct),
+ copy_rtx (tmp), 1, OPTAB_DIRECT);
+ }
+ else if (diff == -1 && ct)
+ {
+ /*
+ * cmpl op0,op1
+ * sbbl dest,dest
+ * notl dest
+ * [addl dest, cf]
+ *
+ * Size 8 - 11.
+ */
+ tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
+ if (cf)
+ tmp = expand_simple_binop (mode, PLUS,
+ copy_rtx (tmp), GEN_INT (cf),
+ copy_rtx (tmp), 1, OPTAB_DIRECT);
+ }
+ else
+ {
+ /*
+ * cmpl op0,op1
+ * sbbl dest,dest
+ * [notl dest]
+ * andl cf - ct, dest
+ * [addl dest, ct]
+ *
+ * Size 8 - 11.
+ */
+
+ if (cf == 0)
+ {
+ cf = ct;
+ ct = 0;
+ tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
+ }
+
+ tmp = expand_simple_binop (mode, AND,
+ copy_rtx (tmp),
+ gen_int_mode (cf - ct, mode),
+ copy_rtx (tmp), 1, OPTAB_DIRECT);
+ if (ct)
+ tmp = expand_simple_binop (mode, PLUS,
+ copy_rtx (tmp), GEN_INT (ct),
+ copy_rtx (tmp), 1, OPTAB_DIRECT);
+ }
+
+ if (!rtx_equal_p (tmp, out))
+ emit_move_insn (copy_rtx (out), copy_rtx (tmp));
+
+ return 1; /* DONE */
+ }
+
+ if (diff < 0)
+ {
+ HOST_WIDE_INT tmp;
+ tmp = ct, ct = cf, cf = tmp;
+ diff = -diff;
+ if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
+ {
+ /* We may be reversing an unordered compare to a normal compare, which
+ is not valid in general (we may convert a non-trapping condition
+ into a trapping one); however, on i386 we currently emit all
+ comparisons unordered. */
+ compare_code = reverse_condition_maybe_unordered (compare_code);
+ code = reverse_condition_maybe_unordered (code);
+ }
+ else
+ {
+ compare_code = reverse_condition (compare_code);
+ code = reverse_condition (code);
+ }
+ }
+
+ compare_code = UNKNOWN;
+ if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
+ && GET_CODE (ix86_compare_op1) == CONST_INT)
+ {
+ if (ix86_compare_op1 == const0_rtx
+ && (code == LT || code == GE))
+ compare_code = code;
+ else if (ix86_compare_op1 == constm1_rtx)
+ {
+ if (code == LE)
+ compare_code = LT;
+ else if (code == GT)
+ compare_code = GE;
+ }
+ }
+
+ /* Optimize dest = (op0 < 0) ? -1 : cf. */
+ if (compare_code != UNKNOWN
+ && GET_MODE (ix86_compare_op0) == GET_MODE (out)
+ && (cf == -1 || ct == -1))
+ {
+ /* If the lea code below could be used, only optimize
+ if it results in a two-insn sequence. */
+
+ if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
+ || diff == 3 || diff == 5 || diff == 9)
+ || (compare_code == LT && ct == -1)
+ || (compare_code == GE && cf == -1))
+ {
+ /*
+ * notl op1 (if necessary)
+ * sarl $31, op1
+ * orl cf, op1
+ */
+ if (ct != -1)
+ {
+ cf = ct;
+ ct = -1;
+ code = reverse_condition (code);
+ }
+
+ out = emit_store_flag (out, code, ix86_compare_op0,
+ ix86_compare_op1, VOIDmode, 0, -1);
+
+ out = expand_simple_binop (mode, IOR,
+ out, GEN_INT (cf),
+ out, 1, OPTAB_DIRECT);
+ if (out != operands[0])
+ emit_move_insn (operands[0], out);
+
+ return 1; /* DONE */
+ }
+ }
+
+
+ if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
+ || diff == 3 || diff == 5 || diff == 9)
+ && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
+ && (mode != DImode
+ || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
+ {
+ /*
+ * xorl dest,dest
+ * cmpl op1,op2
+ * setcc dest
+ * lea cf(dest*(ct-cf)),dest
+ *
+ * Size 14.
+ *
+ * This also catches the degenerate setcc-only case.
+ */
+
+ rtx tmp;
+ int nops;
+
+ out = emit_store_flag (out, code, ix86_compare_op0,
+ ix86_compare_op1, VOIDmode, 0, 1);
+
+ nops = 0;
+ /* On x86_64 the lea instruction operates on Pmode, so we need
+ to do the arithmetic in the proper mode to match. */
+ if (diff == 1)
+ tmp = copy_rtx (out);
+ else
+ {
+ rtx out1;
+ out1 = copy_rtx (out);
+ tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
+ nops++;
+ if (diff & 1)
+ {
+ tmp = gen_rtx_PLUS (mode, tmp, out1);
+ nops++;
+ }
+ }
+ if (cf != 0)
+ {
+ tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
+ nops++;
+ }
+ if (!rtx_equal_p (tmp, out))
+ {
+ if (nops == 1)
+ out = force_operand (tmp, copy_rtx (out));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
+ }
+ if (!rtx_equal_p (out, operands[0]))
+ emit_move_insn (operands[0], copy_rtx (out));
+
+ return 1; /* DONE */
+ }
+
+ /*
+ * General case: Jumpful:
+ * xorl dest,dest cmpl op1, op2
+ * cmpl op1, op2 movl ct, dest
+ * setcc dest jcc 1f
+ * decl dest movl cf, dest
+ * andl (cf-ct),dest 1:
+ * addl ct,dest
+ *
+ * Size 20. Size 14.
+ *
+ * This is reasonably steep, but branch mispredict costs are
+ * high on modern cpus, so consider failing only if optimizing
+ * for space.
+ */
+
+ if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
+ && BRANCH_COST >= 2)
+ {
+ if (cf == 0)
+ {
+ cf = ct;
+ ct = 0;
+ if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
+ /* We may be reversing an unordered compare to a normal compare,
+ which is not valid in general (we may convert a non-trapping
+ condition into a trapping one); however, on i386 we currently
+ emit all comparisons unordered. */
+ code = reverse_condition_maybe_unordered (code);
+ else
+ {
+ code = reverse_condition (code);
+ if (compare_code != UNKNOWN)
+ compare_code = reverse_condition (compare_code);
+ }
+ }
+
+ if (compare_code != UNKNOWN)
+ {
+ /* notl op1 (if needed)
+ sarl $31, op1
+ andl (cf-ct), op1
+ addl ct, op1
+
+ For x < 0 (resp. x <= -1) there will be no notl,
+ so if possible swap the constants to get rid of the
+ complement.
+ True/false will be -1/0 while code below (store flag
+ followed by decrement) is 0/-1, so the constants need
+ to be exchanged once more. */
+
+ if (compare_code == GE || !cf)
+ {
+ code = reverse_condition (code);
+ compare_code = LT;
+ }
+ else
+ {
+ HOST_WIDE_INT tmp = cf;
+ cf = ct;
+ ct = tmp;
+ }
+
+ out = emit_store_flag (out, code, ix86_compare_op0,
+ ix86_compare_op1, VOIDmode, 0, -1);
+ }
+ else
+ {
+ out = emit_store_flag (out, code, ix86_compare_op0,
+ ix86_compare_op1, VOIDmode, 0, 1);
+
+ out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
+ copy_rtx (out), 1, OPTAB_DIRECT);
+ }
+
+ out = expand_simple_binop (mode, AND, copy_rtx (out),
+ gen_int_mode (cf - ct, mode),
+ copy_rtx (out), 1, OPTAB_DIRECT);
+ if (ct)
+ out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
+ copy_rtx (out), 1, OPTAB_DIRECT);
+ if (!rtx_equal_p (out, operands[0]))
+ emit_move_insn (operands[0], copy_rtx (out));
+
+ return 1; /* DONE */
+ }
+ }
+
+ if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
+ {
+ /* Try a few things more with specific constants and a variable. */
+
+ optab op;
+ rtx var, orig_out, out, tmp;
+
+ if (BRANCH_COST <= 2)
+ return 0; /* FAIL */
+
+ /* If one of the two operands is an interesting constant, recurse with
+ that operand replaced by 0 or -1 and then mask the variable back in
+ with a logical operation. */
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ var = operands[3];
+ if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
+ operands[3] = constm1_rtx, op = and_optab;
+ else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
+ operands[3] = const0_rtx, op = ior_optab;
+ else
+ return 0; /* FAIL */
+ }
+ else if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ var = operands[2];
+ if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
+ operands[2] = constm1_rtx, op = and_optab;
+ else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
+ operands[2] = const0_rtx, op = ior_optab;
+ else
+ return 0; /* FAIL */
+ }
+ else
+ return 0; /* FAIL */
+
+ orig_out = operands[0];
+ tmp = gen_reg_rtx (mode);
+ operands[0] = tmp;
+
+ /* Recurse to get the constant loaded. */
+ if (ix86_expand_int_movcc (operands) == 0)
+ return 0; /* FAIL */
+
+ /* Mask in the interesting variable. */
+ out = expand_binop (mode, op, var, tmp, orig_out, 0,
+ OPTAB_WIDEN);
+ if (!rtx_equal_p (out, orig_out))
+ emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
+
+ return 1; /* DONE */
+ }
+
+ /*
+ * For comparison with above,
+ *
+ * movl cf,dest
+ * movl ct,tmp
+ * cmpl op1,op2
+ * cmovcc tmp,dest
+ *
+ * Size 15.
+ */
+
+ if (! nonimmediate_operand (operands[2], mode))
+ operands[2] = force_reg (mode, operands[2]);
+ if (! nonimmediate_operand (operands[3], mode))
+ operands[3] = force_reg (mode, operands[3]);
+
+ if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
+ {
+ rtx tmp = gen_reg_rtx (mode);
+ emit_move_insn (tmp, operands[3]);
+ operands[3] = tmp;
+ }
+ if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (mode);
+ emit_move_insn (tmp, operands[2]);
+ operands[2] = tmp;
+ }
+
+ if (! register_operand (operands[2], VOIDmode)
+ && (mode == QImode
+ || ! register_operand (operands[3], VOIDmode)))
+ operands[2] = force_reg (mode, operands[2]);
+
+ if (mode == QImode
+ && ! register_operand (operands[3], VOIDmode))
+ operands[3] = force_reg (mode, operands[3]);
+
+ emit_insn (compare_seq);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (mode,
+ compare_op, operands[2],
+ operands[3])));
+ if (bypass_test)
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
+ gen_rtx_IF_THEN_ELSE (mode,
+ bypass_test,
+ copy_rtx (operands[3]),
+ copy_rtx (operands[0]))));
+ if (second_test)
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
+ gen_rtx_IF_THEN_ELSE (mode,
+ second_test,
+ copy_rtx (operands[2]),
+ copy_rtx (operands[0]))));
+
+ return 1; /* DONE */
+}
+
+/* Swap, force into registers, or otherwise massage the two operands
+ to an sse comparison with a mask result. Thus we differ a bit from
+ ix86_prepare_fp_compare_args which expects to produce a flags result.
+
+ The DEST operand exists to help determine whether to commute commutative
+ operators. The POP0/POP1 operands are updated in place. The new
+ comparison code is returned, or UNKNOWN if not implementable. */
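+ /* For example, GT is implemented as LT with the operands swapped,
+ since the SSE compare instructions provide lt/le/nlt/nle encodings
+ but no gt/ge. */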
+
+static enum rtx_code
+ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
+ rtx *pop0, rtx *pop1)
+{
+ rtx tmp;
+
+ switch (code)
+ {
+ case LTGT:
+ case UNEQ:
+ /* We have no LTGT as an operator. We could implement it with
+ NE & ORDERED, but this requires an extra temporary. It's
+ not clear that it's worth it. */
+ return UNKNOWN;
+
+ case LT:
+ case LE:
+ case UNGT:
+ case UNGE:
+ /* These are supported directly. */
+ break;
+
+ case EQ:
+ case NE:
+ case UNORDERED:
+ case ORDERED:
+ /* For commutative operators, try to canonicalize the destination
+ operand to be first in the comparison - this helps reload to
+ avoid extra moves. */
+ if (!dest || !rtx_equal_p (dest, *pop1))
+ break;
+ /* FALLTHRU */
+
+ case GE:
+ case GT:
+ case UNLE:
+ case UNLT:
+ /* These are not supported directly. Swap the comparison operands
+ to transform into something that is supported. */
+ tmp = *pop0;
+ *pop0 = *pop1;
+ *pop1 = tmp;
+ code = swap_condition (code);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return code;
+}
+
+/* Detect conditional moves that exactly match min/max operational
+ semantics. Note that this is IEEE safe, as long as we don't
+ interchange the operands.
+
+ Returns FALSE if this conditional move doesn't match a MIN/MAX,
+ and TRUE if the operation is successful and instructions are emitted. */
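+ /* E.g. dest = (a < b) ? a : b matches MIN, and dest = (a < b) ? b : a
+ matches MAX; for UNGE the arms are swapped first, reducing it to the
+ same two patterns. */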
+
+static bool
+ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
+ rtx cmp_op1, rtx if_true, rtx if_false)
+{
+ enum machine_mode mode;
+ bool is_min;
+ rtx tmp;
+
+ if (code == LT)
+ ;
+ else if (code == UNGE)
+ {
+ tmp = if_true;
+ if_true = if_false;
+ if_false = tmp;
+ }
+ else
+ return false;
+
+ if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
+ is_min = true;
+ else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
+ is_min = false;
+ else
+ return false;
+
+ mode = GET_MODE (dest);
+
+ /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
+ but MODE may be a vector mode and thus not appropriate. */
+ if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
+ {
+ int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
+ rtvec v;
+
+ if_true = force_reg (mode, if_true);
+ v = gen_rtvec (2, if_true, if_false);
+ tmp = gen_rtx_UNSPEC (mode, v, u);
+ }
+ else
+ {
+ code = is_min ? SMIN : SMAX;
+ tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
+ return true;
+}
+
+/* Expand an sse vector comparison. Return the register with the result. */
+
+static rtx
+ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
+ rtx op_true, rtx op_false)
+{
+ enum machine_mode mode = GET_MODE (dest);
+ rtx x;
+
+ cmp_op0 = force_reg (mode, cmp_op0);
+ if (!nonimmediate_operand (cmp_op1, mode))
+ cmp_op1 = force_reg (mode, cmp_op1);
+
+ if (optimize
+ || reg_overlap_mentioned_p (dest, op_true)
+ || reg_overlap_mentioned_p (dest, op_false))
+ dest = gen_reg_rtx (mode);
+
+ x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+
+ return dest;
+}
+
+/* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
+ operations. This is used for both scalar and vector conditional moves. */
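+ /* The general form computes dest = (cmp & op_true) | (~cmp & op_false);
+ when one arm is zero, the corresponding AND term is dropped. */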
+
+static void
+ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
+{
+ enum machine_mode mode = GET_MODE (dest);
+ rtx t2, t3, x;
+
+ if (op_false == CONST0_RTX (mode))
+ {
+ op_true = force_reg (mode, op_true);
+ x = gen_rtx_AND (mode, cmp, op_true);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+ else if (op_true == CONST0_RTX (mode))
+ {
+ op_false = force_reg (mode, op_false);
+ x = gen_rtx_NOT (mode, cmp);
+ x = gen_rtx_AND (mode, x, op_false);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+ else
+ {
+ op_true = force_reg (mode, op_true);
+ op_false = force_reg (mode, op_false);
+
+ t2 = gen_reg_rtx (mode);
+ if (optimize)
+ t3 = gen_reg_rtx (mode);
+ else
+ t3 = dest;
+
+ x = gen_rtx_AND (mode, op_true, cmp);
+ emit_insn (gen_rtx_SET (VOIDmode, t2, x));
+
+ x = gen_rtx_NOT (mode, cmp);
+ x = gen_rtx_AND (mode, x, op_false);
+ emit_insn (gen_rtx_SET (VOIDmode, t3, x));
+
+ x = gen_rtx_IOR (mode, t3, t2);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+ }
+}
+
+/* Expand a floating-point conditional move. Return true if successful. */
+
+int
+ix86_expand_fp_movcc (rtx operands[])
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx tmp, compare_op, second_test, bypass_test;
+
+ if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
+ {
+ enum machine_mode cmode;
+
+ /* Since we have no cmove for sse registers, don't force bad register
+ allocation just to gain access to it. Deny movcc when the
+ comparison mode doesn't match the move mode. */
+ cmode = GET_MODE (ix86_compare_op0);
+ if (cmode == VOIDmode)
+ cmode = GET_MODE (ix86_compare_op1);
+ if (cmode != mode)
+ return 0;
+
+ code = ix86_prepare_sse_fp_compare_args (operands[0], code,
+ &ix86_compare_op0,
+ &ix86_compare_op1);
+ if (code == UNKNOWN)
+ return 0;
+
+ if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
+ ix86_compare_op1, operands[2],
+ operands[3]))
+ return 1;
+
+ tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
+ ix86_compare_op1, operands[2], operands[3]);
+ ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
+ return 1;
+ }
+
+ /* The floating point conditional move instructions don't directly
+ support conditions resulting from a signed integer comparison. */
+
+ compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
+
+ if (!fcmov_comparison_operator (compare_op, VOIDmode))
+ {
+ gcc_assert (!second_test && !bypass_test);
+ tmp = gen_reg_rtx (QImode);
+ ix86_expand_setcc (code, tmp);
+ code = NE;
+ ix86_compare_op0 = tmp;
+ ix86_compare_op1 = const0_rtx;
+ compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
+ }
+ if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
+ {
+ tmp = gen_reg_rtx (mode);
+ emit_move_insn (tmp, operands[3]);
+ operands[3] = tmp;
+ }
+ if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
+ {
+ tmp = gen_reg_rtx (mode);
+ emit_move_insn (tmp, operands[2]);
+ operands[2] = tmp;
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (mode, compare_op,
+ operands[2], operands[3])));
+ if (bypass_test)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (mode, bypass_test,
+ operands[3], operands[0])));
+ if (second_test)
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (mode, second_test,
+ operands[2], operands[0])));
+
+ return 1;
+}
+
+/* Expand a floating-point vector conditional move; a vcond operation
+ rather than a movcc operation. */
+
+bool
+ix86_expand_fp_vcond (rtx operands[])
+{
+ enum rtx_code code = GET_CODE (operands[3]);
+ rtx cmp;
+
+ code = ix86_prepare_sse_fp_compare_args (operands[0], code,
+ &operands[4], &operands[5]);
+ if (code == UNKNOWN)
+ return false;
+
+ if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
+ operands[5], operands[1], operands[2]))
+ return true;
+
+ cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
+ operands[1], operands[2]);
+ ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
+ return true;
+}
+
+/* Expand a signed integral vector conditional move. */
+
+bool
+ix86_expand_int_vcond (rtx operands[])
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ enum rtx_code code = GET_CODE (operands[3]);
+ bool negate = false;
+ rtx x, cop0, cop1;
+
+ cop0 = operands[4];
+ cop1 = operands[5];
+
+ /* Canonicalize the comparison to EQ, GT, GTU. */
+ switch (code)
+ {
+ case EQ:
+ case GT:
+ case GTU:
+ break;
+
+ case NE:
+ case LE:
+ case LEU:
+ code = reverse_condition (code);
+ negate = true;
+ break;
+
+ case GE:
+ case GEU:
+ code = reverse_condition (code);
+ negate = true;
+ /* FALLTHRU */
+
+ case LT:
+ case LTU:
+ code = swap_condition (code);
+ x = cop0, cop0 = cop1, cop1 = x;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* Only SSE4.1/SSE4.2 supports V2DImode. */
+ if (mode == V2DImode)
+ {
+ switch (code)
+ {
+ case EQ:
+ /* SSE4.1 supports EQ. */
+ if (!TARGET_SSE4_1)
+ return false;
+ break;
+
+ case GT:
+ case GTU:
+ /* SSE4.2 supports GT/GTU. */
+ if (!TARGET_SSE4_2)
+ return false;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ /* Unsigned parallel compare is not supported by the hardware. Play some
+ tricks to turn this into a signed comparison. */
+ if (code == GTU)
+ {
+ cop0 = force_reg (mode, cop0);
+
+ switch (mode)
+ {
+ case V4SImode:
+ {
+ rtx t1, t2, mask;
+
+ /* a >u b is equivalent to (a ^ 0x80000000) >s (b ^ 0x80000000),
+ so flip the sign bit of both operands and compare as signed. */
+ mask = GEN_INT (-0x80000000);
+ mask = gen_rtx_CONST_VECTOR (mode,
+ gen_rtvec (4, mask, mask, mask, mask));
+ mask = force_reg (mode, mask);
+
+ t1 = gen_reg_rtx (mode);
+ emit_insn (gen_xorv4si3 (t1, cop0, mask));
+
+ t2 = gen_reg_rtx (mode);
+ emit_insn (gen_xorv4si3 (t2, force_reg (mode, cop1), mask));
+
+ cop0 = t1;
+ cop1 = t2;
+ code = GT;
+ }
+ break;
+
+ case V16QImode:
+ case V8HImode:
+ /* Perform a parallel unsigned saturating subtraction;
+ a >u b exactly when (a -sat b) != 0. */
+ x = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, x,
+ gen_rtx_US_MINUS (mode, cop0, cop1)));
+
+ cop0 = x;
+ cop1 = CONST0_RTX (mode);
+ code = EQ;
+ negate = !negate;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
+ operands[1+negate], operands[2-negate]);
+
+ ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
+ operands[2-negate]);
+ return true;
+}
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+/* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
+ true if we should do zero extension, else sign extension. HIGH_P is
+ true if we want the N/2 high elements, else the low elements. */
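+ /* E.g. for V8HImode input the low (or high) four elements are
+ interleaved with either a zero vector (zero extension) or a
+ 0 > x compare mask (sign extension) to form V4SImode values. */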
+
+void
+ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
+{
+ enum machine_mode imode = GET_MODE (operands[1]);
+ rtx (*unpack)(rtx, rtx, rtx);
+ rtx se, dest;
+
+ switch (imode)
+ {
+ case V16QImode:
+ if (high_p)
+ unpack = gen_vec_interleave_highv16qi;
+ else
+ unpack = gen_vec_interleave_lowv16qi;
+ break;
+ case V8HImode:
+ if (high_p)
+ unpack = gen_vec_interleave_highv8hi;
+ else
+ unpack = gen_vec_interleave_lowv8hi;
+ break;
+ case V4SImode:
+ if (high_p)
+ unpack = gen_vec_interleave_highv4si;
+ else
+ unpack = gen_vec_interleave_lowv4si;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ dest = gen_lowpart (imode, operands[0]);
+
+ if (unsigned_p)
+ se = force_reg (imode, CONST0_RTX (imode));
+ else
+ se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
+ operands[1], pc_rtx, pc_rtx);
+
+ emit_insn (unpack (dest, operands[1], se));
+}
+
+/* This function performs the same task as ix86_expand_sse_unpack,
+ but with SSE4.1 instructions. */
+
+void
+ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
+{
+ enum machine_mode imode = GET_MODE (operands[1]);
+ rtx (*unpack)(rtx, rtx);
+ rtx src, dest;
+
+ switch (imode)
+ {
+ case V16QImode:
+ if (unsigned_p)
+ unpack = gen_sse4_1_zero_extendv8qiv8hi2;
+ else
+ unpack = gen_sse4_1_extendv8qiv8hi2;
+ break;
+ case V8HImode:
+ if (unsigned_p)
+ unpack = gen_sse4_1_zero_extendv4hiv4si2;
+ else
+ unpack = gen_sse4_1_extendv4hiv4si2;
+ break;
+ case V4SImode:
+ if (unsigned_p)
+ unpack = gen_sse4_1_zero_extendv2siv2di2;
+ else
+ unpack = gen_sse4_1_extendv2siv2di2;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ dest = operands[0];
+ if (high_p)
+ {
+ /* Shift higher 8 bytes to lower 8 bytes. */
+ src = gen_reg_rtx (imode);
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
+ gen_lowpart (TImode, operands[1]),
+ GEN_INT (64)));
+ }
+ else
+ src = operands[1];
+
+ emit_insn (unpack (dest, src));
+}
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+ /* Expand conditional increment or decrement using adc/sbb instructions.
+ The default case using setcc followed by the conditional move can be
+ done by generic code. */
+int
+ix86_expand_int_addcc (rtx operands[])
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx compare_op;
+ rtx val = const0_rtx;
+ bool fpcmp = false;
+ enum machine_mode mode = GET_MODE (operands[0]);
+
+ if (operands[3] != const1_rtx
+ && operands[3] != constm1_rtx)
+ return 0;
+ if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
+ ix86_compare_op1, &compare_op))
+ return 0;
+ code = GET_CODE (compare_op);
+
+ if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
+ || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
+ {
+ fpcmp = true;
+ code = ix86_fp_compare_code_to_integer (code);
+ }
+
+ if (code != LTU)
+ {
+ val = constm1_rtx;
+ if (fpcmp)
+ PUT_CODE (compare_op,
+ reverse_condition_maybe_unordered
+ (GET_CODE (compare_op)));
+ else
+ PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
+ }
+ PUT_MODE (compare_op, mode);
+
+ /* Construct either adc or sbb insn. */
+ if ((code == LTU) == (operands[3] == constm1_rtx))
+ {
+ switch (GET_MODE (operands[0]))
+ {
+ case QImode:
+ emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case HImode:
+ emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case SImode:
+ emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case DImode:
+ emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ switch (GET_MODE (operands[0]))
+ {
+ case QImode:
+ emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case HImode:
+ emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case SImode:
+ emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
+ break;
+ case DImode:
+ emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ return 1; /* DONE */
+}
+
+
+ /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
+ works for floating point parameters and non-offsettable memories.
+ For pushes, it returns just stack offsets; the values will be saved
+ in the right order. At most three parts are generated. */
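+ /* E.g. on a 32-bit target a DFmode operand is split into two SImode
+ parts, and an XFmode operand into three. */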
+
+static int
+ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
+{
+ int size;
+
+ if (!TARGET_64BIT)
+ size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
+ else
+ size = (GET_MODE_SIZE (mode) + 4) / 8;
+
+ gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
+ gcc_assert (size >= 2 && size <= 3);
+
+ /* Optimize constant pool references to immediates. This is used by fp
+ moves, which force all constants to memory to allow combining. */
+ if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
+ {
+ rtx tmp = maybe_get_pool_constant (operand);
+ if (tmp)
+ operand = tmp;
+ }
+
+ if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
+ {
+ /* The only non-offsettable memories we handle are pushes. */
+ int ok = push_operand (operand, VOIDmode);
+
+ gcc_assert (ok);
+
+ operand = copy_rtx (operand);
+ PUT_MODE (operand, Pmode);
+ parts[0] = parts[1] = parts[2] = operand;
+ return size;
+ }
+
+ if (GET_CODE (operand) == CONST_VECTOR)
+ {
+ enum machine_mode imode = int_mode_for_mode (mode);
+ /* Caution: if we looked through a constant pool memory above,
+ the operand may actually have a different mode now. That's
+ ok, since we want to pun this all the way back to an integer. */
+ operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
+ gcc_assert (operand != NULL);
+ mode = imode;
+ }
+
+ if (!TARGET_64BIT)
+ {
+ if (mode == DImode)
+ split_di (&operand, 1, &parts[0], &parts[1]);
+ else
+ {
+ if (REG_P (operand))
+ {
+ gcc_assert (reload_completed);
+ parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
+ parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
+ if (size == 3)
+ parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
+ }
+ else if (offsettable_memref_p (operand))
+ {
+ operand = adjust_address (operand, SImode, 0);
+ parts[0] = operand;
+ parts[1] = adjust_address (operand, SImode, 4);
+ if (size == 3)
+ parts[2] = adjust_address (operand, SImode, 8);
+ }
+ else if (GET_CODE (operand) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r;
+ long l[4];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
+ switch (mode)
+ {
+ case XFmode:
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+ parts[2] = gen_int_mode (l[2], SImode);
+ break;
+ case DFmode:
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ parts[1] = gen_int_mode (l[1], SImode);
+ parts[0] = gen_int_mode (l[0], SImode);
+ }
+ else
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ if (mode == TImode)
+ split_ti (&operand, 1, &parts[0], &parts[1]);
+ if (mode == XFmode || mode == TFmode)
+ {
+ enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
+ if (REG_P (operand))
+ {
+ gcc_assert (reload_completed);
+ parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
+ parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
+ }
+ else if (offsettable_memref_p (operand))
+ {
+ operand = adjust_address (operand, DImode, 0);
+ parts[0] = operand;
+ parts[1] = adjust_address (operand, upper_mode, 8);
+ }
+ else if (GET_CODE (operand) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r;
+ long l[4];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
+ real_to_target (l, &r, mode);
+
+ /* Do not use shift by 32 to avoid warning on 32bit systems. */
+ if (HOST_BITS_PER_WIDE_INT >= 64)
+ parts[0]
+ = gen_int_mode
+ ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
+ + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
+ DImode);
+ else
+ parts[0] = immed_double_const (l[0], l[1], DImode);
+
+ if (upper_mode == SImode)
+ parts[1] = gen_int_mode (l[2], SImode);
+ else if (HOST_BITS_PER_WIDE_INT >= 64)
+ parts[1]
+ = gen_int_mode
+ ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
+ + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
+ DImode);
+ else
+ parts[1] = immed_double_const (l[2], l[3], DImode);
+ }
+ else
+ gcc_unreachable ();
+ }
+ }
+
+ return size;
+}
+
+ /* Emit insns to perform a move or push of DI, DF, and XF values.
+ Operands 2-4 contain the input values in the correct order;
+ operands 5-7 contain the output values. */
+
+void
+ix86_split_long_move (rtx operands[])
+{
+ rtx part[2][3];
+ int nparts;
+ int push = 0;
+ int collisions = 0;
+ enum machine_mode mode = GET_MODE (operands[0]);
+
+ /* The DFmode expanders may ask us to move a double. For a 64-bit
+ target this is a single move; by hiding that fact here we simplify
+ the i386.md splitters. */
+ if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
+ {
+ /* Optimize constant pool references to immediates. This is used by
+ fp moves, which force all constants to memory to allow combining. */
+
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
+ operands[1] = get_pool_constant (XEXP (operands[1], 0));
+ if (push_operand (operands[0], VOIDmode))
+ {
+ operands[0] = copy_rtx (operands[0]);
+ PUT_MODE (operands[0], Pmode);
+ }
+ else
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, operands[1]);
+ emit_move_insn (operands[0], operands[1]);
+ return;
+ }
+
+ /* The only non-offsettable memory we handle is push. */
+ if (push_operand (operands[0], VOIDmode))
+ push = 1;
+ else
+ gcc_assert (GET_CODE (operands[0]) != MEM
+ || offsettable_memref_p (operands[0]));
+
+ nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
+ ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
+
+ /* When emitting a push, take care of source operands on the stack. */
+ if (push && GET_CODE (operands[1]) == MEM
+ && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
+ {
+ /* APPLE LOCAL begin 4099768 */
+ if (nparts == 3 && TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
+ part[1][2] = adjust_address (part[1][2], SImode, 4);
+ /* APPLE LOCAL end 4099768 */
+ if (nparts == 3)
+ part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
+ XEXP (part[1][2], 0));
+ part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
+ XEXP (part[1][1], 0));
+ }
+
+ /* We need to do the copy in the right order in case an address
+ register of the source overlaps the destination. */
+ if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
+ {
+ if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
+ collisions++;
+ if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
+ collisions++;
+ if (nparts == 3
+ && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
+ collisions++;
+
+ /* Collision in the middle part can be handled by reordering. */
+ if (collisions == 1 && nparts == 3
+ && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
+ {
+ rtx tmp;
+ tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
+ tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
+ }
+
+ /* If there are more collisions, we can't handle it by reordering.
+ Do an lea to the last part and use only one colliding move. */
+ else if (collisions > 1)
+ {
+ rtx base;
+
+ collisions = 1;
+
+ base = part[0][nparts - 1];
+
+ /* Handle the case when the last part isn't valid for lea.
+ This happens in 64-bit mode when storing the 12-byte XFmode. */
+ if (GET_MODE (base) != Pmode)
+ base = gen_rtx_REG (Pmode, REGNO (base));
+
+ emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
+ part[1][0] = replace_equiv_address (part[1][0], base);
+ part[1][1] = replace_equiv_address (part[1][1],
+ plus_constant (base, UNITS_PER_WORD));
+ if (nparts == 3)
+ part[1][2] = replace_equiv_address (part[1][2],
+ plus_constant (base, 8));
+ }
+ }
+
+ if (push)
+ {
+ if (!TARGET_64BIT)
+ {
+ if (nparts == 3)
+ {
+ if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
+ emit_move_insn (part[0][2], part[1][2]);
+ }
+ }
+ else
+ {
+ /* In 64-bit mode we don't have a 32-bit push available. If this is a
+ register, that is fine - we just use the larger counterpart. We also
+ retype memory - these accesses come from an attempt to avoid a REX
+ prefix on moving the second half of a TFmode value. */
+ if (GET_MODE (part[1][1]) == SImode)
+ {
+ switch (GET_CODE (part[1][1]))
+ {
+ case MEM:
+ part[1][1] = adjust_address (part[1][1], DImode, 0);
+ break;
+
+ case REG:
+ part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (GET_MODE (part[1][0]) == SImode)
+ part[1][0] = part[1][1];
+ }
+ }
+ emit_move_insn (part[0][1], part[1][1]);
+ emit_move_insn (part[0][0], part[1][0]);
+ return;
+ }
+
+ /* Choose the correct order so that we do not overwrite the source
+ before it is copied. */
+ if ((REG_P (part[0][0])
+ && REG_P (part[1][1])
+ && (REGNO (part[0][0]) == REGNO (part[1][1])
+ || (nparts == 3
+ && REGNO (part[0][0]) == REGNO (part[1][2]))))
+ || (collisions > 0
+ && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
+ {
+ if (nparts == 3)
+ {
+ operands[2] = part[0][2];
+ operands[3] = part[0][1];
+ operands[4] = part[0][0];
+ operands[5] = part[1][2];
+ operands[6] = part[1][1];
+ operands[7] = part[1][0];
+ }
+ else
+ {
+ operands[2] = part[0][1];
+ operands[3] = part[0][0];
+ operands[5] = part[1][1];
+ operands[6] = part[1][0];
+ }
+ }
+ else
+ {
+ if (nparts == 3)
+ {
+ operands[2] = part[0][0];
+ operands[3] = part[0][1];
+ operands[4] = part[0][2];
+ operands[5] = part[1][0];
+ operands[6] = part[1][1];
+ operands[7] = part[1][2];
+ }
+ else
+ {
+ operands[2] = part[0][0];
+ operands[3] = part[0][1];
+ operands[5] = part[1][0];
+ operands[6] = part[1][1];
+ }
+ }
+
+ /* If optimizing for size, attempt to locally unCSE nonzero constants. */
+ if (optimize_size)
+ {
+ if (GET_CODE (operands[5]) == CONST_INT
+ && operands[5] != const0_rtx
+ && REG_P (operands[2]))
+ {
+ if (GET_CODE (operands[6]) == CONST_INT
+ && INTVAL (operands[6]) == INTVAL (operands[5]))
+ operands[6] = operands[2];
+
+ if (nparts == 3
+ && GET_CODE (operands[7]) == CONST_INT
+ && INTVAL (operands[7]) == INTVAL (operands[5]))
+ operands[7] = operands[2];
+ }
+
+ if (nparts == 3
+ && GET_CODE (operands[6]) == CONST_INT
+ && operands[6] != const0_rtx
+ && REG_P (operands[3])
+ && GET_CODE (operands[7]) == CONST_INT
+ && INTVAL (operands[7]) == INTVAL (operands[6]))
+ operands[7] = operands[3];
+ }
+
+ emit_move_insn (operands[2], operands[5]);
+ emit_move_insn (operands[3], operands[6]);
+ if (nparts == 3)
+ emit_move_insn (operands[4], operands[7]);
+
+ return;
+}
+
+/* Helper function of ix86_split_ashl used to generate an SImode/DImode
+ left shift by a constant, either using a single shift or
+ a sequence of add instructions. */
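+ /* E.g. a shift left by 2 becomes two adds when we are not optimizing
+ for size and two adds cost no more than one constant shift. */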
+
+static void
+ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
+{
+ if (count == 1)
+ {
+ emit_insn ((mode == DImode
+ ? gen_addsi3
+ : gen_adddi3) (operand, operand, operand));
+ }
+ else if (!optimize_size
+ && count * ix86_cost->add <= ix86_cost->shift_const)
+ {
+ int i;
+ for (i=0; i<count; i++)
+ {
+ emit_insn ((mode == DImode
+ ? gen_addsi3
+ : gen_adddi3) (operand, operand, operand));
+ }
+ }
+ else
+ emit_insn ((mode == DImode
+ ? gen_ashlsi3
+ : gen_ashldi3) (operand, operand, GEN_INT (count)));
+}
+
+void
+ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
+{
+ rtx low[2], high[2];
+ int count;
+ const int single_width = mode == DImode ? 32 : 64;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
+ count = INTVAL (operands[2]) & (single_width * 2 - 1);
+
+ if (count >= single_width)
+ {
+ emit_move_insn (high[0], low[1]);
+ emit_move_insn (low[0], const0_rtx);
+
+ if (count > single_width)
+ ix86_expand_ashl_const (high[0], count - single_width, mode);
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ emit_insn ((mode == DImode
+ ? gen_x86_shld_1
+ : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
+ ix86_expand_ashl_const (low[0], count, mode);
+ }
+ return;
+ }
+
+ (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
+
+ if (operands[1] == const1_rtx)
+ {
+ /* Assuming we've chosen QImode-capable registers, 1 << N
+ can be done with two 32/64-bit shifts, no branches, no cmoves. */
+ if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
+ {
+ rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
+
+ ix86_expand_clear (low[0]);
+ ix86_expand_clear (high[0]);
+ emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
+
+ d = gen_lowpart (QImode, low[0]);
+ d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
+ s = gen_rtx_EQ (QImode, flags, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, d, s));
+
+ d = gen_lowpart (QImode, high[0]);
+ d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
+ s = gen_rtx_NE (QImode, flags, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, d, s));
+ }
+
+ /* Otherwise, we can get the same results by manually performing
+ a bit extract operation on bit 5/6, and then performing the two
+ shifts. The two methods of getting 0/1 into low/high are exactly
+ the same size. Avoiding the shift in the bit extract case helps
+ pentium4 a bit; no one else seems to care much either way. */
+ else
+ {
+ rtx x;
+
+ if (TARGET_PARTIAL_REG_STALL && !optimize_size)
+ x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
+ else
+ x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
+ emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
+
+ emit_insn ((mode == DImode
+ ? gen_lshrsi3
+ : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
+ emit_insn ((mode == DImode
+ ? gen_andsi3
+ : gen_anddi3) (high[0], high[0], GEN_INT (1)));
+ emit_move_insn (low[0], high[0]);
+ emit_insn ((mode == DImode
+ ? gen_xorsi3
+ : gen_xordi3) (low[0], low[0], GEN_INT (1)));
+ }
+
+ emit_insn ((mode == DImode
+ ? gen_ashlsi3
+ : gen_ashldi3) (low[0], low[0], operands[2]));
+ emit_insn ((mode == DImode
+ ? gen_ashlsi3
+ : gen_ashldi3) (high[0], high[0], operands[2]));
+ return;
+ }
+
+ if (operands[1] == constm1_rtx)
+ {
+ /* For -1 << N, we can avoid the shld instruction, because we
+ know that we're shifting 0...31/63 ones into a -1. */
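+ /* The high half can stay all ones: for counts below 32/64 that is
+ already the correct value, and for larger counts the adjustment
+ step below moves the shifted low half into the high half. */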
+ emit_move_insn (low[0], constm1_rtx);
+ if (optimize_size)
+ emit_move_insn (high[0], low[0]);
+ else
+ emit_move_insn (high[0], constm1_rtx);
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
+ emit_insn ((mode == DImode
+ ? gen_x86_shld_1
+ : gen_x86_64_shld) (high[0], low[0], operands[2]));
+ }
+
+ emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
+
+ if (TARGET_CMOVE && scratch)
+ {
+ ix86_expand_clear (scratch);
+ emit_insn ((mode == DImode
+ ? gen_x86_shift_adj_1
+ : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
+ }
+ else
+ emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
+}
+
+void
+ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
+{
+ rtx low[2], high[2];
+ int count;
+ const int single_width = mode == DImode ? 32 : 64;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
+ count = INTVAL (operands[2]) & (single_width * 2 - 1);
+
+ if (count == single_width * 2 - 1)
+ {
+ emit_move_insn (high[0], high[1]);
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (high[0], high[0],
+ GEN_INT (single_width - 1)));
+ emit_move_insn (low[0], high[0]);
+
+ }
+ else if (count >= single_width)
+ {
+ emit_move_insn (low[0], high[1]);
+ emit_move_insn (high[0], low[0]);
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (high[0], high[0],
+ GEN_INT (single_width - 1)));
+ if (count > single_width)
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (low[0], low[0],
+ GEN_INT (count - single_width)));
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ emit_insn ((mode == DImode
+ ? gen_x86_shrd_1
+ : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
+ }
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
+
+ emit_insn ((mode == DImode
+ ? gen_x86_shrd_1
+ : gen_x86_64_shrd) (low[0], high[0], operands[2]));
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (high[0], high[0], operands[2]));
+
+ if (TARGET_CMOVE && scratch)
+ {
+ emit_move_insn (scratch, high[0]);
+ emit_insn ((mode == DImode
+ ? gen_ashrsi3
+ : gen_ashrdi3) (scratch, scratch,
+ GEN_INT (single_width - 1)));
+ emit_insn ((mode == DImode
+ ? gen_x86_shift_adj_1
+ : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
+ scratch));
+ }
+ else
+ emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
+ }
+}
+
+void
+ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
+{
+ rtx low[2], high[2];
+ int count;
+ const int single_width = mode == DImode ? 32 : 64;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
+ count = INTVAL (operands[2]) & (single_width * 2 - 1);
+
+ if (count >= single_width)
+ {
+ emit_move_insn (low[0], high[1]);
+ ix86_expand_clear (high[0]);
+
+ if (count > single_width)
+ emit_insn ((mode == DImode
+ ? gen_lshrsi3
+ : gen_lshrdi3) (low[0], low[0],
+ GEN_INT (count - single_width)));
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ emit_insn ((mode == DImode
+ ? gen_x86_shrd_1
+ : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
+ emit_insn ((mode == DImode
+ ? gen_lshrsi3
+ : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
+ }
+ }
+ else
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+
+ (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
+
+ emit_insn ((mode == DImode
+ ? gen_x86_shrd_1
+ : gen_x86_64_shrd) (low[0], high[0], operands[2]));
+ emit_insn ((mode == DImode
+ ? gen_lshrsi3
+ : gen_lshrdi3) (high[0], high[0], operands[2]));
+
+ /* Heh. By reversing the arguments, we can reuse this pattern. */
+ if (TARGET_CMOVE && scratch)
+ {
+ ix86_expand_clear (scratch);
+ emit_insn ((mode == DImode
+ ? gen_x86_shift_adj_1
+ : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
+ scratch));
+ }
+ else
+ emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
+ }
+}
+
+ /* Helper function for the string operations below. Tests whether
+ VARIABLE is aligned to VALUE bytes; if so, jumps to the returned
+ label. */
+static rtx
+ix86_expand_aligntest (rtx variable, int value)
+{
+ rtx label = gen_label_rtx ();
+ rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
+ if (GET_MODE (variable) == DImode)
+ emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
+ else
+ emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
+ emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
+ 1, label);
+ return label;
+}
+
+ /* Adjust COUNTREG by VALUE. */
+static void
+ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
+{
+ if (GET_MODE (countreg) == DImode)
+ emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
+ else
+ emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
+}
+
+ /* Zero extend the possibly SImode EXP to a Pmode register. */
+rtx
+ix86_zero_extend_to_Pmode (rtx exp)
+{
+ rtx r;
+ if (GET_MODE (exp) == VOIDmode)
+ return force_reg (Pmode, exp);
+ if (GET_MODE (exp) == Pmode)
+ return copy_to_mode_reg (Pmode, exp);
+ r = gen_reg_rtx (Pmode);
+ emit_insn (gen_zero_extendsidi2 (r, exp));
+ return r;
+}
+
+/* Expand string move (memcpy) operation. Use i386 string operations when
+ profitable. expand_clrmem contains similar code. */
+int
+ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
+{
+ rtx srcreg, destreg, countreg, srcexp, destexp;
+ enum machine_mode counter_mode;
+ HOST_WIDE_INT align = 0;
+ unsigned HOST_WIDE_INT count = 0;
+
+ if (GET_CODE (align_exp) == CONST_INT)
+ align = INTVAL (align_exp);
+
+ /* Can't use any of this if the user has appropriated esi or edi. */
+ if (global_regs[4] || global_regs[5])
+ return 0;
+
+ /* This simple hack avoids all inlining code and simplifies code below. */
+ if (!TARGET_ALIGN_STRINGOPS)
+ align = 64;
+
+ if (GET_CODE (count_exp) == CONST_INT)
+ {
+ count = INTVAL (count_exp);
+ if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
+ return 0;
+ }
+
+ /* Figure out the proper mode for the counter. For 32 bits it is
+ always SImode; for 64 bits use SImode when possible, otherwise
+ DImode. Set count to the number of bytes copied when known at
+ compile time. */
+ if (!TARGET_64BIT
+ || GET_MODE (count_exp) == SImode
+ || x86_64_zext_immediate_operand (count_exp, VOIDmode))
+ counter_mode = SImode;
+ else
+ counter_mode = DImode;
+
+ gcc_assert (counter_mode == SImode || counter_mode == DImode);
+
+ destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
+ if (destreg != XEXP (dst, 0))
+ dst = replace_equiv_address_nv (dst, destreg);
+ srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
+ if (srcreg != XEXP (src, 0))
+ src = replace_equiv_address_nv (src, srcreg);
+
+ /* When optimizing for size, emit a simple rep ; movsb instruction for
+ counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
+ sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
+ The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
+ count / 4 + (count & 3) bytes; the other sequence is either 4 or 7
+ bytes, but we don't know whether the upper 24 (resp. 56) bits of %ecx
+ will be known to be zero or not. The rep; movsb sequence causes
+ higher register pressure though, so take that into account. */
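+ /* E.g. count == 23 takes 5 movsl + movsw + movsb, about 8 bytes,
+ while the mov $23, %ecx; rep ; movsb form stays within 4-7 bytes. */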
+
+ if ((!optimize || optimize_size)
+ && (count == 0
+ || ((count & 0x03)
+ && (!optimize_size
+ || count > 5 * 4
+ || (count & 3) + count / 4 > 6))))
+ {
+ emit_insn (gen_cld ());
+ countreg = ix86_zero_extend_to_Pmode (count_exp);
+ destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
+ srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
+ emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
+ destexp, srcexp));
+ }
+
+ /* For constant aligned (or small unaligned) copies use rep movsl
+ followed by code copying the rest. For PentiumPro ensure 8 byte
+ alignment to allow rep movsl acceleration. */
+
+ else if (count != 0
+ && (align >= 8
+ || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
+ || optimize_size || count < (unsigned int) 64))
+ {
+ unsigned HOST_WIDE_INT offset = 0;
+ int size = TARGET_64BIT && !optimize_size ? 8 : 4;
+ rtx srcmem, dstmem;
+
+ emit_insn (gen_cld ());
+ if (count & ~(size - 1))
+ {
+ if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
+ {
+ enum machine_mode movs_mode = size == 4 ? SImode : DImode;
+
+ while (offset < (count & ~(size - 1)))
+ {
+ srcmem = adjust_automodify_address_nv (src, movs_mode,
+ srcreg, offset);
+ dstmem = adjust_automodify_address_nv (dst, movs_mode,
+ destreg, offset);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ offset += size;
+ }
+ }
+ else
+ {
+ countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
+ & (TARGET_64BIT ? -1 : 0x3fffffff));
+ countreg = copy_to_mode_reg (counter_mode, countreg);
+ countreg = ix86_zero_extend_to_Pmode (countreg);
+
+ destexp = gen_rtx_ASHIFT (Pmode, countreg,
+ GEN_INT (size == 4 ? 2 : 3));
+ srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
+ destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
+
+ emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
+ countreg, destexp, srcexp));
+ offset = count & ~(size - 1);
+ }
+ }
+ if (size == 8 && (count & 0x04))
+ {
+ srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
+ offset);
+ dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
+ offset);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ offset += 4;
+ }
+ if (count & 0x02)
+ {
+ srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
+ offset);
+ dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
+ offset);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ offset += 2;
+ }
+ if (count & 0x01)
+ {
+ srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
+ offset);
+ dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
+ offset);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ }
+ }
+ /* The generic code based on the glibc implementation:
+ - align destination to 4 bytes (8-byte alignment is used for PentiumPro,
+ allowing accelerated copying there)
+ - copy the data using rep movsl
+ - copy the rest. */
+ else
+ {
+ rtx countreg2;
+ rtx label = NULL;
+ rtx srcmem, dstmem;
+ int desired_alignment = (TARGET_PENTIUMPRO
+ && (count == 0 || count >= (unsigned int) 260)
+ ? 8 : UNITS_PER_WORD);
+ /* Get rid of MEM_OFFSETs, they won't be accurate. */
+ dst = change_address (dst, BLKmode, destreg);
+ src = change_address (src, BLKmode, srcreg);
+
+ /* In case we don't know anything about the alignment, default to the
+ library version, since it is usually equally fast and results in
+ shorter code.
+
+ Also emit the call when we know that the count is large and the call
+ overhead will not be important. */
+ if (!TARGET_INLINE_ALL_STRINGOPS
+ && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
+ return 0;
+
+ if (TARGET_SINGLE_STRINGOP)
+ emit_insn (gen_cld ());
+
+ countreg2 = gen_reg_rtx (Pmode);
+ countreg = copy_to_mode_reg (counter_mode, count_exp);
+
+ /* We don't use loops to align the destination or to copy parts smaller
+ than 4 bytes, because gcc is able to optimize such code better (when
+ the destination or the count really is aligned, gcc is often able to
+ predict the branches), and it is also friendlier to hardware branch
+ prediction.
+
+ Using loops is beneficial for the generic case, because we can handle
+ small counts with the loops. Many CPUs (such as Athlon) have large
+ REP prefix setup costs.
+
+ This is quite costly. Maybe we can revisit this decision later or
+ add some customizability to this code. */
+
+ if (count == 0 && align < desired_alignment)
+ {
+ label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
+ LEU, 0, counter_mode, 1, label);
+ }
+ if (align <= 1)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 1);
+ srcmem = change_address (src, QImode, srcreg);
+ dstmem = change_address (dst, QImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ ix86_adjust_counter (countreg, 1);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align <= 2)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 2);
+ srcmem = change_address (src, HImode, srcreg);
+ dstmem = change_address (dst, HImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ ix86_adjust_counter (countreg, 2);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align <= 4 && desired_alignment > 4)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 4);
+ srcmem = change_address (src, SImode, srcreg);
+ dstmem = change_address (dst, SImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ ix86_adjust_counter (countreg, 4);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+
+ if (label && desired_alignment > 4 && !TARGET_64BIT)
+ {
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ label = NULL_RTX;
+ }
+ if (!TARGET_SINGLE_STRINGOP)
+ emit_insn (gen_cld ());
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
+ GEN_INT (3)));
+ destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
+ }
+ else
+ {
+ emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
+ destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
+ }
+ srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
+ destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
+ emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
+ countreg2, destexp, srcexp));
+
+ if (label)
+ {
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
+ {
+ srcmem = change_address (src, SImode, srcreg);
+ dstmem = change_address (dst, SImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ }
+ if ((align <= 4 || count == 0) && TARGET_64BIT)
+ {
+ rtx label = ix86_expand_aligntest (countreg, 4);
+ srcmem = change_address (src, SImode, srcreg);
+ dstmem = change_address (dst, SImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align > 2 && count != 0 && (count & 2))
+ {
+ srcmem = change_address (src, HImode, srcreg);
+ dstmem = change_address (dst, HImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ }
+ if (align <= 2 || count == 0)
+ {
+ rtx label = ix86_expand_aligntest (countreg, 2);
+ srcmem = change_address (src, HImode, srcreg);
+ dstmem = change_address (dst, HImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align > 1 && count != 0 && (count & 1))
+ {
+ srcmem = change_address (src, QImode, srcreg);
+ dstmem = change_address (dst, QImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ }
+ if (align <= 1 || count == 0)
+ {
+ rtx label = ix86_expand_aligntest (countreg, 1);
+ srcmem = change_address (src, QImode, srcreg);
+ dstmem = change_address (dst, QImode, destreg);
+ emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ }
+
+ return 1;
+}
+
+/* Expand string clear operation (bzero). Use i386 string operations when
+ profitable. expand_movmem contains similar code. */
+int
+ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
+{
+ rtx destreg, zeroreg, countreg, destexp;
+ enum machine_mode counter_mode;
+ HOST_WIDE_INT align = 0;
+ unsigned HOST_WIDE_INT count = 0;
+
+ if (GET_CODE (align_exp) == CONST_INT)
+ align = INTVAL (align_exp);
+
+ /* Can't use any of this if the user has appropriated esi. */
+ if (global_regs[4])
+ return 0;
+
+ /* This simple hack avoids all inlining code and simplifies code below. */
+ if (!TARGET_ALIGN_STRINGOPS)
+ align = 32;
+
+ if (GET_CODE (count_exp) == CONST_INT)
+ {
+ count = INTVAL (count_exp);
+ if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
+ return 0;
+ }
+ /* Figure out the proper mode for the counter. For 32-bit targets it is
+ always SImode; for 64-bit targets use SImode when possible, otherwise
+ DImode. Set count to the number of bytes cleared when known at
+ compile time. */
+ if (!TARGET_64BIT
+ || GET_MODE (count_exp) == SImode
+ || x86_64_zext_immediate_operand (count_exp, VOIDmode))
+ counter_mode = SImode;
+ else
+ counter_mode = DImode;
+
+ destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
+ if (destreg != XEXP (dst, 0))
+ dst = replace_equiv_address_nv (dst, destreg);
+
+
+ /* When optimizing for size emit a simple rep ; stosb instruction for
+ counts not divisible by 4. The movl $N, %ecx; rep; stosb
+ sequence is 7 bytes long, so if optimizing for size and the count is
+ small enough that some stosl, stosw and stosb instructions without
+ rep are shorter, fall back into the next if. */
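+ For example, count == 35 gives (35 & 3) + (35 >> 2) = 11, more than 7,
+ so the 7-byte rep; stosb form wins; count == 9 gives 1 + 2 = 3 short
+ stos instructions and control falls through to the branch below. */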
+
+ if ((!optimize || optimize_size)
+ && (count == 0
+ || ((count & 0x03)
+ && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
+ {
+ emit_insn (gen_cld ());
+
+ countreg = ix86_zero_extend_to_Pmode (count_exp);
+ zeroreg = copy_to_mode_reg (QImode, const0_rtx);
+ destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
+ emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
+ }
+ else if (count != 0
+ && (align >= 8
+ || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
+ || optimize_size || count < (unsigned int) 64))
+ {
+ int size = TARGET_64BIT && !optimize_size ? 8 : 4;
+ unsigned HOST_WIDE_INT offset = 0;
+
+ emit_insn (gen_cld ());
+
+ zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
+ if (count & ~(size - 1))
+ {
+ unsigned HOST_WIDE_INT repcount;
+ unsigned int max_nonrep;
+
+ repcount = count >> (size == 4 ? 2 : 3);
+ if (!TARGET_64BIT)
+ repcount &= 0x3fffffff;
+
+ /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
+ movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
+ bytes. In both cases the latter seems to be faster for small
+ values of N. */
+ max_nonrep = size == 4 ? 7 : 4;
+ if (!optimize_size)
+ switch (ix86_tune)
+ {
+ case PROCESSOR_PENTIUM4:
+ case PROCESSOR_NOCONA:
+ max_nonrep = 3;
+ break;
+ default:
+ break;
+ }
+
+ if (repcount <= max_nonrep)
+ while (repcount-- > 0)
+ {
+ rtx mem = adjust_automodify_address_nv (dst,
+ GET_MODE (zeroreg),
+ destreg, offset);
+ emit_insn (gen_strset (destreg, mem, zeroreg));
+ offset += size;
+ }
+ else
+ {
+ countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
+ countreg = ix86_zero_extend_to_Pmode (countreg);
+ destexp = gen_rtx_ASHIFT (Pmode, countreg,
+ GEN_INT (size == 4 ? 2 : 3));
+ destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
+ emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
+ destexp));
+ offset = count & ~(size - 1);
+ }
+ }
+ if (size == 8 && (count & 0x04))
+ {
+ rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
+ offset);
+ emit_insn (gen_strset (destreg, mem,
+ gen_rtx_SUBREG (SImode, zeroreg, 0)));
+ offset += 4;
+ }
+ if (count & 0x02)
+ {
+ rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
+ offset);
+ emit_insn (gen_strset (destreg, mem,
+ gen_rtx_SUBREG (HImode, zeroreg, 0)));
+ offset += 2;
+ }
+ if (count & 0x01)
+ {
+ rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
+ offset);
+ emit_insn (gen_strset (destreg, mem,
+ gen_rtx_SUBREG (QImode, zeroreg, 0)));
+ }
+ }
+ else
+ {
+ rtx countreg2;
+ rtx label = NULL;
+ /* Compute desired alignment of the string operation. */
+ int desired_alignment = (TARGET_PENTIUMPRO
+ && (count == 0 || count >= (unsigned int) 260)
+ ? 8 : UNITS_PER_WORD);
+
+ /* In case we don't know anything about the alignment, default to the
+ library version, since it is usually equally fast and results in
+ shorter code.
+
+ Also emit the call when we know that the count is large and the call
+ overhead will not be important. */
+ if (!TARGET_INLINE_ALL_STRINGOPS
+ && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
+ return 0;
+
+ if (TARGET_SINGLE_STRINGOP)
+ emit_insn (gen_cld ());
+
+ countreg2 = gen_reg_rtx (Pmode);
+ countreg = copy_to_mode_reg (counter_mode, count_exp);
+ zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
+ /* Get rid of MEM_OFFSET, it won't be accurate. */
+ dst = change_address (dst, BLKmode, destreg);
+
+ if (count == 0 && align < desired_alignment)
+ {
+ label = gen_label_rtx ();
+ emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
+ LEU, 0, counter_mode, 1, label);
+ }
+ if (align <= 1)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 1);
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (QImode, zeroreg, 0)));
+ ix86_adjust_counter (countreg, 1);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align <= 2)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 2);
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (HImode, zeroreg, 0)));
+ ix86_adjust_counter (countreg, 2);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align <= 4 && desired_alignment > 4)
+ {
+ rtx label = ix86_expand_aligntest (destreg, 4);
+ emit_insn (gen_strset (destreg, dst,
+ (TARGET_64BIT
+ ? gen_rtx_SUBREG (SImode, zeroreg, 0)
+ : zeroreg)));
+ ix86_adjust_counter (countreg, 4);
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+
+ if (label && desired_alignment > 4 && !TARGET_64BIT)
+ {
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ label = NULL_RTX;
+ }
+
+ if (!TARGET_SINGLE_STRINGOP)
+ emit_insn (gen_cld ());
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
+ GEN_INT (3)));
+ destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
+ }
+ else
+ {
+ emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
+ destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
+ }
+ destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
+ emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
+
+ if (label)
+ {
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+
+ if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (SImode, zeroreg, 0)));
+ if (TARGET_64BIT && (align <= 4 || count == 0))
+ {
+ rtx label = ix86_expand_aligntest (countreg, 4);
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (SImode, zeroreg, 0)));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align > 2 && count != 0 && (count & 2))
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (HImode, zeroreg, 0)));
+ if (align <= 2 || count == 0)
+ {
+ rtx label = ix86_expand_aligntest (countreg, 2);
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (HImode, zeroreg, 0)));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ if (align > 1 && count != 0 && (count & 1))
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (QImode, zeroreg, 0)));
+ if (align <= 1 || count == 0)
+ {
+ rtx label = ix86_expand_aligntest (countreg, 1);
+ emit_insn (gen_strset (destreg, dst,
+ gen_rtx_SUBREG (QImode, zeroreg, 0)));
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+ }
+ }
+ return 1;
+}
+
+/* Expand strlen. */
+int
+ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
+{
+ rtx addr, scratch1, scratch2, scratch3, scratch4;
+
+ /* The generic case of the strlen expander is long. Avoid expanding it
+ unless TARGET_INLINE_ALL_STRINGOPS. */
+
+ if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
+ && !TARGET_INLINE_ALL_STRINGOPS
+ && !optimize_size
+ && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
+ return 0;
+
+ addr = force_reg (Pmode, XEXP (src, 0));
+ scratch1 = gen_reg_rtx (Pmode);
+
+ if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
+ && !optimize_size)
+ {
+ /* It seems that some optimizers do not combine a call like
+ foo (strlen (bar), strlen (bar));
+ when the move and the subtraction are done here; the length is
+ calculated just once when these instructions are emitted inside
+ output_strlen_unroll(). But since &bar[strlen (bar)] is often
+ used, and this uses one fewer register for the lifetime of
+ output_strlen_unroll(), it is better this way. */
+
+ emit_move_insn (out, addr);
+
+ ix86_expand_strlensi_unroll_1 (out, src, align);
+
+ /* strlensi_unroll_1 returns the address of the zero at the end of
+ the string, like memchr(), so compute the length by subtracting
+ the start address. */
+ if (TARGET_64BIT)
+ emit_insn (gen_subdi3 (out, out, addr));
+ else
+ emit_insn (gen_subsi3 (out, out, addr));
+ }
+ else
+ {
+ rtx unspec;
+ scratch2 = gen_reg_rtx (Pmode);
+ scratch3 = gen_reg_rtx (Pmode);
+ scratch4 = force_reg (Pmode, constm1_rtx);
+
+ emit_move_insn (scratch3, addr);
+ eoschar = force_reg (QImode, eoschar);
+
+ emit_insn (gen_cld ());
+ src = replace_equiv_address_nv (src, scratch3);
+
+ /* If .md starts supporting :P, this can be done in .md. */
+ unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
+ scratch4), UNSPEC_SCAS);
+ emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
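+ /* scratch4 (%ecx) starts at -1; repnz scasb scans strlen + 1 bytes
+ (including the terminator), leaving the counter at -strlen - 2, so
+ length = ~counter - 1, which is what the one's complement and the
+ add of -1 below compute. */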
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
+ emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
+ }
+ else
+ {
+ emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
+ emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
+ }
+ }
+ return 1;
+}
+
+/* Expand the appropriate insns for doing strlen if not just doing
+ repnz; scasb
+
+ out = result, initialized with the start address
+ align_rtx = alignment of the address.
+ scratch = scratch register, initialized with the start address when
+ not aligned, otherwise undefined
+
+ This is just the body. It needs the initializations mentioned above and
+ some address computing at the end. These things are done in i386.md. */
+
+static void
+ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
+{
+ int align;
+ rtx tmp;
+ rtx align_2_label = NULL_RTX;
+ rtx align_3_label = NULL_RTX;
+ rtx align_4_label = gen_label_rtx ();
+ rtx end_0_label = gen_label_rtx ();
+ rtx mem;
+ rtx tmpreg = gen_reg_rtx (SImode);
+ rtx scratch = gen_reg_rtx (SImode);
+ rtx cmp;
+
+ align = 0;
+ if (GET_CODE (align_rtx) == CONST_INT)
+ align = INTVAL (align_rtx);
+
+ /* Loop to check 1..3 bytes for null to get an aligned pointer. */
+
+ /* Is there a known alignment and is it less than 4? */
+ if (align < 4)
+ {
+ rtx scratch1 = gen_reg_rtx (Pmode);
+ emit_move_insn (scratch1, out);
+ /* Is there a known alignment and is it not 2? */
+ if (align != 2)
+ {
+ align_3_label = gen_label_rtx (); /* Label when address is 3 mod 4 */
+ align_2_label = gen_label_rtx (); /* Label when address is 2 mod 4 */
+
+ /* Leave just the two low bits of the address; their value (0-3)
+ selects how many bytes must be checked before the pointer is
+ 4-byte aligned. */
+ align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
+ NULL_RTX, 0, OPTAB_WIDEN);
+
+ emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
+ Pmode, 1, align_4_label);
+ emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
+ Pmode, 1, align_2_label);
+ emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
+ Pmode, 1, align_3_label);
+ }
+ else
+ {
+ /* Since the alignment is 2, we have to check 2 or 0 bytes;
+ check whether the address is already 4-byte aligned. */
+
+ align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
+ NULL_RTX, 0, OPTAB_WIDEN);
+
+ emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
+ Pmode, 1, align_4_label);
+ }
+
+ mem = change_address (src, QImode, out);
+
+ /* Now compare the bytes. */
+
+ /* Compare the first n unaligned bytes on a byte-by-byte basis. */
+ emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
+ QImode, 1, end_0_label);
+
+ /* Increment the address. */
+ if (TARGET_64BIT)
+ emit_insn (gen_adddi3 (out, out, const1_rtx));
+ else
+ emit_insn (gen_addsi3 (out, out, const1_rtx));
+
+ /* Not needed with an alignment of 2. */
+ if (align != 2)
+ {
+ emit_label (align_2_label);
+
+ emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
+ end_0_label);
+
+ if (TARGET_64BIT)
+ emit_insn (gen_adddi3 (out, out, const1_rtx));
+ else
+ emit_insn (gen_addsi3 (out, out, const1_rtx));
+
+ emit_label (align_3_label);
+ }
+
+ emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
+ end_0_label);
+
+ if (TARGET_64BIT)
+ emit_insn (gen_adddi3 (out, out, const1_rtx));
+ else
+ emit_insn (gen_addsi3 (out, out, const1_rtx));
+ }
+
+ /* Generate a loop to check 4 bytes at a time. Aligning this loop is
+ not a good idea: it only makes the program larger and does not help
+ speed. */
+ emit_label (align_4_label);
+
+ mem = change_address (src, SImode, out);
+ emit_move_insn (scratch, mem);
+ if (TARGET_64BIT)
+ emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
+ else
+ emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
+
+ /* This formula yields a nonzero result iff one of the bytes is zero.
+ This saves three branches inside the loop and many cycles. */
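+ /* The value computed below is (x - 0x01010101) & ~x & 0x80808080.
+ E.g. x = 0x12340078 gives 0x1132ff77 & 0xedcbff87 & 0x80808080
+ = 0x00008000, flagging the zero byte, while x = 0x12345678 gives
+ 0x11334577 & 0xedcba987 & 0x80808080 = 0. */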
+
+ emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
+ emit_insn (gen_one_cmplsi2 (scratch, scratch));
+ emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
+ emit_insn (gen_andsi3 (tmpreg, tmpreg,
+ gen_int_mode (0x80808080, SImode)));
+ emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
+ align_4_label);
+
+ if (TARGET_CMOVE)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx reg2 = gen_reg_rtx (Pmode);
+ emit_move_insn (reg, tmpreg);
+ emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
+
+ /* If zero is not in the first two bytes, move two bytes forward. */
+ emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
+ tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
+ gen_rtx_IF_THEN_ELSE (SImode, tmp,
+ reg,
+ tmpreg)));
+ /* Emit lea manually to avoid clobbering the flags. */
+ emit_insn (gen_rtx_SET (SImode, reg2,
+ gen_rtx_PLUS (Pmode, out, const2_rtx)));
+
+ tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, out,
+ gen_rtx_IF_THEN_ELSE (Pmode, tmp,
+ reg2,
+ out)));
+
+ }
+ else
+ {
+ rtx end_2_label = gen_label_rtx ();
+ /* Is zero in the first two bytes? */
+
+ emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
+ tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
+ gen_rtx_LABEL_REF (VOIDmode, end_2_label),
+ pc_rtx);
+ tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
+ JUMP_LABEL (tmp) = end_2_label;
+
+ /* Not in the first two. Move two bytes forward. */
+ emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
+ if (TARGET_64BIT)
+ emit_insn (gen_adddi3 (out, out, const2_rtx));
+ else
+ emit_insn (gen_addsi3 (out, out, const2_rtx));
+
+ emit_label (end_2_label);
+
+ }
+
+ /* Avoid branch in fixing the byte. */
+ tmpreg = gen_lowpart (QImode, tmpreg);
+ emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
+ cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
+ if (TARGET_64BIT)
+ emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
+ else
+ emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
+
+ emit_label (end_0_label);
+}
+
+void
+ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
+ rtx callarg2 ATTRIBUTE_UNUSED,
+ rtx pop, int sibcall)
+{
+ rtx use = NULL, call;
+
+ if (pop == const0_rtx)
+ pop = NULL;
+ gcc_assert (!TARGET_64BIT || !pop);
+
+ if (TARGET_MACHO && !TARGET_64BIT)
+ {
+#if TARGET_MACHO
+ if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
+ fnaddr = machopic_indirect_call_target (fnaddr);
+#endif
+ }
+ else
+ {
+ /* Static functions and indirect calls don't need the pic register. */
+ if (! TARGET_64BIT && flag_pic
+ && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
+ && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
+ use_reg (&use, pic_offset_table_rtx);
+ }
+
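+ /* For x86-64 calls to variadic functions the psABI passes an upper
+ bound on the number of SSE registers used for the arguments in %al;
+ callarg2 carries that bound, and a negative value means it does not
+ apply. */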
+ if (TARGET_64BIT && INTVAL (callarg2) >= 0)
+ {
+ rtx al = gen_rtx_REG (QImode, 0);
+ emit_move_insn (al, callarg2);
+ use_reg (&use, al);
+ }
+
+ if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
+ {
+ fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
+ fnaddr = gen_rtx_MEM (QImode, fnaddr);
+ }
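+ /* A 64-bit sibcall cannot use a register that carries arguments or is
+ callee-saved, and r10 holds the static chain, so r11 is the one
+ call-clobbered scratch register available for the target address. */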
+ if (sibcall && TARGET_64BIT
+ && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
+ {
+ rtx addr;
+ addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
+ fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
+ emit_move_insn (fnaddr, addr);
+ fnaddr = gen_rtx_MEM (QImode, fnaddr);
+ }
+
+ call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
+ if (retval)
+ call = gen_rtx_SET (VOIDmode, retval, call);
+ if (pop)
+ {
+ pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
+ pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
+ call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
+ }
+
+ call = emit_call_insn (call);
+ if (use)
+ CALL_INSN_FUNCTION_USAGE (call) = use;
+}
+
+
+/* Clear stack slot assignments remembered from previous functions.
+ This is called from INIT_EXPANDERS once before RTL is emitted for each
+ function. */
+
+static struct machine_function *
+ix86_init_machine_status (void)
+{
+ struct machine_function *f;
+
+ f = ggc_alloc_cleared (sizeof (struct machine_function));
+ f->use_fast_prologue_epilogue_nregs = -1;
+ f->tls_descriptor_call_expanded_p = 0;
+
+ return f;
+}
+
+/* Return a MEM corresponding to a stack slot with mode MODE.
+ Allocate a new slot if necessary.
+
+ The RTL for a function can have several slots available: N is
+ which slot to use. */
+
+rtx
+assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
+{
+ struct stack_local_entry *s;
+
+ gcc_assert (n < MAX_386_STACK_LOCALS);
+
+ /* Virtual slot is valid only before vregs are instantiated. */
+ gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
+
+ for (s = ix86_stack_locals; s; s = s->next)
+ if (s->mode == mode && s->n == n)
+ return s->rtl;
+
+ s = (struct stack_local_entry *)
+ ggc_alloc (sizeof (struct stack_local_entry));
+ s->n = n;
+ s->mode = mode;
+ s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+
+ s->next = ix86_stack_locals;
+ ix86_stack_locals = s;
+ return s->rtl;
+}
+
+/* Construct the SYMBOL_REF for the tls_get_addr function. */
+
+static GTY(()) rtx ix86_tls_symbol;
+rtx
+ix86_tls_get_addr (void)
+{
+ if (!ix86_tls_symbol)
+ {
+ ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
+ (TARGET_ANY_GNU_TLS
+ && !TARGET_64BIT)
+ ? "___tls_get_addr"
+ : "__tls_get_addr");
+ }
+
+ return ix86_tls_symbol;
+}
+
+/* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
+
+static GTY(()) rtx ix86_tls_module_base_symbol;
+rtx
+ix86_tls_module_base (void)
+{
+ if (!ix86_tls_module_base_symbol)
+ {
+ ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
+ "_TLS_MODULE_BASE_");
+ SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
+ |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
+ }
+
+ return ix86_tls_module_base_symbol;
+}
+
+/* Calculate the length of the memory address in the instruction
+ encoding. Does not include the one-byte modrm, opcode, or prefix. */
+
+int
+memory_address_length (rtx addr)
+{
+ struct ix86_address parts;
+ rtx base, index, disp;
+ int len;
+ int ok;
+
+ if (GET_CODE (addr) == PRE_DEC
+ || GET_CODE (addr) == POST_INC
+ || GET_CODE (addr) == PRE_MODIFY
+ || GET_CODE (addr) == POST_MODIFY)
+ return 0;
+
+ ok = ix86_decompose_address (addr, &parts);
+ gcc_assert (ok);
+
+ if (parts.base && GET_CODE (parts.base) == SUBREG)
+ parts.base = SUBREG_REG (parts.base);
+ if (parts.index && GET_CODE (parts.index) == SUBREG)
+ parts.index = SUBREG_REG (parts.index);
+
+ base = parts.base;
+ index = parts.index;
+ disp = parts.disp;
+ len = 0;
+
+ /* Rule of thumb:
+ - esp as the base always wants an index,
+ - ebp as the base always wants a displacement. */
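+ /* For instance, (%eax) costs no extra bytes here (len 0), (%esp)
+ needs a SIB byte and (%ebp) a disp8 (len 1 each), 12(%eax) needs a
+ disp8 (len 1), and a disp(,%eax,4) access needs a SIB byte plus a
+ disp32 (len 5). */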
+
+ /* Register Indirect. */
+ if (base && !index && !disp)
+ {
+ /* esp (for its index) and ebp (for its displacement) need
+ the two-byte modrm form. */
+ if (addr == stack_pointer_rtx
+ || addr == arg_pointer_rtx
+ || addr == frame_pointer_rtx
+ || addr == hard_frame_pointer_rtx)
+ len = 1;
+ }
+
+ /* Direct Addressing. */
+ else if (disp && !base && !index)
+ len = 4;
+
+ else
+ {
+ /* Find the length of the displacement constant. */
+ if (disp)
+ {
+ if (base && satisfies_constraint_K (disp))
+ len = 1;
+ else
+ len = 4;
+ }
+ /* ebp always wants a displacement. */
+ else if (base == hard_frame_pointer_rtx)
+ len = 1;
+
+ /* An index requires the two-byte modrm form.... */
+ if (index
+ /* ...like esp, which always wants an index. */
+ || base == stack_pointer_rtx
+ || base == arg_pointer_rtx
+ || base == frame_pointer_rtx)
+ len += 1;
+ }
+
+ return len;
+}
+
+/* Compute the default value for the "length_immediate" attribute. When
+ SHORTFORM is set, expect that the insn has an 8-bit immediate
+ alternative. */
+int
+ix86_attr_length_immediate_default (rtx insn, int shortform)
+{
+ int len = 0;
+ int i;
+ extract_insn_cached (insn);
+ for (i = recog_data.n_operands - 1; i >= 0; --i)
+ if (CONSTANT_P (recog_data.operand[i]))
+ {
+ gcc_assert (!len);
+ if (shortform && satisfies_constraint_K (recog_data.operand[i]))
+ len = 1;
+ else
+ {
+ switch (get_attr_mode (insn))
+ {
+ case MODE_QI:
+ len += 1;
+ break;
+ case MODE_HI:
+ len += 2;
+ break;
+ case MODE_SI:
+ len += 4;
+ break;
+ /* Immediates for DImode instructions are encoded as
+ 32-bit sign-extended values. */
+ case MODE_DI:
+ len += 4;
+ break;
+ default:
+ fatal_insn ("unknown insn mode", insn);
+ }
+ }
+ }
+ return len;
+}
+/* Compute the default value for the "length_address" attribute. */
+int
+ix86_attr_length_address_default (rtx insn)
+{
+ int i;
+
+ if (get_attr_type (insn) == TYPE_LEA)
+ {
+ rtx set = PATTERN (insn);
+
+ if (GET_CODE (set) == PARALLEL)
+ set = XVECEXP (set, 0, 0);
+
+ gcc_assert (GET_CODE (set) == SET);
+
+ return memory_address_length (SET_SRC (set));
+ }
+
+ extract_insn_cached (insn);
+ for (i = recog_data.n_operands - 1; i >= 0; --i)
+ if (GET_CODE (recog_data.operand[i]) == MEM)
+ return memory_address_length (XEXP (recog_data.operand[i], 0));
+ return 0;
+}
+
+/* Return the maximum number of instructions a cpu can issue. */
+
+static int
+ix86_issue_rate (void)
+{
+ switch (ix86_tune)
+ {
+ case PROCESSOR_PENTIUM:
+ case PROCESSOR_K6:
+ return 2;
+
+ case PROCESSOR_PENTIUMPRO:
+ case PROCESSOR_PENTIUM4:
+ case PROCESSOR_ATHLON:
+ case PROCESSOR_K8:
+ case PROCESSOR_NOCONA:
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
+ return 3;
+ /* APPLE LOCAL begin mainline */
+ case PROCESSOR_CORE2:
+ return 4;
+ /* APPLE LOCAL end mainline */
+
+ default:
+ return 1;
+ }
+}
+
+/* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
+ set by DEP_INSN and nothing else set by DEP_INSN. */
+
+static int
+ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
+{
+ rtx set, set2;
+
+ /* Simplify the test for uninteresting insns. */
+ if (insn_type != TYPE_SETCC
+ && insn_type != TYPE_ICMOV
+ && insn_type != TYPE_FCMOV
+ && insn_type != TYPE_IBR)
+ return 0;
+
+ if ((set = single_set (dep_insn)) != 0)
+ {
+ set = SET_DEST (set);
+ set2 = NULL_RTX;
+ }
+ else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
+ && XVECLEN (PATTERN (dep_insn), 0) == 2
+ && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
+ && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
+ {
+ set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
+ set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
+ }
+ else
+ return 0;
+
+ if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
+ return 0;
+
+ /* This test is true if the dependent insn reads the flags but
+ not any other potentially set register. */
+ if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
+ return 0;
+
+ if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
+ return 0;
+
+ return 1;
+}
+
+/* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
+ address with operands set by DEP_INSN. */
+
+static int
+ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
+{
+ rtx addr;
+
+ if (insn_type == TYPE_LEA
+ && TARGET_PENTIUM)
+ {
+ addr = PATTERN (insn);
+
+ if (GET_CODE (addr) == PARALLEL)
+ addr = XVECEXP (addr, 0, 0);
+
+ gcc_assert (GET_CODE (addr) == SET);
+
+ addr = SET_SRC (addr);
+ }
+ else
+ {
+ int i;
+ extract_insn_cached (insn);
+ for (i = recog_data.n_operands - 1; i >= 0; --i)
+ if (GET_CODE (recog_data.operand[i]) == MEM)
+ {
+ addr = XEXP (recog_data.operand[i], 0);
+ goto found;
+ }
+ return 0;
+ found:;
+ }
+
+ return modified_in_p (addr, dep_insn);
+}
+
+static int
+ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ enum attr_type insn_type, dep_insn_type;
+ enum attr_memory memory;
+ rtx set, set2;
+ int dep_insn_code_number;
+
+ /* Anti and output dependencies have zero cost on all CPUs. */
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+
+ dep_insn_code_number = recog_memoized (dep_insn);
+
+ /* If we can't recognize the insns, we can't really do anything. */
+ if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
+ return cost;
+
+ insn_type = get_attr_type (insn);
+ dep_insn_type = get_attr_type (dep_insn);
+
+ switch (ix86_tune)
+ {
+ case PROCESSOR_PENTIUM:
+ /* Address Generation Interlock adds a cycle of latency. */
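+ /* For example, addl $4, %ebx immediately followed by
+ movl (%ebx), %eax pays the extra cycle, since the load's address
+ generation needs the %ebx value computed the cycle before. */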
+ if (ix86_agi_dependent (insn, dep_insn, insn_type))
+ cost += 1;
+
+ /* ??? Compares pair with jump/setcc. */
+ if (ix86_flags_dependent (insn, dep_insn, insn_type))
+ cost = 0;
+
+ /* Floating point stores require value to be ready one cycle earlier. */
+ if (insn_type == TYPE_FMOV
+ && get_attr_memory (insn) == MEMORY_STORE
+ && !ix86_agi_dependent (insn, dep_insn, insn_type))
+ cost += 1;
+ break;
+
+ case PROCESSOR_PENTIUMPRO:
+ memory = get_attr_memory (insn);
+
+ /* INT->FP conversion is expensive. */
+ if (get_attr_fp_int_src (dep_insn))
+ cost += 5;
+
+ /* There is one cycle extra latency between an FP op and a store. */
+ if (insn_type == TYPE_FMOV
+ && (set = single_set (dep_insn)) != NULL_RTX
+ && (set2 = single_set (insn)) != NULL_RTX
+ && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
+ && GET_CODE (SET_DEST (set2)) == MEM)
+ cost += 1;
+
+ /* Show the ability of the reorder buffer to hide the latency of a load
+ by executing it in parallel with the previous instruction when the
+ previous instruction is not needed to compute the address. */
+ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+ && !ix86_agi_dependent (insn, dep_insn, insn_type))
+ {
+ /* Claim that moves take one cycle, as the core can issue one load
+ at a time and the next load can start a cycle later. */
+ if (dep_insn_type == TYPE_IMOV
+ || dep_insn_type == TYPE_FMOV)
+ cost = 1;
+ else if (cost > 1)
+ cost--;
+ }
+ break;
+
+ case PROCESSOR_K6:
+ memory = get_attr_memory (insn);
+
+ /* The esp dependency is resolved before the instruction is really
+ finished. */
+ if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
+ && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
+ return 1;
+
+ /* INT->FP conversion is expensive. */
+ if (get_attr_fp_int_src (dep_insn))
+ cost += 5;
+
+ /* Show the ability of the reorder buffer to hide the latency of a load
+ by executing it in parallel with the previous instruction when the
+ previous instruction is not needed to compute the address. */
+ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+ && !ix86_agi_dependent (insn, dep_insn, insn_type))
+ {
+ /* Claim that moves take one cycle, as the core can issue one load
+ at a time and the next load can start a cycle later. */
+ if (dep_insn_type == TYPE_IMOV
+ || dep_insn_type == TYPE_FMOV)
+ cost = 1;
+ else if (cost > 2)
+ cost -= 2;
+ else
+ cost = 1;
+ }
+ break;
+
+ case PROCESSOR_ATHLON:
+ case PROCESSOR_K8:
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
+ memory = get_attr_memory (insn);
+
+ /* Show the ability of the reorder buffer to hide the latency of a load
+ by executing it in parallel with the previous instruction when the
+ previous instruction is not needed to compute the address. */
+ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
+ && !ix86_agi_dependent (insn, dep_insn, insn_type))
+ {
+ enum attr_unit unit = get_attr_unit (insn);
+ int loadcost = 3;
+
+ /* Because of the difference between the length of the integer and
+ floating unit pipeline preparation stages, the memory operands
+ for floating point are cheaper.
+
+ ??? For Athlon the difference is most probably 2. */
+ if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
+ loadcost = 3;
+ else
+ loadcost = TARGET_ATHLON ? 2 : 0;
+
+ if (cost >= loadcost)
+ cost -= loadcost;
+ else
+ cost = 0;
+ }
+
+ default:
+ break;
+ }
+
+ return cost;
+}
+
+/* How many alternative schedules to try. This should be as wide as the
+ scheduling freedom in the DFA, but no wider. Making this value too
+ large results in extra work for the scheduler. */
+
+static int
+ia32_multipass_dfa_lookahead (void)
+{
+ if (ix86_tune == PROCESSOR_PENTIUM)
+ return 2;
+
+ if (ix86_tune == PROCESSOR_PENTIUMPRO
+ || ix86_tune == PROCESSOR_K6)
+ return 1;
+
+ return 0;
+}
+
+
+/* Compute the alignment given to a constant that is being placed in memory.
+ EXP is the constant and ALIGN is the alignment that the object would
+ ordinarily have.
+ The value of this function is used instead of that alignment to align
+ the object. */
+
+int
+ix86_constant_alignment (tree exp, int align)
+{
+ if (TREE_CODE (exp) == REAL_CST)
+ {
+ if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
+ return 64;
+ else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
+ return 128;
+ }
+ else if (!optimize_size && TREE_CODE (exp) == STRING_CST
+ && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
+ return BITS_PER_WORD;
+
+/* APPLE LOCAL begin 4090661 */
+#if TARGET_MACHO
+ /* Without this, static arrays initialized to strings get aligned
+ to 32 bytes. These go in cstring, so this would result in a lot of
+ extra padding in files with a couple of small strings. 4090661. */
+ else if (TREE_CODE (exp) == STRING_CST)
+ {
+ if (TREE_STRING_LENGTH (exp) >= 31 && !optimize_size)
+ return BITS_PER_WORD;
+ else
+ return 8;
+ }
+#endif
+/* APPLE LOCAL end 4090661 */
+ return align;
+}
+
+/* Compute the alignment for a static variable.
+ TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this function is used
+ instead of that alignment to align the object. */
+
+int
+ix86_data_alignment (tree type, int align)
+{
+ int max_align = optimize_size ? BITS_PER_WORD : 256;
+
+ if (AGGREGATE_TYPE_P (type)
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
+ && align < max_align)
+ align = max_align;
+
+ /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
+ to a 16-byte boundary. */
+ if (TARGET_64BIT)
+ {
+ if (AGGREGATE_TYPE_P (type)
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
+ return 128;
+ }
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
+ return 128;
+ }
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ if (TYPE_MODE (type) == DCmode && align < 64)
+ return 64;
+ if (TYPE_MODE (type) == XCmode && align < 128)
+ return 128;
+ }
+ else if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_FIELDS (type))
+ {
+ if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
+ return 128;
+ }
+ else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
+ || TREE_CODE (type) == INTEGER_TYPE)
+ {
+ if (TYPE_MODE (type) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
+ return 128;
+ }
+
+ return align;
+}
+
+/* Compute the alignment for a local variable.
+ TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object. */
+
+int
+ix86_local_alignment (tree type, int align)
+{
+ /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
+ to a 16-byte boundary. */
+ if (TARGET_64BIT)
+ {
+ if (AGGREGATE_TYPE_P (type)
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
+ || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
+ return 128;
+ }
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
+ return 128;
+ }
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ if (TYPE_MODE (type) == DCmode && align < 64)
+ return 64;
+ if (TYPE_MODE (type) == XCmode && align < 128)
+ return 128;
+ }
+ else if ((TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ && TYPE_FIELDS (type))
+ {
+ if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
+ return 128;
+ }
+ else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
+ || TREE_CODE (type) == INTEGER_TYPE)
+ {
+ if (TYPE_MODE (type) == DFmode && align < 64)
+ return 64;
+ if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
+ return 128;
+ }
+ return align;
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
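+/* The 32-bit trampoline emitted below is
+ b9 <cxt> movl $cxt, %ecx
+ e9 <disp> jmp <fnaddr> (pc-relative)
+ while the 64-bit one loads FNADDR into r11 (opcode bytes 41 bb imm32,
+ or 49 bb imm64), loads CXT into r10 (49 ba imm64) and finishes with
+ jmp *%r11 (49 ff e3), matching the integer constants stored below. */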
+void
+x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
+{
+ if (!TARGET_64BIT)
+ {
+ /* Compute offset from the end of the jmp to the target function. */
+ rtx disp = expand_binop (SImode, sub_optab, fnaddr,
+ plus_constant (tramp, 10),
+ NULL_RTX, 1, OPTAB_DIRECT);
+ emit_move_insn (gen_rtx_MEM (QImode, tramp),
+ gen_int_mode (0xb9, QImode));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
+ emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
+ gen_int_mode (0xe9, QImode));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
+ }
+ else
+ {
+ int offset = 0;
+ /* Try to load the address using the shorter movl instead of movabs.
+ We may want to support movq for kernel mode, but the kernel does not
+ use trampolines at the moment. */
+ if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
+ {
+ fnaddr = copy_to_mode_reg (DImode, fnaddr);
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
+ gen_int_mode (0xbb41, HImode));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
+ gen_lowpart (SImode, fnaddr));
+ offset += 6;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
+ gen_int_mode (0xbb49, HImode));
+ emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
+ fnaddr);
+ offset += 10;
+ }
+ /* Load static chain using movabs to r10. */
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
+ gen_int_mode (0xba49, HImode));
+ emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
+ cxt);
+ offset += 10;
+ /* Jump to r11. */
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
+ gen_int_mode (0xff49, HImode));
+ emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
+ gen_int_mode (0xe3, QImode));
+ offset += 3;
+ gcc_assert (offset <= TRAMPOLINE_SIZE);
+ }
+
+#ifdef ENABLE_EXECUTE_STACK
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
+ LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
+#endif
+}
+
+/* Codes for all the SSE/MMX builtins. */
+enum ix86_builtins
+{
+ IX86_BUILTIN_ADDPS,
+ IX86_BUILTIN_ADDSS,
+ IX86_BUILTIN_DIVPS,
+ IX86_BUILTIN_DIVSS,
+ IX86_BUILTIN_MULPS,
+ IX86_BUILTIN_MULSS,
+ IX86_BUILTIN_SUBPS,
+ IX86_BUILTIN_SUBSS,
+
+ IX86_BUILTIN_CMPEQPS,
+ IX86_BUILTIN_CMPLTPS,
+ IX86_BUILTIN_CMPLEPS,
+ IX86_BUILTIN_CMPGTPS,
+ IX86_BUILTIN_CMPGEPS,
+ IX86_BUILTIN_CMPNEQPS,
+ IX86_BUILTIN_CMPNLTPS,
+ IX86_BUILTIN_CMPNLEPS,
+ IX86_BUILTIN_CMPNGTPS,
+ IX86_BUILTIN_CMPNGEPS,
+ IX86_BUILTIN_CMPORDPS,
+ IX86_BUILTIN_CMPUNORDPS,
+ IX86_BUILTIN_CMPEQSS,
+ IX86_BUILTIN_CMPLTSS,
+ IX86_BUILTIN_CMPLESS,
+ IX86_BUILTIN_CMPNEQSS,
+ IX86_BUILTIN_CMPNLTSS,
+ IX86_BUILTIN_CMPNLESS,
+ IX86_BUILTIN_CMPNGTSS,
+ IX86_BUILTIN_CMPNGESS,
+ IX86_BUILTIN_CMPORDSS,
+ IX86_BUILTIN_CMPUNORDSS,
+
+ IX86_BUILTIN_COMIEQSS,
+ IX86_BUILTIN_COMILTSS,
+ IX86_BUILTIN_COMILESS,
+ IX86_BUILTIN_COMIGTSS,
+ IX86_BUILTIN_COMIGESS,
+ IX86_BUILTIN_COMINEQSS,
+ IX86_BUILTIN_UCOMIEQSS,
+ IX86_BUILTIN_UCOMILTSS,
+ IX86_BUILTIN_UCOMILESS,
+ IX86_BUILTIN_UCOMIGTSS,
+ IX86_BUILTIN_UCOMIGESS,
+ IX86_BUILTIN_UCOMINEQSS,
+
+ IX86_BUILTIN_CVTPI2PS,
+ IX86_BUILTIN_CVTPS2PI,
+ IX86_BUILTIN_CVTSI2SS,
+ IX86_BUILTIN_CVTSI642SS,
+ IX86_BUILTIN_CVTSS2SI,
+ IX86_BUILTIN_CVTSS2SI64,
+ IX86_BUILTIN_CVTTPS2PI,
+ IX86_BUILTIN_CVTTSS2SI,
+ IX86_BUILTIN_CVTTSS2SI64,
+
+ IX86_BUILTIN_MAXPS,
+ IX86_BUILTIN_MAXSS,
+ IX86_BUILTIN_MINPS,
+ IX86_BUILTIN_MINSS,
+
+ IX86_BUILTIN_LOADUPS,
+ IX86_BUILTIN_STOREUPS,
+ IX86_BUILTIN_MOVSS,
+
+ IX86_BUILTIN_MOVHLPS,
+ IX86_BUILTIN_MOVLHPS,
+ IX86_BUILTIN_LOADHPS,
+ IX86_BUILTIN_LOADLPS,
+ IX86_BUILTIN_STOREHPS,
+ IX86_BUILTIN_STORELPS,
+
+ IX86_BUILTIN_MASKMOVQ,
+ IX86_BUILTIN_MOVMSKPS,
+ IX86_BUILTIN_PMOVMSKB,
+
+ IX86_BUILTIN_MOVNTPS,
+ IX86_BUILTIN_MOVNTQ,
+
+ IX86_BUILTIN_LOADDQU,
+ IX86_BUILTIN_STOREDQU,
+
+ IX86_BUILTIN_PACKSSWB,
+ IX86_BUILTIN_PACKSSDW,
+ IX86_BUILTIN_PACKUSWB,
+
+ IX86_BUILTIN_PADDB,
+ IX86_BUILTIN_PADDW,
+ IX86_BUILTIN_PADDD,
+ IX86_BUILTIN_PADDQ,
+ IX86_BUILTIN_PADDSB,
+ IX86_BUILTIN_PADDSW,
+ IX86_BUILTIN_PADDUSB,
+ IX86_BUILTIN_PADDUSW,
+ IX86_BUILTIN_PSUBB,
+ IX86_BUILTIN_PSUBW,
+ IX86_BUILTIN_PSUBD,
+ IX86_BUILTIN_PSUBQ,
+ IX86_BUILTIN_PSUBSB,
+ IX86_BUILTIN_PSUBSW,
+ IX86_BUILTIN_PSUBUSB,
+ IX86_BUILTIN_PSUBUSW,
+
+ IX86_BUILTIN_PAND,
+ IX86_BUILTIN_PANDN,
+ IX86_BUILTIN_POR,
+ IX86_BUILTIN_PXOR,
+
+ IX86_BUILTIN_PAVGB,
+ IX86_BUILTIN_PAVGW,
+
+ IX86_BUILTIN_PCMPEQB,
+ IX86_BUILTIN_PCMPEQW,
+ IX86_BUILTIN_PCMPEQD,
+ IX86_BUILTIN_PCMPGTB,
+ IX86_BUILTIN_PCMPGTW,
+ IX86_BUILTIN_PCMPGTD,
+
+ IX86_BUILTIN_PMADDWD,
+
+ IX86_BUILTIN_PMAXSW,
+ IX86_BUILTIN_PMAXUB,
+ IX86_BUILTIN_PMINSW,
+ IX86_BUILTIN_PMINUB,
+
+ IX86_BUILTIN_PMULHUW,
+ IX86_BUILTIN_PMULHW,
+ IX86_BUILTIN_PMULLW,
+
+ IX86_BUILTIN_PSADBW,
+ IX86_BUILTIN_PSHUFW,
+
+ IX86_BUILTIN_PSLLW,
+ IX86_BUILTIN_PSLLD,
+ IX86_BUILTIN_PSLLQ,
+ IX86_BUILTIN_PSRAW,
+ IX86_BUILTIN_PSRAD,
+ IX86_BUILTIN_PSRLW,
+ IX86_BUILTIN_PSRLD,
+ IX86_BUILTIN_PSRLQ,
+ IX86_BUILTIN_PSLLWI,
+ IX86_BUILTIN_PSLLDI,
+ IX86_BUILTIN_PSLLQI,
+ IX86_BUILTIN_PSRAWI,
+ IX86_BUILTIN_PSRADI,
+ IX86_BUILTIN_PSRLWI,
+ IX86_BUILTIN_PSRLDI,
+ IX86_BUILTIN_PSRLQI,
+
+ IX86_BUILTIN_PUNPCKHBW,
+ IX86_BUILTIN_PUNPCKHWD,
+ IX86_BUILTIN_PUNPCKHDQ,
+ IX86_BUILTIN_PUNPCKLBW,
+ IX86_BUILTIN_PUNPCKLWD,
+ IX86_BUILTIN_PUNPCKLDQ,
+
+ IX86_BUILTIN_SHUFPS,
+
+ IX86_BUILTIN_RCPPS,
+ IX86_BUILTIN_RCPSS,
+ IX86_BUILTIN_RSQRTPS,
+ IX86_BUILTIN_RSQRTSS,
+ IX86_BUILTIN_SQRTPS,
+ IX86_BUILTIN_SQRTSS,
+
+ IX86_BUILTIN_UNPCKHPS,
+ IX86_BUILTIN_UNPCKLPS,
+
+ IX86_BUILTIN_ANDPS,
+ IX86_BUILTIN_ANDNPS,
+ IX86_BUILTIN_ORPS,
+ IX86_BUILTIN_XORPS,
+
+ IX86_BUILTIN_EMMS,
+ IX86_BUILTIN_LDMXCSR,
+ IX86_BUILTIN_STMXCSR,
+ IX86_BUILTIN_SFENCE,
+
+ /* 3DNow! Original */
+ IX86_BUILTIN_FEMMS,
+ IX86_BUILTIN_PAVGUSB,
+ IX86_BUILTIN_PF2ID,
+ IX86_BUILTIN_PFACC,
+ IX86_BUILTIN_PFADD,
+ IX86_BUILTIN_PFCMPEQ,
+ IX86_BUILTIN_PFCMPGE,
+ IX86_BUILTIN_PFCMPGT,
+ IX86_BUILTIN_PFMAX,
+ IX86_BUILTIN_PFMIN,
+ IX86_BUILTIN_PFMUL,
+ IX86_BUILTIN_PFRCP,
+ IX86_BUILTIN_PFRCPIT1,
+ IX86_BUILTIN_PFRCPIT2,
+ IX86_BUILTIN_PFRSQIT1,
+ IX86_BUILTIN_PFRSQRT,
+ IX86_BUILTIN_PFSUB,
+ IX86_BUILTIN_PFSUBR,
+ IX86_BUILTIN_PI2FD,
+ IX86_BUILTIN_PMULHRW,
+
+ /* 3DNow! Athlon Extensions */
+ IX86_BUILTIN_PF2IW,
+ IX86_BUILTIN_PFNACC,
+ IX86_BUILTIN_PFPNACC,
+ IX86_BUILTIN_PI2FW,
+ IX86_BUILTIN_PSWAPDSI,
+ IX86_BUILTIN_PSWAPDSF,
+
+ /* SSE2 */
+ IX86_BUILTIN_ADDPD,
+ IX86_BUILTIN_ADDSD,
+ IX86_BUILTIN_DIVPD,
+ IX86_BUILTIN_DIVSD,
+ IX86_BUILTIN_MULPD,
+ IX86_BUILTIN_MULSD,
+ IX86_BUILTIN_SUBPD,
+ IX86_BUILTIN_SUBSD,
+
+ IX86_BUILTIN_CMPEQPD,
+ IX86_BUILTIN_CMPLTPD,
+ IX86_BUILTIN_CMPLEPD,
+ IX86_BUILTIN_CMPGTPD,
+ IX86_BUILTIN_CMPGEPD,
+ IX86_BUILTIN_CMPNEQPD,
+ IX86_BUILTIN_CMPNLTPD,
+ IX86_BUILTIN_CMPNLEPD,
+ IX86_BUILTIN_CMPNGTPD,
+ IX86_BUILTIN_CMPNGEPD,
+ IX86_BUILTIN_CMPORDPD,
+ IX86_BUILTIN_CMPUNORDPD,
+ IX86_BUILTIN_CMPNEPD,
+ IX86_BUILTIN_CMPEQSD,
+ IX86_BUILTIN_CMPLTSD,
+ IX86_BUILTIN_CMPLESD,
+ IX86_BUILTIN_CMPNEQSD,
+ IX86_BUILTIN_CMPNLTSD,
+ IX86_BUILTIN_CMPNLESD,
+ IX86_BUILTIN_CMPORDSD,
+ IX86_BUILTIN_CMPUNORDSD,
+ IX86_BUILTIN_CMPNESD,
+
+ IX86_BUILTIN_COMIEQSD,
+ IX86_BUILTIN_COMILTSD,
+ IX86_BUILTIN_COMILESD,
+ IX86_BUILTIN_COMIGTSD,
+ IX86_BUILTIN_COMIGESD,
+ IX86_BUILTIN_COMINEQSD,
+ IX86_BUILTIN_UCOMIEQSD,
+ IX86_BUILTIN_UCOMILTSD,
+ IX86_BUILTIN_UCOMILESD,
+ IX86_BUILTIN_UCOMIGTSD,
+ IX86_BUILTIN_UCOMIGESD,
+ IX86_BUILTIN_UCOMINEQSD,
+
+ IX86_BUILTIN_MAXPD,
+ IX86_BUILTIN_MAXSD,
+ IX86_BUILTIN_MINPD,
+ IX86_BUILTIN_MINSD,
+
+ IX86_BUILTIN_ANDPD,
+ IX86_BUILTIN_ANDNPD,
+ IX86_BUILTIN_ORPD,
+ IX86_BUILTIN_XORPD,
+
+ IX86_BUILTIN_SQRTPD,
+ IX86_BUILTIN_SQRTSD,
+
+ IX86_BUILTIN_UNPCKHPD,
+ IX86_BUILTIN_UNPCKLPD,
+
+ IX86_BUILTIN_SHUFPD,
+
+ IX86_BUILTIN_LOADUPD,
+ IX86_BUILTIN_STOREUPD,
+ IX86_BUILTIN_MOVSD,
+
+ IX86_BUILTIN_LOADHPD,
+ IX86_BUILTIN_LOADLPD,
+
+ IX86_BUILTIN_CVTDQ2PD,
+ IX86_BUILTIN_CVTDQ2PS,
+
+ IX86_BUILTIN_CVTPD2DQ,
+ IX86_BUILTIN_CVTPD2PI,
+ IX86_BUILTIN_CVTPD2PS,
+ IX86_BUILTIN_CVTTPD2DQ,
+ IX86_BUILTIN_CVTTPD2PI,
+
+ IX86_BUILTIN_CVTPI2PD,
+ IX86_BUILTIN_CVTSI2SD,
+ IX86_BUILTIN_CVTSI642SD,
+
+ IX86_BUILTIN_CVTSD2SI,
+ IX86_BUILTIN_CVTSD2SI64,
+ IX86_BUILTIN_CVTSD2SS,
+ IX86_BUILTIN_CVTSS2SD,
+ IX86_BUILTIN_CVTTSD2SI,
+ IX86_BUILTIN_CVTTSD2SI64,
+
+ IX86_BUILTIN_CVTPS2DQ,
+ IX86_BUILTIN_CVTPS2PD,
+ IX86_BUILTIN_CVTTPS2DQ,
+
+ IX86_BUILTIN_MOVNTI,
+ IX86_BUILTIN_MOVNTPD,
+ IX86_BUILTIN_MOVNTDQ,
+
+ /* SSE2 MMX */
+ IX86_BUILTIN_MASKMOVDQU,
+ IX86_BUILTIN_MOVMSKPD,
+ IX86_BUILTIN_PMOVMSKB128,
+
+ /* APPLE LOCAL begin 4099020 */
+ IX86_BUILTIN_MOVQ,
+ IX86_BUILTIN_LOADQ,
+ IX86_BUILTIN_STOREQ,
+ /* APPLE LOCAL end 4099020 */
+
+ IX86_BUILTIN_PACKSSWB128,
+ IX86_BUILTIN_PACKSSDW128,
+ IX86_BUILTIN_PACKUSWB128,
+
+ IX86_BUILTIN_PADDB128,
+ IX86_BUILTIN_PADDW128,
+ IX86_BUILTIN_PADDD128,
+ IX86_BUILTIN_PADDQ128,
+ IX86_BUILTIN_PADDSB128,
+ IX86_BUILTIN_PADDSW128,
+ IX86_BUILTIN_PADDUSB128,
+ IX86_BUILTIN_PADDUSW128,
+ IX86_BUILTIN_PSUBB128,
+ IX86_BUILTIN_PSUBW128,
+ IX86_BUILTIN_PSUBD128,
+ IX86_BUILTIN_PSUBQ128,
+ IX86_BUILTIN_PSUBSB128,
+ IX86_BUILTIN_PSUBSW128,
+ IX86_BUILTIN_PSUBUSB128,
+ IX86_BUILTIN_PSUBUSW128,
+
+ IX86_BUILTIN_PAND128,
+ IX86_BUILTIN_PANDN128,
+ IX86_BUILTIN_POR128,
+ IX86_BUILTIN_PXOR128,
+
+ IX86_BUILTIN_PAVGB128,
+ IX86_BUILTIN_PAVGW128,
+
+ IX86_BUILTIN_PCMPEQB128,
+ IX86_BUILTIN_PCMPEQW128,
+ IX86_BUILTIN_PCMPEQD128,
+ IX86_BUILTIN_PCMPGTB128,
+ IX86_BUILTIN_PCMPGTW128,
+ IX86_BUILTIN_PCMPGTD128,
+
+ IX86_BUILTIN_PMADDWD128,
+
+ IX86_BUILTIN_PMAXSW128,
+ IX86_BUILTIN_PMAXUB128,
+ IX86_BUILTIN_PMINSW128,
+ IX86_BUILTIN_PMINUB128,
+
+ IX86_BUILTIN_PMULUDQ,
+ IX86_BUILTIN_PMULUDQ128,
+ IX86_BUILTIN_PMULHUW128,
+ IX86_BUILTIN_PMULHW128,
+ IX86_BUILTIN_PMULLW128,
+
+ IX86_BUILTIN_PSADBW128,
+ IX86_BUILTIN_PSHUFHW,
+ IX86_BUILTIN_PSHUFLW,
+ IX86_BUILTIN_PSHUFD,
+
+ IX86_BUILTIN_PSLLW128,
+ IX86_BUILTIN_PSLLD128,
+ IX86_BUILTIN_PSLLQ128,
+ IX86_BUILTIN_PSRAW128,
+ IX86_BUILTIN_PSRAD128,
+ IX86_BUILTIN_PSRLW128,
+ IX86_BUILTIN_PSRLD128,
+ IX86_BUILTIN_PSRLQ128,
+ IX86_BUILTIN_PSLLDQI128,
+ /* APPLE LOCAL 591583 */
+ IX86_BUILTIN_PSLLDQI128_BYTESHIFT,
+ IX86_BUILTIN_PSLLWI128,
+ IX86_BUILTIN_PSLLDI128,
+ IX86_BUILTIN_PSLLQI128,
+ IX86_BUILTIN_PSRAWI128,
+ IX86_BUILTIN_PSRADI128,
+ IX86_BUILTIN_PSRLDQI128,
+ /* APPLE LOCAL 591583 */
+ IX86_BUILTIN_PSRLDQI128_BYTESHIFT,
+ IX86_BUILTIN_PSRLWI128,
+ IX86_BUILTIN_PSRLDI128,
+ IX86_BUILTIN_PSRLQI128,
+
+ IX86_BUILTIN_PUNPCKHBW128,
+ IX86_BUILTIN_PUNPCKHWD128,
+ IX86_BUILTIN_PUNPCKHDQ128,
+ IX86_BUILTIN_PUNPCKHQDQ128,
+ IX86_BUILTIN_PUNPCKLBW128,
+ IX86_BUILTIN_PUNPCKLWD128,
+ IX86_BUILTIN_PUNPCKLDQ128,
+ IX86_BUILTIN_PUNPCKLQDQ128,
+
+ IX86_BUILTIN_CLFLUSH,
+ IX86_BUILTIN_MFENCE,
+ IX86_BUILTIN_LFENCE,
+
+ /* Prescott New Instructions. */
+ IX86_BUILTIN_ADDSUBPS,
+ IX86_BUILTIN_HADDPS,
+ IX86_BUILTIN_HSUBPS,
+ IX86_BUILTIN_MOVSHDUP,
+ IX86_BUILTIN_MOVSLDUP,
+ IX86_BUILTIN_ADDSUBPD,
+ IX86_BUILTIN_HADDPD,
+ IX86_BUILTIN_HSUBPD,
+ IX86_BUILTIN_LDDQU,
+
+ IX86_BUILTIN_MONITOR,
+ IX86_BUILTIN_MWAIT,
+ /* APPLE LOCAL begin mainline */
+ /* Merom New Instructions. */
+ IX86_BUILTIN_PHADDW,
+ IX86_BUILTIN_PHADDD,
+ IX86_BUILTIN_PHADDSW,
+ IX86_BUILTIN_PHSUBW,
+ IX86_BUILTIN_PHSUBD,
+ IX86_BUILTIN_PHSUBSW,
+ IX86_BUILTIN_PMADDUBSW,
+ IX86_BUILTIN_PMULHRSW,
+ IX86_BUILTIN_PSHUFB,
+ IX86_BUILTIN_PSIGNB,
+ IX86_BUILTIN_PSIGNW,
+ IX86_BUILTIN_PSIGND,
+ IX86_BUILTIN_PALIGNR,
+ IX86_BUILTIN_PABSB,
+ IX86_BUILTIN_PABSW,
+ IX86_BUILTIN_PABSD,
+
+ IX86_BUILTIN_PHADDW128,
+ IX86_BUILTIN_PHADDD128,
+ IX86_BUILTIN_PHADDSW128,
+ IX86_BUILTIN_PHSUBW128,
+ IX86_BUILTIN_PHSUBD128,
+ IX86_BUILTIN_PHSUBSW128,
+ IX86_BUILTIN_PMADDUBSW128,
+ IX86_BUILTIN_PMULHRSW128,
+ IX86_BUILTIN_PSHUFB128,
+ IX86_BUILTIN_PSIGNB128,
+ IX86_BUILTIN_PSIGNW128,
+ IX86_BUILTIN_PSIGND128,
+ IX86_BUILTIN_PALIGNR128,
+ IX86_BUILTIN_PABSB128,
+ IX86_BUILTIN_PABSW128,
+ IX86_BUILTIN_PABSD128,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* AMDFAM10 - SSE4A New Instructions. */
+ IX86_BUILTIN_MOVNTSD,
+ IX86_BUILTIN_MOVNTSS,
+ IX86_BUILTIN_EXTRQI,
+ IX86_BUILTIN_EXTRQ,
+ IX86_BUILTIN_INSERTQI,
+ IX86_BUILTIN_INSERTQ,
+
+ /* SSE4.1. */
+ IX86_BUILTIN_BLENDPD,
+ IX86_BUILTIN_BLENDPS,
+ IX86_BUILTIN_BLENDVPD,
+ IX86_BUILTIN_BLENDVPS,
+ IX86_BUILTIN_PBLENDVB128,
+ IX86_BUILTIN_PBLENDW128,
+
+ IX86_BUILTIN_DPPD,
+ IX86_BUILTIN_DPPS,
+
+ IX86_BUILTIN_INSERTPS128,
+
+ IX86_BUILTIN_MOVNTDQA,
+ IX86_BUILTIN_MPSADBW128,
+ IX86_BUILTIN_PACKUSDW128,
+ IX86_BUILTIN_PCMPEQQ,
+ IX86_BUILTIN_PHMINPOSUW128,
+
+ IX86_BUILTIN_PMAXSB128,
+ IX86_BUILTIN_PMAXSD128,
+ IX86_BUILTIN_PMAXUD128,
+ IX86_BUILTIN_PMAXUW128,
+
+ IX86_BUILTIN_PMINSB128,
+ IX86_BUILTIN_PMINSD128,
+ IX86_BUILTIN_PMINUD128,
+ IX86_BUILTIN_PMINUW128,
+
+ IX86_BUILTIN_PMOVSXBW128,
+ IX86_BUILTIN_PMOVSXBD128,
+ IX86_BUILTIN_PMOVSXBQ128,
+ IX86_BUILTIN_PMOVSXWD128,
+ IX86_BUILTIN_PMOVSXWQ128,
+ IX86_BUILTIN_PMOVSXDQ128,
+
+ IX86_BUILTIN_PMOVZXBW128,
+ IX86_BUILTIN_PMOVZXBD128,
+ IX86_BUILTIN_PMOVZXBQ128,
+ IX86_BUILTIN_PMOVZXWD128,
+ IX86_BUILTIN_PMOVZXWQ128,
+ IX86_BUILTIN_PMOVZXDQ128,
+
+ IX86_BUILTIN_PMULDQ128,
+ IX86_BUILTIN_PMULLD128,
+
+ IX86_BUILTIN_ROUNDPD,
+ IX86_BUILTIN_ROUNDPS,
+ IX86_BUILTIN_ROUNDSD,
+ IX86_BUILTIN_ROUNDSS,
+
+ IX86_BUILTIN_PTESTZ,
+ IX86_BUILTIN_PTESTC,
+ IX86_BUILTIN_PTESTNZC,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ /* APPLE LOCAL end mainline */
+ IX86_BUILTIN_VEC_INIT_V2SI,
+ IX86_BUILTIN_VEC_INIT_V4HI,
+ IX86_BUILTIN_VEC_INIT_V8QI,
+ IX86_BUILTIN_VEC_EXT_V2DF,
+ IX86_BUILTIN_VEC_EXT_V2DI,
+ IX86_BUILTIN_VEC_EXT_V4SF,
+ IX86_BUILTIN_VEC_EXT_V4SI,
+ IX86_BUILTIN_VEC_EXT_V8HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* deletion */
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_EXT_V2SI,
+ IX86_BUILTIN_VEC_EXT_V4HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_EXT_V16QI,
+ IX86_BUILTIN_VEC_SET_V2DI,
+ IX86_BUILTIN_VEC_SET_V4SF,
+ IX86_BUILTIN_VEC_SET_V4SI,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_SET_V8HI,
+ IX86_BUILTIN_VEC_SET_V4HI,
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ IX86_BUILTIN_VEC_SET_V16QI,
+
+ IX86_BUILTIN_VEC_PACK_SFIX,
+
+ /* SSE4.2. */
+ IX86_BUILTIN_CRC32QI,
+ IX86_BUILTIN_CRC32HI,
+ IX86_BUILTIN_CRC32SI,
+ IX86_BUILTIN_CRC32DI,
+
+ IX86_BUILTIN_PCMPESTRI128,
+ IX86_BUILTIN_PCMPESTRM128,
+ IX86_BUILTIN_PCMPESTRA128,
+ IX86_BUILTIN_PCMPESTRC128,
+ IX86_BUILTIN_PCMPESTRO128,
+ IX86_BUILTIN_PCMPESTRS128,
+ IX86_BUILTIN_PCMPESTRZ128,
+ IX86_BUILTIN_PCMPISTRI128,
+ IX86_BUILTIN_PCMPISTRM128,
+ IX86_BUILTIN_PCMPISTRA128,
+ IX86_BUILTIN_PCMPISTRC128,
+ IX86_BUILTIN_PCMPISTRO128,
+ IX86_BUILTIN_PCMPISTRS128,
+ IX86_BUILTIN_PCMPISTRZ128,
+
+ IX86_BUILTIN_PCMPGTQ,
+
+ /* TFmode support builtins. */
+ IX86_BUILTIN_INFQ,
+ IX86_BUILTIN_FABSQ,
+ IX86_BUILTIN_COPYSIGNQ,
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ IX86_BUILTIN_MAX
+};
+
+#define def_builtin(MASK, NAME, TYPE, CODE) \
+do { \
+ if ((MASK) & target_flags \
+ && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
+ NULL, NULL_TREE); \
+} while (0)
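+/* The table-driven registration code later in the file invokes this as,
+ roughly, def_builtin (d->mask, d->name, ftype, d->code); the mask test
+ keeps e.g. MASK_SSE4_2 builtins unregistered unless SSE4.2 is enabled. */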
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+/* Like def_builtin, but also marks the function decl "const". */
+
+static inline tree
+def_builtin_const (int mask, const char *name, tree type,
+ enum ix86_builtins code)
+{
+ tree decl = NULL_TREE;
+ if ((mask) & target_flags
+ && (!((mask) & MASK_64BIT) || TARGET_64BIT))
+ decl = lang_hooks.builtin_function (name, type, code, BUILT_IN_MD,
+ NULL, NULL_TREE);
+
+ if (decl)
+ TREE_READONLY (decl) = 1;
+ return decl;
+}
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+/* Bits for builtin_description.flag. */
+
+/* Set when we don't support the comparison natively, and should
+ swap_comparison in order to support it. */
+#define BUILTIN_DESC_SWAP_OPERANDS 1
+
+struct builtin_description
+{
+ const unsigned int mask;
+ const enum insn_code icode;
+ const char *const name;
+ const enum ix86_builtins code;
+ const enum rtx_code comparison;
+ const unsigned int flag;
+};
+
+/* APPLE LOCAL begin 4299257 */
+static const struct builtin_description bdesc_comi[] =
+{
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
+ { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
+};
+static const struct builtin_description bdesc_ucomi[] =
+{
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, EQ, 0 },
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, LT, 0 },
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, LE, 0 },
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
+ { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, EQ, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, LT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, LE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
+};
+/* APPLE LOCAL end 4299257 */
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+static const struct builtin_description bdesc_ptest[] =
+{
+ /* SSE4.1 */
+ { MASK_SSE4_1, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, 0 },
+};
+
+static const struct builtin_description bdesc_pcmpestr[] =
+{
+ /* SSE4.2 */
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
+};
+
+static const struct builtin_description bdesc_pcmpistr[] =
+{
+ /* SSE4.2 */
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
+};
+
+static const struct builtin_description bdesc_crc32[] =
+{
+ /* SSE4.2 */
+ { MASK_SSE4_2, CODE_FOR_sse4_2_crc32qi, 0, IX86_BUILTIN_CRC32QI, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_crc32hi, 0, IX86_BUILTIN_CRC32HI, UNKNOWN, 0 },
+ { MASK_SSE4_2, CODE_FOR_sse4_2_crc32si, 0, IX86_BUILTIN_CRC32SI, UNKNOWN, 0 },
+ { MASK_SSE4_2 | MASK_64BIT, CODE_FOR_sse4_2_crc32di, 0, IX86_BUILTIN_CRC32DI, UNKNOWN, 0 },
+};
+
+/* SSE builtins with 3 arguments; the last argument must be an immediate or xmm0.  */
+static const struct builtin_description bdesc_sse_3arg[] =
+{
+ /* SSE4.1 */
+ { MASK_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_roundsd, 0, IX86_BUILTIN_ROUNDSD, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_roundss, 0, IX86_BUILTIN_ROUNDSS, UNKNOWN, 0 },
+};
+/* APPLE LOCAL end 5612787 mainline sse4 */
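+
+/* From the user's side these correspond to the SSE4.1 intrinsics;
+   e.g. (a sketch, assuming the usual <smmintrin.h> wrappers):
+
+     __m128d r = _mm_blend_pd (a, b, 0x1);
+
+   expands to __builtin_ia32_blendpd (a, b, 0x1), where the selector
+   must fold to a compile-time constant because the insn encodes it as
+   an 8-bit immediate.  */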
+
+static const struct builtin_description bdesc_2arg[] =
+{
+ /* SSE */
+ { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, 0 },
+
+ { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
+
+ /* MMX */
+ { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ { MASK_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
+
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
+
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
+
+ /* Special. */
+ { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
+ { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashlv4hi2si, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashlv2si2si, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashlv1di3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashlv1di2si, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_lshrv4hi2si, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_lshrv2si2si, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_lshrv1di3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_lshrv1di2si, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashrv4hi2si, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_ashrv2si2si, 0, IX86_BUILTIN_PSRADI, 0, 0 },
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
+ { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
+
+ /* SSE2 */
+ { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
+ BUILTIN_DESC_SWAP_OPERANDS },
+ { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
+
+ { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
+
+ /* SSE2 MMX */
+ { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
+
+ { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
+ { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
+
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
+ { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
+
+ /* SSE3 MMX */
+ { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
+ /* APPLE LOCAL begin mainline */
+ { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
+
+ /* SSSE3 MMX */
+ { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 },
+ /* APPLE LOCAL end mainline */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* SSE4.1 */
+ { MASK_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, 0, IX86_BUILTIN_PMULDQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, 0 },
+
+ /* SSE4.2 */
+ { MASK_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, 0 },
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+};
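+
+/* Each named entry above becomes directly callable, e.g.
+   (illustrative, using the GCC vector types from <xmmintrin.h>):
+
+     __v4sf sum = __builtin_ia32_addps (x, y);
+
+   which is exactly what the _mm_add_ps intrinsic wraps.  */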
+
+static const struct builtin_description bdesc_1arg[] =
+{
+ { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
+
+ { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
+ { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
+ { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
+ { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
+ { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
+ { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
+
+ { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
+ { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
+
+ /* SSE3 */
+ { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
+ { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
+ /* APPLE LOCAL begin mainline */
+
+ /* SSSE3 */
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ { MASK_SSSE3, CODE_FOR_ssse3_pabsv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
+ /* APPLE LOCAL end mainline */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* SSE4.1 */
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, 0, IX86_BUILTIN_PMOVSXBW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, 0, IX86_BUILTIN_PMOVSXBD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, 0, IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, 0, IX86_BUILTIN_PMOVSXWD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, 0, IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, 0, IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, 0, IX86_BUILTIN_PMOVZXBW128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, 0, IX86_BUILTIN_PMOVZXBD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, 0, IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, 0, IX86_BUILTIN_PMOVZXWD128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, 0, IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, 0, IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, 0 },
+
+ /* Fake 1 arg builtins with a constant smaller than 8 bits as the 2nd arg. */
+ { MASK_SSE4_1, CODE_FOR_sse4_1_roundpd, 0, IX86_BUILTIN_ROUNDPD, UNKNOWN, 0 },
+ { MASK_SSE4_1, CODE_FOR_sse4_1_roundps, 0, IX86_BUILTIN_ROUNDPS, UNKNOWN, 0 },
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+};
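+
+/* The "fake 1 arg" rounding builtins take the rounding mode as an
+   8-bit immediate, e.g. (a sketch of the usual <smmintrin.h> usage):
+
+     __v2df f = __builtin_ia32_roundpd (x, 0x01);
+
+   where 0x01 is _MM_FROUND_FLOOR, making this the expansion of
+   _mm_floor_pd.  */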
+
+static void
+ix86_init_builtins (void)
+{
+ if (TARGET_MMX)
+ ix86_init_mmx_sse_builtins ();
+
+ /* APPLE LOCAL begin constant cfstrings */
+#ifdef SUBTARGET_INIT_BUILTINS
+ SUBTARGET_INIT_BUILTINS;
+#endif
+ /* APPLE LOCAL end constant cfstrings */
+}
+
+/* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
+ is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
+ builtins. */
+static void
+ix86_init_mmx_sse_builtins (void)
+{
+ const struct builtin_description * d;
+ size_t i;
+
+ tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
+ tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
+ tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
+ tree V2DI_type_node
+ = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
+ tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
+ tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
+ tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
+ tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
+ tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
+ tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ tree V1DI_type_node = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
+
+ tree pchar_type_node = build_pointer_type (char_type_node);
+ tree pcchar_type_node = build_pointer_type (
+ build_type_variant (char_type_node, 1, 0));
+ tree pfloat_type_node = build_pointer_type (float_type_node);
+ tree pcfloat_type_node = build_pointer_type (
+ build_type_variant (float_type_node, 1, 0));
+ tree pv2si_type_node = build_pointer_type (V2SI_type_node);
+ tree pv2di_type_node = build_pointer_type (V2DI_type_node);
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ tree pv1di_type_node = build_pointer_type (V1DI_type_node);
+
+ /* Comparisons. */
+ tree int_ftype_v4sf_v4sf
+ = build_function_type_list (integer_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_v4sf
+ = build_function_type_list (V4SI_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ /* MMX/SSE/integer conversions. */
+ tree int_ftype_v4sf
+ = build_function_type_list (integer_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree int64_ftype_v4sf
+ = build_function_type_list (long_long_integer_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree int_ftype_v8qi
+ = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_int
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_int64
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, long_long_integer_type_node,
+ NULL_TREE);
+ tree v4sf_ftype_v4sf_v2si
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V2SI_type_node, NULL_TREE);
+
+ /* Miscellaneous. */
+ tree v8qi_ftype_v4hi_v4hi
+ = build_function_type_list (V8QI_type_node,
+ V4HI_type_node, V4HI_type_node, NULL_TREE);
+ tree v4hi_ftype_v2si_v2si
+ = build_function_type_list (V4HI_type_node,
+ V2SI_type_node, V2SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_int
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ tree v2si_ftype_v4hi_v4hi
+ = build_function_type_list (V2SI_type_node,
+ V4HI_type_node, V4HI_type_node, NULL_TREE);
+ tree v4hi_ftype_v4hi_int
+ = build_function_type_list (V4HI_type_node,
+ V4HI_type_node, integer_type_node, NULL_TREE);
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ tree v4hi_ftype_v4hi_v1di
+ = build_function_type_list (V4HI_type_node,
+ V4HI_type_node, V1DI_type_node,
+ NULL_TREE);
+ tree v2si_ftype_v2si_int
+ = build_function_type_list (V2SI_type_node,
+ V2SI_type_node, integer_type_node, NULL_TREE);
+ tree v2si_ftype_v2si_v1di
+ = build_function_type_list (V2SI_type_node,
+ V2SI_type_node, V1DI_type_node, NULL_TREE);
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ tree void_ftype_void
+ = build_function_type (void_type_node, void_list_node);
+ tree void_ftype_unsigned
+ = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
+ tree void_ftype_unsigned_unsigned
+ = build_function_type_list (void_type_node, unsigned_type_node,
+ unsigned_type_node, NULL_TREE);
+ tree void_ftype_pcvoid_unsigned_unsigned
+ = build_function_type_list (void_type_node, const_ptr_type_node,
+ unsigned_type_node, unsigned_type_node,
+ NULL_TREE);
+ tree unsigned_ftype_void
+ = build_function_type (unsigned_type_node, void_list_node);
+ tree v2si_ftype_v4sf
+ = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
+ /* Loads/stores. */
+ tree void_ftype_v8qi_v8qi_pchar
+ = build_function_type_list (void_type_node,
+ V8QI_type_node, V8QI_type_node,
+ pchar_type_node, NULL_TREE);
+ tree v4sf_ftype_pcfloat
+ = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
+ /* @@@ the type is bogus */
+ tree v4sf_ftype_v4sf_pv2si
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, pv2si_type_node, NULL_TREE);
+ tree void_ftype_pv2si_v4sf
+ = build_function_type_list (void_type_node,
+ pv2si_type_node, V4SF_type_node, NULL_TREE);
+ tree void_ftype_pfloat_v4sf
+ = build_function_type_list (void_type_node,
+ pfloat_type_node, V4SF_type_node, NULL_TREE);
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ tree void_ftype_pv1di_v1di
+ = build_function_type_list (void_type_node,
+ pv1di_type_node, V1DI_type_node, NULL_TREE);
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+ tree void_ftype_pv2di_v2di
+ = build_function_type_list (void_type_node,
+ pv2di_type_node, V2DI_type_node, NULL_TREE);
+ /* Normal vector unops. */
+ tree v4sf_ftype_v4sf
+ = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
+ /* APPLE LOCAL begin mainline */
+ tree v16qi_ftype_v16qi
+ = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi
+ = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si
+ = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v8qi_ftype_v8qi
+ = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
+ tree v4hi_ftype_v4hi
+ = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
+ /* APPLE LOCAL end mainline */
+
+ /* Normal vector binops. */
+ tree v4sf_ftype_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree v8qi_ftype_v8qi_v8qi
+ = build_function_type_list (V8QI_type_node,
+ V8QI_type_node, V8QI_type_node, NULL_TREE);
+ tree v4hi_ftype_v4hi_v4hi
+ = build_function_type_list (V4HI_type_node,
+ V4HI_type_node, V4HI_type_node, NULL_TREE);
+ tree v2si_ftype_v2si_v2si
+ = build_function_type_list (V2SI_type_node,
+ V2SI_type_node, V2SI_type_node, NULL_TREE);
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ tree v1di_ftype_v1di_v1di
+ = build_function_type_list (V1DI_type_node,
+ V1DI_type_node, V1DI_type_node, NULL_TREE);
+ /* APPLE LOCAL begin 4684674 */
+ tree v1di_ftype_v1di_int
+ = build_function_type_list (V1DI_type_node,
+ V1DI_type_node, integer_type_node, NULL_TREE);
+ /* APPLE LOCAL end 4684674 */
+ /* APPLE LOCAL begin 4656532 */
+ tree v1di_ftype_v1di_v1di_int
+ = build_function_type_list (V1DI_type_node,
+ V1DI_type_node,
+ V1DI_type_node,
+ integer_type_node, NULL_TREE);
+ /* APPLE LOCAL end 4656532 */
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ tree v2si_ftype_v2sf
+ = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
+ tree v2sf_ftype_v2si
+ = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
+ tree v2si_ftype_v2si
+ = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
+ tree v2sf_ftype_v2sf
+ = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
+ tree v2sf_ftype_v2sf_v2sf
+ = build_function_type_list (V2SF_type_node,
+ V2SF_type_node, V2SF_type_node, NULL_TREE);
+ tree v2si_ftype_v2sf_v2sf
+ = build_function_type_list (V2SI_type_node,
+ V2SF_type_node, V2SF_type_node, NULL_TREE);
+ tree pint_type_node = build_pointer_type (integer_type_node);
+ tree pdouble_type_node = build_pointer_type (double_type_node);
+ tree pcdouble_type_node = build_pointer_type (
+ build_type_variant (double_type_node, 1, 0));
+ tree int_ftype_v2df_v2df
+ = build_function_type_list (integer_type_node,
+ V2DF_type_node, V2DF_type_node, NULL_TREE);
+
+ tree void_ftype_pcvoid
+ = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
+ tree v4sf_ftype_v4si
+ = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf
+ = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
+ tree v2df_ftype_v4si
+ = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v2df
+ = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
+ tree v2si_ftype_v2df
+ = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
+ tree v4sf_ftype_v2df
+ = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
+ tree v2df_ftype_v2si
+ = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
+ tree v2df_ftype_v4sf
+ = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
+ tree int_ftype_v2df
+ = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
+ tree int64_ftype_v2df
+ = build_function_type_list (long_long_integer_type_node,
+ V2DF_type_node, NULL_TREE);
+ tree v2df_ftype_v2df_int
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, integer_type_node, NULL_TREE);
+ tree v2df_ftype_v2df_int64
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, long_long_integer_type_node,
+ NULL_TREE);
+ tree v4sf_ftype_v4sf_v2df
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V2DF_type_node, NULL_TREE);
+ tree v2df_ftype_v2df_v4sf
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, V4SF_type_node, NULL_TREE);
+ tree v2df_ftype_v2df_v2df_int
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, V2DF_type_node,
+ integer_type_node,
+ NULL_TREE);
+ tree v2df_ftype_v2df_pcdouble
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, pcdouble_type_node, NULL_TREE);
+ tree void_ftype_pdouble_v2df
+ = build_function_type_list (void_type_node,
+ pdouble_type_node, V2DF_type_node, NULL_TREE);
+ tree void_ftype_pint_int
+ = build_function_type_list (void_type_node,
+ pint_type_node, integer_type_node, NULL_TREE);
+ tree void_ftype_v16qi_v16qi_pchar
+ = build_function_type_list (void_type_node,
+ V16QI_type_node, V16QI_type_node,
+ pchar_type_node, NULL_TREE);
+ tree v2df_ftype_pcdouble
+ = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
+ tree v2df_ftype_v2df_v2df
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, V2DF_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v2di_ftype_v2di_v2di
+ = build_function_type_list (V2DI_type_node,
+ V2DI_type_node, V2DI_type_node, NULL_TREE);
+ tree v2di_ftype_v2df_v2df
+ = build_function_type_list (V2DI_type_node,
+ V2DF_type_node, V2DF_type_node, NULL_TREE);
+ tree v2df_ftype_v2df
+ = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
+ tree v2di_ftype_v2di_int
+ = build_function_type_list (V2DI_type_node,
+ V2DI_type_node, integer_type_node, NULL_TREE);
+ /* APPLE LOCAL begin mainline */
+ tree v2di_ftype_v2di_v2di_int
+ = build_function_type_list (V2DI_type_node, V2DI_type_node,
+ V2DI_type_node, integer_type_node, NULL_TREE);
+ /* APPLE LOCAL end mainline */
+ tree v4si_ftype_v4si_int
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_int
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v8hi
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ tree v1di_ftype_v8qi_v8qi
+ = build_function_type_list (V1DI_type_node,
+ V8QI_type_node, V8QI_type_node, NULL_TREE);
+ tree v1di_ftype_v2si_v2si
+ = build_function_type_list (V1DI_type_node,
+ V2SI_type_node, V2SI_type_node, NULL_TREE);
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ tree v2di_ftype_v16qi_v16qi
+ = build_function_type_list (V2DI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v2di_ftype_v4si_v4si
+ = build_function_type_list (V2DI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree int_ftype_v16qi
+ = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
+ tree v16qi_ftype_pcchar
+ = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
+ tree void_ftype_pchar_v16qi
+ = build_function_type_list (void_type_node,
+ pchar_type_node, V16QI_type_node, NULL_TREE);
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ tree v2di_ftype_v2di_unsigned_unsigned
+ = build_function_type_list (V2DI_type_node, V2DI_type_node,
+ unsigned_type_node, unsigned_type_node,
+ NULL_TREE);
+ tree v2di_ftype_v2di_v2di_unsigned_unsigned
+ = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
+ unsigned_type_node, unsigned_type_node,
+ NULL_TREE);
+ tree v2di_ftype_v2di_v16qi
+ = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
+ NULL_TREE);
+ tree v2df_ftype_v2df_v2df_v2df
+ = build_function_type_list (V2DF_type_node,
+ V2DF_type_node, V2DF_type_node,
+ V2DF_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree v8hi_ftype_v16qi
+ = build_function_type_list (V8HI_type_node, V16QI_type_node,
+ NULL_TREE);
+ tree v4si_ftype_v16qi
+ = build_function_type_list (V4SI_type_node, V16QI_type_node,
+ NULL_TREE);
+ tree v2di_ftype_v16qi
+ = build_function_type_list (V2DI_type_node, V16QI_type_node,
+ NULL_TREE);
+ tree v4si_ftype_v8hi
+ = build_function_type_list (V4SI_type_node, V8HI_type_node,
+ NULL_TREE);
+ tree v2di_ftype_v8hi
+ = build_function_type_list (V2DI_type_node, V8HI_type_node,
+ NULL_TREE);
+ tree v2di_ftype_v4si
+ = build_function_type_list (V2DI_type_node, V4SI_type_node,
+ NULL_TREE);
+ tree v2di_ftype_pv2di
+ = build_function_type_list (V2DI_type_node, pv2di_type_node,
+ NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_int
+ = build_function_type_list (V16QI_type_node, V16QI_type_node,
+ V16QI_type_node, integer_type_node,
+ NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node, V16QI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_int
+ = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ V8HI_type_node, integer_type_node,
+ NULL_TREE);
+ tree v4si_ftype_v4si_v4si_int
+ = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ V4SI_type_node, integer_type_node,
+ NULL_TREE);
+ tree int_ftype_v2di_v2di
+ = build_function_type_list (integer_type_node,
+ V2DI_type_node, V2DI_type_node,
+ NULL_TREE);
+ tree int_ftype_v16qi_int_v16qi_int_int
+ = build_function_type_list (integer_type_node,
+ V16QI_type_node,
+ integer_type_node,
+ V16QI_type_node,
+ integer_type_node,
+ integer_type_node,
+ NULL_TREE);
+ tree v16qi_ftype_v16qi_int_v16qi_int_int
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node,
+ integer_type_node,
+ V16QI_type_node,
+ integer_type_node,
+ integer_type_node,
+ NULL_TREE);
+ tree int_ftype_v16qi_v16qi_int
+ = build_function_type_list (integer_type_node,
+ V16QI_type_node,
+ V16QI_type_node,
+ integer_type_node,
+ NULL_TREE);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ tree float80_type;
+ tree float128_type;
+ tree ftype;
+
+ /* The __float80 type. */
+ if (TYPE_MODE (long_double_type_node) == XFmode)
+ (*lang_hooks.types.register_builtin_type) (long_double_type_node,
+ "__float80");
+ else
+ {
+ /* The __float80 type. */
+ float80_type = make_node (REAL_TYPE);
+ TYPE_PRECISION (float80_type) = 80;
+ layout_type (float80_type);
+ (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
+ }
+
+ if (TARGET_64BIT)
+ {
+ float128_type = make_node (REAL_TYPE);
+ TYPE_PRECISION (float128_type) = 128;
+ layout_type (float128_type);
+ (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
+ }
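+
+ /* Once registered, these types are directly usable from C, e.g.:
+
+      __float80 ext = 1.0L;
+
+    and likewise __float128, but only when compiling for 64-bit.  */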
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* Add all SSE builtins that are more or less simple operations on
+ three operands. */
+ for (i = 0, d = bdesc_sse_3arg;
+ i < ARRAY_SIZE (bdesc_sse_3arg);
+ i++, d++)
+ {
+ /* Use one of the operands; the target can have a different mode for
+ mask-generating compares. */
+ enum machine_mode mode;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+ mode = insn_data[d->icode].operand[1].mode;
+
+ switch (mode)
+ {
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_int;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_int;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_int;
+ break;
+ case V2DImode:
+ type = v2di_ftype_v2di_v2di_int;
+ break;
+ case V2DFmode:
+ type = v2df_ftype_v2df_v2df_int;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_int;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Override for variable blends. */
+ switch (d->icode)
+ {
+ case CODE_FOR_sse4_1_blendvpd:
+ type = v2df_ftype_v2df_v2df_v2df;
+ break;
+ case CODE_FOR_sse4_1_blendvps:
+ type = v4sf_ftype_v4sf_v4sf_v4sf;
+ break;
+ case CODE_FOR_sse4_1_pblendvb:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ break;
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
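+
+ /* After the blend override above, the variable blends take a full
+    vector selector instead of an immediate, e.g. (illustrative):
+
+      __v2df r = __builtin_ia32_blendvpd (a, b, mask);
+
+    where each element of r is taken from b when the sign bit of the
+    corresponding mask element is set, and from a otherwise.  */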
+
+ /* Add all builtins that are more or less simple operations on two
+ operands. */
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ {
+ /* Use one of the operands; the target can have a different mode for
+ mask-generating compares. */
+ enum machine_mode mode;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+ mode = insn_data[d->icode].operand[1].mode;
+
+ switch (mode)
+ {
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si;
+ break;
+ case V2DImode:
+ type = v2di_ftype_v2di_v2di;
+ break;
+ case V2DFmode:
+ type = v2df_ftype_v2df_v2df;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf;
+ break;
+ case V8QImode:
+ type = v8qi_ftype_v8qi_v8qi;
+ break;
+ case V4HImode:
+ type = v4hi_ftype_v4hi_v4hi;
+ break;
+ case V2SImode:
+ type = v2si_ftype_v2si_v2si;
+ break;
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ case V1DImode:
+ type = v1di_ftype_v1di_v1di;
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Override for comparisons. */
+ if (d->icode == CODE_FOR_sse_maskcmpv4sf3
+ || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
+ type = v4si_ftype_v4sf_v4sf;
+
+ if (d->icode == CODE_FOR_sse2_maskcmpv2df3
+ || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
+ type = v2di_ftype_v2df_v2df;
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
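+
+ /* Note the effect of the comparison override: the packed compares
+    return an all-ones/all-zeros integer mask rather than a float
+    vector, e.g. (illustrative):
+
+      __v4si m = __builtin_ia32_cmpeqps (x, y);  */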
+ /* APPLE LOCAL begin mainline */
+ /* Add all builtins that are more or less simple operations on one operand. */
+ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ {
+ enum machine_mode mode;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+ mode = insn_data[d->icode].operand[1].mode;
+
+ switch (mode)
+ {
+ case V16QImode:
+ type = v16qi_ftype_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si;
+ break;
+ case V2DFmode:
+ type = v2df_ftype_v2df;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf;
+ break;
+ case V8QImode:
+ type = v8qi_ftype_v8qi;
+ break;
+ case V4HImode:
+ type = v4hi_ftype_v4hi;
+ break;
+ case V2SImode:
+ type = v2si_ftype_v2si;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+ /* APPLE LOCAL end mainline */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* pcmpestr[im] insns. */
+ for (i = 0, d = bdesc_pcmpestr;
+ i < ARRAY_SIZE (bdesc_pcmpestr);
+ i++, d++)
+ {
+ if (d->code == IX86_BUILTIN_PCMPESTRM128)
+ ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
+ else
+ ftype = int_ftype_v16qi_int_v16qi_int_int;
+ def_builtin (d->mask, d->name, ftype, d->code);
+ }
+
+ /* pcmpistr[im] insns. */
+ for (i = 0, d = bdesc_pcmpistr;
+ i < ARRAY_SIZE (bdesc_pcmpistr);
+ i++, d++)
+ {
+ if (d->code == IX86_BUILTIN_PCMPISTRM128)
+ ftype = v16qi_ftype_v16qi_v16qi_int;
+ else
+ ftype = int_ftype_v16qi_v16qi_int;
+ def_builtin (d->mask, d->name, ftype, d->code);
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
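+
+ /* E.g. (a sketch, using the <nmmintrin.h> mode constants):
+
+      int idx = __builtin_ia32_pcmpistri128 (needle, haystack,
+                                             _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED);
+
+    returns the index of the first ordered match of needle within
+    haystack, which is what _mm_cmpistri wraps; the mode argument must
+    be a compile-time constant.  */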
+ /* Add the remaining MMX insns with somewhat more complicated types. */
+ def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_v1di, IX86_BUILTIN_PSLLW);
+ def_builtin (MASK_MMX, "__builtin_ia32_psllwi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSLLWI);
+ def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_v1di, IX86_BUILTIN_PSLLD);
+ def_builtin (MASK_MMX, "__builtin_ia32_pslldi", v2si_ftype_v2si_int, IX86_BUILTIN_PSLLDI);
+ def_builtin (MASK_MMX, "__builtin_ia32_psllq", v1di_ftype_v1di_v1di, IX86_BUILTIN_PSLLQ);
+ def_builtin (MASK_MMX, "__builtin_ia32_psllqi", v1di_ftype_v1di_int, IX86_BUILTIN_PSLLQI);
+
+ def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_v1di, IX86_BUILTIN_PSRLW);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrlwi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSRLWI);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_v1di, IX86_BUILTIN_PSRLD);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrldi", v2si_ftype_v2si_int, IX86_BUILTIN_PSRLDI);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrlq", v1di_ftype_v1di_v1di, IX86_BUILTIN_PSRLQ);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrlqi", v1di_ftype_v1di_int, IX86_BUILTIN_PSRLQI);
+
+ def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_v1di, IX86_BUILTIN_PSRAW);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrawi", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSRAWI);
+ def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_v1di, IX86_BUILTIN_PSRAD);
+ def_builtin (MASK_MMX, "__builtin_ia32_psradi", v2si_ftype_v2si_int, IX86_BUILTIN_PSRADI);
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
+ def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
+
+ /* APPLE LOCAL 4299257 */
+ /* comi insns. */
+ for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
+ if (d->mask == MASK_SSE2)
+ def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
+ else
+ def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
+
+ /* APPLE LOCAL begin 4299257 */
+ /* ucomi insns. */
+ for (i = 0, d = bdesc_ucomi; i < ARRAY_SIZE (bdesc_ucomi); i++, d++)
+ if (d->mask == MASK_SSE2)
+ def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
+ else
+ def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
+ /* APPLE LOCAL end 4299257 */
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* ptest insns. */
+ for (i = 0, d = bdesc_ptest; i < ARRAY_SIZE (bdesc_ptest); i++, d++)
+ def_builtin (d->mask, d->name, int_ftype_v2di_v2di, d->code);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
+ def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
+ def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
+
+ def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
+ def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
+ def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
+ def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
+ def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
+ def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
+
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
+
+ def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
+
+ def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
+
+ def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
+ def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pv1di_v1di, IX86_BUILTIN_MOVNTQ);
+
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
+
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", v1di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+
+ def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
+ def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
+ def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
+ def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
+
+ def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
+
+ /* Original 3DNow! */
+ def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
+ def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
+
+ /* 3DNow! extension as used in the Athlon CPU. */
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
+ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
+
+ /* SSE2 */
+ def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
+ def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
+ def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
+ def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
+ def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
+ def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
+ def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
+ def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
+
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", v1di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSLLW128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSLLD128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRLW128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRLD128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v8hi, IX86_BUILTIN_PSRAW128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v4si, IX86_BUILTIN_PSRAD128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
+ /* APPLE LOCAL 5919583 */
+ def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128_byteshift", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128_BYTESHIFT);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
+ /* APPLE LOCAL 5919583 */
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128_byteshift", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128_BYTESHIFT);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
+ def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
+
+ def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
+
+ /* Prescott New Instructions. */
+ def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
+ void_ftype_pcvoid_unsigned_unsigned,
+ IX86_BUILTIN_MONITOR);
+ def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
+ void_ftype_unsigned_unsigned,
+ IX86_BUILTIN_MWAIT);
+ def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
+ v4sf_ftype_v4sf,
+ IX86_BUILTIN_MOVSHDUP);
+ def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
+ v4sf_ftype_v4sf,
+ IX86_BUILTIN_MOVSLDUP);
+ def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
+ v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
+
+ /* APPLE LOCAL begin 4099020 */
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
+ def_builtin (MASK_SSE, "__builtin_ia32_movqv4si", ftype, IX86_BUILTIN_MOVQ);
+ ftype = build_function_type_list (V4SI_type_node, pv2si_type_node, NULL_TREE);
+ def_builtin (MASK_SSE, "__builtin_ia32_loadlv4si", ftype, IX86_BUILTIN_LOADQ);
+ ftype = build_function_type_list (void_type_node, pv2si_type_node, V4SI_type_node, NULL_TREE);
+ def_builtin (MASK_SSE, "__builtin_ia32_storelv4si", ftype, IX86_BUILTIN_STOREQ);
+ /* APPLE LOCAL end 4099020 */
+
+ /* APPLE LOCAL begin 4656532 */
+ /* Merom New Instructions. */
+ def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
+ v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
+ def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", v1di_ftype_v1di_v1di_int,
+ IX86_BUILTIN_PALIGNR);
+
+ /* APPLE LOCAL end 4656532 */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* SSE4.1. */
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_movntdqa", v2di_ftype_pv2di, IX86_BUILTIN_MOVNTDQA);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxbw128", v8hi_ftype_v16qi, IX86_BUILTIN_PMOVSXBW128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxbd128", v4si_ftype_v16qi, IX86_BUILTIN_PMOVSXBD128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxbq128", v2di_ftype_v16qi, IX86_BUILTIN_PMOVSXBQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxwd128", v4si_ftype_v8hi, IX86_BUILTIN_PMOVSXWD128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxwq128", v2di_ftype_v8hi, IX86_BUILTIN_PMOVSXWQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovsxdq128", v2di_ftype_v4si, IX86_BUILTIN_PMOVSXDQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxbw128", v8hi_ftype_v16qi, IX86_BUILTIN_PMOVZXBW128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxbd128", v4si_ftype_v16qi, IX86_BUILTIN_PMOVZXBD128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxbq128", v2di_ftype_v16qi, IX86_BUILTIN_PMOVZXBQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxwd128", v4si_ftype_v8hi, IX86_BUILTIN_PMOVZXWD128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxwq128", v2di_ftype_v8hi, IX86_BUILTIN_PMOVZXWQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmovzxdq128", v2di_ftype_v4si, IX86_BUILTIN_PMOVZXDQ128);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_pmuldq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULDQ128);
+ def_builtin_const (MASK_SSE4_1, "__builtin_ia32_roundpd", v2df_ftype_v2df_int, IX86_BUILTIN_ROUNDPD);
+ def_builtin_const (MASK_SSE4_1, "__builtin_ia32_roundps", v4sf_ftype_v4sf_int, IX86_BUILTIN_ROUNDPS);
+ def_builtin_const (MASK_SSE4_1, "__builtin_ia32_roundsd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_ROUNDSD);
+ def_builtin_const (MASK_SSE4_1, "__builtin_ia32_roundss", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_ROUNDSS);
+
+ /* SSE4.2. */
+ ftype = build_function_type_list (unsigned_type_node,
+ unsigned_type_node,
+ unsigned_char_type_node,
+ NULL_TREE);
+ def_builtin (MASK_SSE4_2, "__builtin_ia32_crc32qi", ftype, IX86_BUILTIN_CRC32QI);
+ ftype = build_function_type_list (unsigned_type_node,
+ unsigned_type_node,
+ short_unsigned_type_node,
+ NULL_TREE);
+ def_builtin (MASK_SSE4_2, "__builtin_ia32_crc32hi", ftype, IX86_BUILTIN_CRC32HI);
+ ftype = build_function_type_list (unsigned_type_node,
+ unsigned_type_node,
+ unsigned_type_node,
+ NULL_TREE);
+ def_builtin (MASK_SSE4_2, "__builtin_ia32_crc32si", ftype, IX86_BUILTIN_CRC32SI);
+ ftype = build_function_type_list (long_long_unsigned_type_node,
+ long_long_unsigned_type_node,
+ long_long_unsigned_type_node,
+ NULL_TREE);
+ def_builtin (MASK_SSE4_2, "__builtin_ia32_crc32di", ftype, IX86_BUILTIN_CRC32DI);
+
+ /* AMDFAM10 SSE4A new built-ins. */
+ def_builtin (MASK_SSE4A, "__builtin_ia32_movntsd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTSD);
+ def_builtin (MASK_SSE4A, "__builtin_ia32_movntss", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTSS);
+ def_builtin (MASK_SSE4A, "__builtin_ia32_extrqi", v2di_ftype_v2di_unsigned_unsigned, IX86_BUILTIN_EXTRQI);
+ def_builtin (MASK_SSE4A, "__builtin_ia32_extrq", v2di_ftype_v2di_v16qi, IX86_BUILTIN_EXTRQ);
+ def_builtin (MASK_SSE4A, "__builtin_ia32_insertqi", v2di_ftype_v2di_v2di_unsigned_unsigned, IX86_BUILTIN_INSERTQI);
+ def_builtin (MASK_SSE4A, "__builtin_ia32_insertq", v2di_ftype_v2di_v2di, IX86_BUILTIN_INSERTQ);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ /* Access to the vec_init patterns. */
+ ftype = build_function_type_list (V2SI_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
+ ftype, IX86_BUILTIN_VEC_INIT_V2SI);
+
+ ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
+ ftype, IX86_BUILTIN_VEC_INIT_V4HI);
+
+ ftype = build_function_type_list (V8QI_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, NULL_TREE);
+ def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
+ ftype, IX86_BUILTIN_VEC_INIT_V8QI);
+
+ /* Access to the vec_extract patterns. */
+ ftype = build_function_type_list (double_type_node, V2DF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_ext_v2df",
+ ftype, IX86_BUILTIN_VEC_EXT_V2DF);
+
+ ftype = build_function_type_list (long_long_integer_type_node,
+ V2DI_type_node, integer_type_node,
+ NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_ext_v2di",
+ ftype, IX86_BUILTIN_VEC_EXT_V2DI);
+
+ ftype = build_function_type_list (float_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
+ ftype, IX86_BUILTIN_VEC_EXT_V4SF);
+
+ ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_ext_v4si",
+ ftype, IX86_BUILTIN_VEC_EXT_V4SI);
+
+ /* APPLE LOCAL begin radar 4469713 */
+ /* The return type of the builtin function should be an unsigned instead
+ of a signed type. */
+ ftype = build_function_type_list (unsigned_intHI_type_node, V8HI_type_node,
+ /* APPLE LOCAL end radar 4469713 */
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_ext_v8hi",
+ ftype, IX86_BUILTIN_VEC_EXT_V8HI);
+
+ ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
+ ftype, IX86_BUILTIN_VEC_EXT_V4HI);
+
+ ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
+ ftype, IX86_BUILTIN_VEC_EXT_V2SI);
+
+ ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ /* Access to the vec_set patterns. */
+ ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
+ intDI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE4_1 | MASK_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
+
+ ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
+ float_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
+
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ intSI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE2, "__builtin_ia32_vec_set_v8hi",
+ ftype, IX86_BUILTIN_VEC_SET_V8HI);
+
+ ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
+ ftype, IX86_BUILTIN_VEC_SET_V4HI);
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
+ intQI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+}
+
+/* Errors in the source file can cause expand_expr to return const0_rtx
+ where we expect a vector. To avoid crashing, use one of the vector
+ clear instructions. */
+static rtx
+safe_vector_operand (rtx x, enum machine_mode mode)
+{
+ if (x == const0_rtx)
+ x = CONST0_RTX (mode);
+ return x;
+}
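+
+/* E.g. safe_vector_operand (op0, V4SFmode) turns a stray const0_rtx into
+ CONST0_RTX (V4SFmode), the zero CONST_VECTOR of that mode, so later
+ operand predicates see a well-formed vector constant. */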
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+/* Subroutine of ix86_expand_builtin to take care of SSE insns with
+ 4 operands. The third argument must be an immediate that fits in 8
+ bits (4 bits for the round insns) or, for the blendv forms, the xmm0
+ register. */
+
+static rtx
+ix86_expand_sse_4_operands_builtin (enum insn_code icode, tree exp,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (exp);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (exp));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (exp)));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode3 = insn_data[icode].operand[3].mode;
+
+ if (VECTOR_MODE_P (mode1))
+ op0 = safe_vector_operand (op0, mode1);
+ if (VECTOR_MODE_P (mode2))
+ op1 = safe_vector_operand (op1, mode2);
+ if (VECTOR_MODE_P (mode3))
+ op2 = safe_vector_operand (op2, mode3);
+
+ if (optimize
+ || target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+ if ((optimize && !register_operand (op1, mode2))
+ || !(*insn_data[icode].operand[2].predicate) (op1, mode2))
+ op1 = copy_to_mode_reg (mode2, op1);
+
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
+ switch (icode)
+ {
+ case CODE_FOR_sse4_1_blendvpd:
+ case CODE_FOR_sse4_1_blendvps:
+ case CODE_FOR_sse4_1_pblendvb:
+ op2 = copy_to_mode_reg (mode3, op2);
+ break;
+
+ case CODE_FOR_sse4_1_roundsd:
+ case CODE_FOR_sse4_1_roundss:
+ error ("the third argument must be a 4-bit immediate");
+ return const0_rtx;
+
+ default:
+ error ("the third argument must be an 8-bit immediate");
+ return const0_rtx;
+ }
+
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
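+
+/* For instance, __builtin_ia32_blendvpd routes through the register case
+ above (its mask travels in xmm0), while __builtin_ia32_roundsd only
+ accepts a 4-bit rounding-control immediate and is diagnosed otherwise. */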
+
+/* Subroutine of ix86_expand_builtin to take care of crc32 insns. */
+
+static rtx
+ix86_expand_crc32 (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (exp);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (exp));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (optimize
+ || !target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
+ {
+ op1 = copy_to_reg (op1);
+ op1 = simplify_gen_subreg (mode1, op1, GET_MODE (op1), 0);
+ }
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
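+
+/* A typical use folds one byte at a time into the running checksum,
+ e.g. crc = __builtin_ia32_crc32qi (crc, *buf++); with crc an unsigned
+ int, matching the function type registered for IX86_BUILTIN_CRC32QI. */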
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+/* Subroutine of ix86_expand_builtin to take care of binop insns. */
+
+static rtx
+ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat, xops[3];
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ if (optimize || !target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (GET_MODE (op1) == SImode && mode1 == TImode)
+ {
+ rtx x = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_loadd (x, op1));
+ op1 = gen_lowpart (TImode, x);
+ }
+
+ /* The insn must want input operands in the same modes as the
+ result. */
+ gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
+ && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ /* ??? Using ix86_fixup_binary_operands is problematic when
+ we've got mismatched modes. Fake it. */
+
+ xops[0] = target;
+ xops[1] = op0;
+ xops[2] = op1;
+
+ if (tmode == mode0 && tmode == mode1)
+ {
+ target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
+ op0 = xops[1];
+ op1 = xops[2];
+ }
+ else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
+ {
+ op0 = force_reg (mode0, op0);
+ op1 = force_reg (mode1, op1);
+ target = gen_reg_rtx (tmode);
+ }
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
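+
+/* E.g. __builtin_ia32_paddw128 is expanded here; the SImode-to-TImode
+ fix-up above handles patterns whose second operand is TImode while the
+ source-level argument arrives as a plain int. */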
+
+/* Subroutine of ix86_expand_builtin to take care of stores. */
+
+static rtx
+ix86_expand_store_builtin (enum insn_code icode, tree arglist)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+ return 0;
+}
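+
+/* E.g. __builtin_ia32_storeups (p, v) arrives with op0 the float *
+ destination and op1 the V4SF value; the insn is emitted purely for its
+ side effect, hence the constant 0 return. */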
+
+/* Subroutine of ix86_expand_builtin to take care of unop insns. */
+
+static rtx
+ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
+ rtx target, int do_load)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ rtx op0 = expand_normal (arg0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (optimize || !target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ if (do_load)
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ else
+ {
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ if ((optimize && !register_operand (op0, mode0))
+ || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ }
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ switch (icode)
+ {
+ case CODE_FOR_sse4_1_roundpd:
+ case CODE_FOR_sse4_1_roundps:
+ {
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ {
+ error ("the second argument must be a 4-bit immediate");
+ return const0_rtx;
+ }
+ pat = GEN_FCN (icode) (target, op0, op1);
+ }
+ break;
+ default:
+ pat = GEN_FCN (icode) (target, op0);
+ break;
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
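+
+/* E.g. __builtin_ia32_sqrtpd takes the plain one-operand path, while
+ __builtin_ia32_roundpd reaches the SSE4.1 case above and must supply a
+ 4-bit rounding-control immediate as its second argument. */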
+
+/* Subroutine of ix86_expand_builtin to take care of three special unop insns:
+ sqrtss, rsqrtss, rcpss. */
+
+static rtx
+ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ rtx op1, op0 = expand_normal (arg0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (optimize || !target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ if ((optimize && !register_operand (op0, mode0))
+ || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ op1 = op0;
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
+ op1 = copy_to_mode_reg (mode0, op1);
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
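+
+/* The operand is duplicated because the vm* patterns merge the scalar
+ result into the upper elements of the first input: e.g.
+ __builtin_ia32_sqrtss (v) computes sqrt of v[0] and passes v[1..3]
+ through unchanged. */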
+
+/* Subroutine of ix86_expand_builtin to take care of comparison insns. */
+
+static rtx
+ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2;
+ enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
+ enum rtx_code comparison = d->comparison;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ /* Swap operands if we have a comparison that isn't available in
+ hardware. */
+ if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
+ {
+ rtx tmp = gen_reg_rtx (mode1);
+ emit_move_insn (tmp, op1);
+ op1 = op0;
+ op0 = tmp;
+ }
+
+ if (optimize || !target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if ((optimize && !register_operand (op0, mode0))
+ || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if ((optimize && !register_operand (op1, mode1))
+ || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
+ pat = GEN_FCN (d->icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
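+
+/* E.g. cmpgtps has no direct SSE encoding; its descriptor carries
+ BUILTIN_DESC_SWAP_OPERANDS, so the operands are exchanged above and the
+ hardware LT comparison is emitted instead. */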
+
+/* Subroutine of ix86_expand_builtin to take care of comi insns. */
+
+static rtx
+ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2;
+ enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
+ enum rtx_code comparison = d->comparison;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ /* Swap operands if we have a comparison that isn't available in
+ hardware. */
+ if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
+ {
+ rtx tmp = op1;
+ op1 = op0;
+ op0 = tmp;
+ }
+
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (target, const0_rtx);
+ target = gen_rtx_SUBREG (QImode, target, 0);
+
+ if ((optimize && !register_operand (op0, mode0))
+ || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if ((optimize && !register_operand (op1, mode1))
+ || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
+ pat = GEN_FCN (d->icode) (op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_rtx_STRICT_LOW_PART (VOIDmode, target),
+ gen_rtx_fmt_ee (comparison, QImode,
+ SET_DEST (pat),
+ const0_rtx)));
+
+ return SUBREG_REG (target);
+}
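+
+/* The SImode pseudo is zeroed first and only its low QImode part is set
+ from the flags comparison, so e.g. __builtin_ia32_comieq yields a clean
+ 32-bit 0 or 1. */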
+
+/* APPLE LOCAL begin 4299257 */
+/* Subroutine of ix86_expand_builtin to take care of ucomi insns. */
+
+static rtx
+ix86_expand_sse_ucomi (const struct builtin_description *d, tree arglist,
+ rtx target)
+{
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
+ enum machine_mode scalar_mode;
+ enum rtx_code comparison = d->comparison;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ /* Swap operands if we have a comparison that isn't available in
+ hardware. */
+ if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
+ {
+ rtx tmp = op1;
+ op1 = op0;
+ op0 = tmp;
+ }
+
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (target, const0_rtx);
+ target = gen_rtx_SUBREG (QImode, target, 0);
+
+ gcc_assert (mode0 == V4SFmode || mode0 == V2DFmode);
+ gcc_assert (mode1 == V4SFmode || mode1 == V2DFmode);
+
+ scalar_mode = (mode0 == V4SFmode) ? SFmode : DFmode;
+ op0 = gen_rtx_SUBREG (scalar_mode, copy_to_mode_reg (mode0, op0), 0);
+ op1 = gen_rtx_SUBREG (scalar_mode, copy_to_mode_reg (mode1, op1), 0);
+
+ ix86_compare_op0 = op0;
+ ix86_compare_op1 = op1;
+ if (ix86_expand_setcc (comparison, target))
+ return SUBREG_REG (target);
+
+ return NULL_RTX;
+}
+/* APPLE LOCAL end 4299257 */
+
+/* APPLE LOCAL begin 5612787 mainline sse4 */
+/* Subroutine of ix86_expand_builtin to take care of ptest insns. */
+
+static rtx
+ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (exp);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (exp));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
+ enum rtx_code comparison = d->comparison;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (target, const0_rtx);
+ target = gen_rtx_SUBREG (QImode, target, 0);
+
+ if ((optimize && !register_operand (op0, mode0))
+ || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if ((optimize && !register_operand (op1, mode1))
+ || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (d->icode) (op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_rtx_STRICT_LOW_PART (VOIDmode, target),
+ gen_rtx_fmt_ee (comparison, QImode,
+ SET_DEST (pat),
+ const0_rtx)));
+
+ return SUBREG_REG (target);
+}
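+
+/* PTEST sets ZF and CF from bitwise tests of its operands; e.g. for
+ __builtin_ia32_ptestz128 the descriptor's comparison code turns the
+ relevant flag into the 0/1 value assembled above. */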
+
+/* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
+
+static rtx
+ix86_expand_sse_pcmpestr (const struct builtin_description *d,
+ tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (exp);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (exp));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (exp)));
+ tree arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (exp))));
+ tree arg4 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (exp)))));
+ rtx scratch0, scratch1;
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = expand_normal (arg3);
+ rtx op4 = expand_normal (arg4);
+ enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
+
+ tmode0 = insn_data[d->icode].operand[0].mode;
+ tmode1 = insn_data[d->icode].operand[1].mode;
+ modev2 = insn_data[d->icode].operand[2].mode;
+ modei3 = insn_data[d->icode].operand[3].mode;
+ modev4 = insn_data[d->icode].operand[4].mode;
+ modei5 = insn_data[d->icode].operand[5].mode;
+ modeimm = insn_data[d->icode].operand[6].mode;
+
+ if (VECTOR_MODE_P (modev2))
+ op0 = safe_vector_operand (op0, modev2);
+ if (VECTOR_MODE_P (modev4))
+ op2 = safe_vector_operand (op2, modev4);
+
+ if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
+ op0 = copy_to_mode_reg (modev2, op0);
+ if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
+ op1 = copy_to_mode_reg (modei3, op1);
+ if ((optimize && !register_operand (op2, modev4))
+ || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
+ op2 = copy_to_mode_reg (modev4, op2);
+ if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
+ op3 = copy_to_mode_reg (modei5, op3);
+
+ if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
+ {
+ error ("the fifth argument must be a 8-bit immediate");
+ return const0_rtx;
+ }
+
+ if (d->code == IX86_BUILTIN_PCMPESTRI128)
+ {
+ if (optimize || !target
+ || GET_MODE (target) != tmode0
+ || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
+ target = gen_reg_rtx (tmode0);
+
+ scratch1 = gen_reg_rtx (tmode1);
+
+ pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
+ }
+ else if (d->code == IX86_BUILTIN_PCMPESTRM128)
+ {
+ if (optimize || !target
+ || GET_MODE (target) != tmode1
+ || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
+ target = gen_reg_rtx (tmode1);
+
+ scratch0 = gen_reg_rtx (tmode0);
+
+ pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
+ }
+ else
+ {
+ gcc_assert (d->flag);
+
+ scratch0 = gen_reg_rtx (tmode0);
+ scratch1 = gen_reg_rtx (tmode1);
+
+ pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
+ }
+
+ if (! pat)
+ return 0;
+
+ emit_insn (pat);
+
+ if (d->flag)
+ {
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (target, const0_rtx);
+ target = gen_rtx_SUBREG (QImode, target, 0);
+
+ emit_insn
+ (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
+ gen_rtx_fmt_ee (EQ, QImode,
+ gen_rtx_REG ((enum machine_mode) d->flag,
+ FLAGS_REG),
+ const0_rtx)));
+ return SUBREG_REG (target);
+ }
+ else
+ return target;
+}
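+
+/* Three result shapes fall out above: PCMPESTRI128 wants the index
+ result, PCMPESTRM128 the mask result, and the flag-reading variants
+ (e.g. __builtin_ia32_pcmpestria128) extract a single EFLAGS bit via
+ d->flag. */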
+
+
+/* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
+
+static rtx
+ix86_expand_sse_pcmpistr (const struct builtin_description *d,
+ tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (exp);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (exp));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (exp)));
+ rtx scratch0, scratch1;
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
+
+ tmode0 = insn_data[d->icode].operand[0].mode;
+ tmode1 = insn_data[d->icode].operand[1].mode;
+ modev2 = insn_data[d->icode].operand[2].mode;
+ modev3 = insn_data[d->icode].operand[3].mode;
+ modeimm = insn_data[d->icode].operand[4].mode;
+
+ if (VECTOR_MODE_P (modev2))
+ op0 = safe_vector_operand (op0, modev2);
+ if (VECTOR_MODE_P (modev3))
+ op1 = safe_vector_operand (op1, modev3);
+
+ if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
+ op0 = copy_to_mode_reg (modev2, op0);
+ if ((optimize && !register_operand (op1, modev3))
+ || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
+ op1 = copy_to_mode_reg (modev3, op1);
+
+ if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
+ {
+ error ("the third argument must be a 8-bit immediate");
+ return const0_rtx;
+ }
+
+ if (d->code == IX86_BUILTIN_PCMPISTRI128)
+ {
+ if (optimize || !target
+ || GET_MODE (target) != tmode0
+ || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
+ target = gen_reg_rtx (tmode0);
+
+ scratch1 = gen_reg_rtx (tmode1);
+
+ pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
+ }
+ else if (d->code == IX86_BUILTIN_PCMPISTRM128)
+ {
+ if (optimize || !target
+ || GET_MODE (target) != tmode1
+ || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
+ target = gen_reg_rtx (tmode1);
+
+ scratch0 = gen_reg_rtx (tmode0);
+
+ pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
+ }
+ else
+ {
+ gcc_assert (d->flag);
+
+ scratch0 = gen_reg_rtx (tmode0);
+ scratch1 = gen_reg_rtx (tmode1);
+
+ pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
+ }
+
+ if (! pat)
+ return 0;
+
+ emit_insn (pat);
+
+ if (d->flag)
+ {
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (target, const0_rtx);
+ target = gen_rtx_SUBREG (QImode, target, 0);
+
+ emit_insn
+ (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
+ gen_rtx_fmt_ee (EQ, QImode,
+ gen_rtx_REG ((enum machine_mode) d->flag,
+ FLAGS_REG),
+ const0_rtx)));
+ return SUBREG_REG (target);
+ }
+ else
+ return target;
+}
+/* APPLE LOCAL end 5612787 mainline sse4 */
+
+/* Return the integer constant in ARG. Constrain it to be in the range
+ of the subparts of VEC_TYPE; issue an error if not. */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+ unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+ if (!host_integerp (arg, 1)
+ || (elt = tree_low_cst (arg, 1), elt > max))
+ {
+ error ("selector must be an integer constant in the range 0..%wi", max);
+ return 0;
+ }
+
+ return elt;
+}
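+
+/* E.g. for a V4SF vector type TYPE_VECTOR_SUBPARTS is 4, so any selector
+ outside 0..3 is diagnosed and 0 returned as a safe fallback. */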
+
+/* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
+ ix86_expand_vector_init. We DO have language-level syntax for this, in
+ the form of (type){ init-list }. Except that since we can't place emms
+ instructions from inside the compiler, we can't allow the use of MMX
+ registers unless the user explicitly asks for it. So we do *not* define
+ vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
+ we have builtins invoked by mmintrin.h that give us license to emit
+ these sorts of instructions. */
+
+static rtx
+ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+{
+ enum machine_mode tmode = TYPE_MODE (type);
+ enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+ int i, n_elt = GET_MODE_NUNITS (tmode);
+ rtvec v = rtvec_alloc (n_elt);
+
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+ {
+ rtx x = expand_normal (TREE_VALUE (arglist));
+ RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+ }
+
+ gcc_assert (arglist == NULL);
+
+ if (!target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
+ return target;
+}
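+
+/* E.g. mmintrin.h's _mm_set_pi32 funnels into
+ __builtin_ia32_vec_init_v2si (__i0, __i1); the loop above lowers each
+ argument before ix86_expand_vector_init assembles the PARALLEL. */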
+
+/* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
+ ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
+ had a language-level syntax for referencing vector elements. */
+
+static rtx
+ix86_expand_vec_ext_builtin (tree arglist, rtx target)
+{
+ enum machine_mode tmode, mode0;
+ tree arg0, arg1;
+ int elt;
+ rtx op0;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+ op0 = expand_normal (arg0);
+ elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+ tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ mode0 = TYPE_MODE (TREE_TYPE (arg0));
+ gcc_assert (VECTOR_MODE_P (mode0));
+
+ op0 = force_reg (mode0, op0);
+
+ if (optimize || !target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ ix86_expand_vector_extract (true, target, op0, elt);
+
+ return target;
+}
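+
+/* E.g. _mm_extract_pi16 reaches this point through
+ __builtin_ia32_vec_ext_v4hi, with the selector validated by
+ get_element_number before the extract is expanded. */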
+
+/* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
+ ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
+ a language-level syntax for referencing vector elements. */
+
+static rtx
+ix86_expand_vec_set_builtin (tree arglist)
+{
+ enum machine_mode tmode, mode1;
+ tree arg0, arg1, arg2;
+ int elt;
+ rtx op0, op1, target;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ tmode = TYPE_MODE (TREE_TYPE (arg0));
+ mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+ if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+ op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+ op0 = force_reg (tmode, op0);
+ op1 = force_reg (mode1, op1);
+
+ /* OP0 is the source of these builtin functions and shouldn't be
+ modified. Create a copy, use it, and return it as the target. */
+ target = gen_reg_rtx (tmode);
+ emit_move_insn (target, op0);
+ ix86_expand_vector_set (true, target, op1, elt);
+
+ return target;
+}
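+
+/* E.g. _mm_insert_pi16 maps to __builtin_ia32_vec_set_v4hi; the copy
+ into a fresh pseudo keeps the source operand unmodified, as noted
+ above. */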
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ const struct builtin_description *d;
+ size_t i;
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ tree arg0, arg1, arg2, arg3;
+ rtx op0, op1, op2, op3, pat;
+ /* APPLE LOCAL ssse3 */
+ enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ switch (fcode)
+ {
+ case IX86_BUILTIN_EMMS:
+ emit_insn (gen_mmx_emms ());
+ return 0;
+
+ case IX86_BUILTIN_SFENCE:
+ emit_insn (gen_sse_sfence ());
+ return 0;
+
+ case IX86_BUILTIN_MASKMOVQ:
+ case IX86_BUILTIN_MASKMOVDQU:
+ icode = (fcode == IX86_BUILTIN_MASKMOVQ
+ ? CODE_FOR_mmx_maskmovq
+ : CODE_FOR_sse2_maskmovdqu);
+ /* Note the arg order is different from the operand order. */
+ arg1 = TREE_VALUE (arglist);
+ arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+
+ op0 = force_reg (Pmode, op0);
+ op0 = gen_rtx_MEM (mode1, op0);
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+ pat = GEN_FCN (icode) (op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return 0;
+
+ case IX86_BUILTIN_SQRTSS:
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
+ case IX86_BUILTIN_RSQRTSS:
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
+ case IX86_BUILTIN_RCPSS:
+ return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
+
+ /* APPLE LOCAL begin 4099020 */
+ case IX86_BUILTIN_LOADQ:
+ return ix86_expand_unop_builtin (CODE_FOR_sse_loadqv4si, arglist, target, 1);
+
+ case IX86_BUILTIN_MOVQ:
+ return ix86_expand_unop_builtin (CODE_FOR_sse_movqv4si, arglist, target, 0);
+
+ case IX86_BUILTIN_STOREQ:
+ return ix86_expand_store_builtin (CODE_FOR_sse_storeqv4si, arglist);
+ /* APPLE LOCAL end 4099020 */
+
+ case IX86_BUILTIN_LOADUPS:
+ return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
+
+ case IX86_BUILTIN_STOREUPS:
+ return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
+
+ case IX86_BUILTIN_LOADHPS:
+ case IX86_BUILTIN_LOADLPS:
+ case IX86_BUILTIN_LOADHPD:
+ case IX86_BUILTIN_LOADLPD:
+ icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
+ : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
+ : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
+ : CODE_FOR_sse2_loadlpd);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+
+ op0 = force_reg (mode0, op0);
+ op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
+ if (optimize || target == 0
+ || GET_MODE (target) != tmode
+ || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_STOREHPS:
+ case IX86_BUILTIN_STORELPS:
+ icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
+ : CODE_FOR_sse_storelps);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ op1 = force_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return const0_rtx;
+
+ case IX86_BUILTIN_MOVNTPS:
+ return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
+ case IX86_BUILTIN_MOVNTQ:
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return ix86_expand_store_builtin (CODE_FOR_sse_movntv1di, arglist);
+
+ case IX86_BUILTIN_LDMXCSR:
+ op0 = expand_normal (TREE_VALUE (arglist));
+ target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
+ emit_move_insn (target, op0);
+ emit_insn (gen_sse_ldmxcsr (target));
+ return 0;
+
+ case IX86_BUILTIN_STMXCSR:
+ target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
+ emit_insn (gen_sse_stmxcsr (target));
+ return copy_to_mode_reg (SImode, target);
+
+ case IX86_BUILTIN_SHUFPS:
+ case IX86_BUILTIN_SHUFPD:
+ icode = (fcode == IX86_BUILTIN_SHUFPS
+ ? CODE_FOR_sse_shufps
+ : CODE_FOR_sse2_shufpd);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+ mode2 = insn_data[icode].operand[3].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if ((optimize && !register_operand (op1, mode1))
+ || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ {
+ /* @@@ better error message */
+ error ("mask must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (optimize || target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_PSHUFW:
+ case IX86_BUILTIN_PSHUFD:
+ case IX86_BUILTIN_PSHUFHW:
+ case IX86_BUILTIN_PSHUFLW:
+ icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
+ : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
+ : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
+ : CODE_FOR_mmx_pshufw);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ /* @@@ better error message */
+ error ("mask must be an immediate");
+ return const0_rtx;
+ }
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_PSLLWI128:
+ icode = CODE_FOR_ashlv8hi3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSLLDI128:
+ icode = CODE_FOR_ashlv4si3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSLLQI128:
+ icode = CODE_FOR_ashlv2di3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSRAWI128:
+ icode = CODE_FOR_ashrv8hi3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSRADI128:
+ icode = CODE_FOR_ashrv4si3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSRLWI128:
+ icode = CODE_FOR_lshrv8hi3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSRLDI128:
+ icode = CODE_FOR_lshrv4si3;
+ goto do_pshifti;
+ case IX86_BUILTIN_PSRLQI128:
+ icode = CODE_FOR_lshrv2di3;
+ goto do_pshifti;
+ do_pshifti:
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
+ /* APPLE LOCAL begin radar 5543378 mainline candidate */
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ if (INTVAL (op1) < 0 || INTVAL (op1) > 255)
+ op1 = GEN_INT (255);
+ }
+ else
+ {
+ mode2 = insn_data[icode].operand[2].mode;
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ op1 = copy_to_reg (op1);
+ if (GET_MODE (op1) != mode2)
+ op1 = convert_to_mode (mode2, op1, 0);
+ }
+ }
+ /* APPLE LOCAL end radar 5543378 mainline candidate */
+
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_reg (op0);
+
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (!pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_PSLLW128:
+ icode = CODE_FOR_ashlv8hi3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSLLD128:
+ icode = CODE_FOR_ashlv4si3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSLLQ128:
+ icode = CODE_FOR_ashlv2di3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSRAW128:
+ icode = CODE_FOR_ashrv8hi3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSRAD128:
+ icode = CODE_FOR_ashrv4si3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSRLW128:
+ icode = CODE_FOR_lshrv8hi3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSRLD128:
+ icode = CODE_FOR_lshrv4si3;
+ goto do_pshift;
+ case IX86_BUILTIN_PSRLQ128:
+ icode = CODE_FOR_lshrv2di3;
+ goto do_pshift;
+ do_pshift:
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_reg (op0);
+
+ op1 = simplify_gen_subreg (TImode, op1, GET_MODE (op1), 0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, TImode))
+ op1 = copy_to_reg (op1);
+
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (!pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ /* APPLE LOCAL begin 5919583 */
+ case IX86_BUILTIN_PSLLDQI128:
+ case IX86_BUILTIN_PSRLDQI128:
+ case IX86_BUILTIN_PSLLDQI128_BYTESHIFT:
+ case IX86_BUILTIN_PSRLDQI128_BYTESHIFT:
+ icode = ((fcode == IX86_BUILTIN_PSLLDQI128
+ || fcode == IX86_BUILTIN_PSLLDQI128_BYTESHIFT)
+ ? CODE_FOR_sse2_ashlti3
+ : CODE_FOR_sse2_lshrti3);
+ /* APPLE LOCAL end 5919583 */
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+
+ /* APPLE LOCAL begin 5919583 */
+ if (! CONST_INT_P (op1))
+ {
+ error ("shift must be an immediate");
+ return const0_rtx;
+ }
+ /* The _mm_srli_si128/_mm_slli_si128 primitives are defined with
+ a byte-shift count; inside of GCC, we prefer to specify the
+ width of a shift in bits. The original non-BYTESHIFT
+ primitives were problematic due to the "*8" in their macro
+ bodies; we have moved the "*8" here to resolve this. The
+ original builtins are still supported because many developers
+ rely upon them. */
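+ /* For illustration only (a sketch, not the shipped header): the
+ problematic original macro form was roughly
+
+ #define _mm_srli_si128(a, imm) \
+ ((__m128i) __builtin_ia32_psrldqi128 ((__m128i)(a), (imm) * 8))
+
+ whereas the _BYTESHIFT builtins take the byte count unscaled and the
+ "*8" is applied here instead. */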
+ if (fcode == IX86_BUILTIN_PSLLDQI128_BYTESHIFT
+ || fcode == IX86_BUILTIN_PSRLDQI128_BYTESHIFT)
+ op1 = GEN_INT (INTVAL (op1) * 8);
+ /* APPLE LOCAL end 5919583 */
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ {
+ op0 = copy_to_reg (op0);
+ op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
+ }
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ error ("shift must be an immediate");
+ return const0_rtx;
+ }
+ target = gen_reg_rtx (V2DImode);
+ pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0),
+ op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_FEMMS:
+ emit_insn (gen_mmx_femms ());
+ return NULL_RTX;
+
+ case IX86_BUILTIN_PAVGUSB:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
+
+ case IX86_BUILTIN_PF2ID:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
+
+ case IX86_BUILTIN_PFACC:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFADD:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFCMPEQ:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFCMPGE:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFCMPGT:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFMAX:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFMIN:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFMUL:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFRCP:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
+
+ case IX86_BUILTIN_PFRCPIT1:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFRCPIT2:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFRSQIT1:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFRSQRT:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
+
+ case IX86_BUILTIN_PFSUB:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFSUBR:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PI2FD:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
+
+ case IX86_BUILTIN_PMULHRW:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
+
+ case IX86_BUILTIN_PF2IW:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
+
+ case IX86_BUILTIN_PFNACC:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PFPNACC:
+ return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
+
+ case IX86_BUILTIN_PI2FW:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
+
+ case IX86_BUILTIN_PSWAPDSI:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
+
+ case IX86_BUILTIN_PSWAPDSF:
+ return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
+
+ case IX86_BUILTIN_SQRTSD:
+ return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
+ case IX86_BUILTIN_LOADUPD:
+ return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
+ case IX86_BUILTIN_STOREUPD:
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
+
+ case IX86_BUILTIN_MFENCE:
+ emit_insn (gen_sse2_mfence ());
+ return 0;
+ case IX86_BUILTIN_LFENCE:
+ emit_insn (gen_sse2_lfence ());
+ return 0;
+
+ case IX86_BUILTIN_CLFLUSH:
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ icode = CODE_FOR_sse2_clflush;
+ if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
+ op0 = copy_to_mode_reg (Pmode, op0);
+
+ emit_insn (gen_sse2_clflush (op0));
+ return 0;
+
+ case IX86_BUILTIN_MOVNTPD:
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
+ case IX86_BUILTIN_MOVNTDQ:
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
+ case IX86_BUILTIN_MOVNTI:
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
+
+ case IX86_BUILTIN_LOADDQU:
+ return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
+ case IX86_BUILTIN_STOREDQU:
+ return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
+
+ case IX86_BUILTIN_MONITOR:
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ if (!REG_P (op0))
+ op0 = copy_to_mode_reg (Pmode, op0);
+ if (!REG_P (op1))
+ op1 = copy_to_mode_reg (SImode, op1);
+ if (!REG_P (op2))
+ op2 = copy_to_mode_reg (SImode, op2);
+ if (!TARGET_64BIT)
+ emit_insn (gen_sse3_monitor (op0, op1, op2));
+ else
+ emit_insn (gen_sse3_monitor64 (op0, op1, op2));
+ return 0;
+
+ case IX86_BUILTIN_MWAIT:
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ if (!REG_P (op0))
+ op0 = copy_to_mode_reg (SImode, op0);
+ if (!REG_P (op1))
+ op1 = copy_to_mode_reg (SImode, op1);
+ emit_insn (gen_sse3_mwait (op0, op1));
+ return 0;
+
+ case IX86_BUILTIN_LDDQU:
+ return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
+ target, 1);
+ /* APPLE LOCAL begin mainline */
+ case IX86_BUILTIN_PALIGNR:
+ case IX86_BUILTIN_PALIGNR128:
+ if (fcode == IX86_BUILTIN_PALIGNR)
+ {
+ /* APPLE LOCAL begin 4656532 use V1DImode for _m64 */
+ icode = CODE_FOR_ssse3_palignrv1di;
+ mode = V1DImode;
+ /* APPLE LOCAL end 4656532 use V1DImode for _m64 */
+ }
+ else
+ {
+ icode = CODE_FOR_ssse3_palignrti;
+ mode = V2DImode;
+ }
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+ mode3 = insn_data[icode].operand[3].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ {
+ op0 = copy_to_reg (op0);
+ op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
+ }
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ op1 = copy_to_reg (op1);
+ op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
+ }
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
+ {
+ error ("shift must be an immediate");
+ return const0_rtx;
+ }
+ target = gen_reg_rtx (mode);
+ pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
+ op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ /* APPLE LOCAL end mainline */
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ case IX86_BUILTIN_MOVNTDQA:
+ return ix86_expand_unop_builtin (CODE_FOR_sse4_1_movntdqa, arglist,
+ target, 1);
+
+ case IX86_BUILTIN_MOVNTSD:
+ return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, arglist);
+
+ case IX86_BUILTIN_MOVNTSS:
+ return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, arglist);
+
+ case IX86_BUILTIN_INSERTQ:
+ case IX86_BUILTIN_EXTRQ:
+ icode = (fcode == IX86_BUILTIN_EXTRQ
+ ? CODE_FOR_sse4a_extrq
+ : CODE_FOR_sse4a_insertq);
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ op1 = copy_to_mode_reg (mode2, op1);
+ if (optimize || target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return NULL_RTX;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_EXTRQI:
+ icode = CODE_FOR_sse4a_extrqi;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+ mode3 = insn_data[icode].operand[3].mode;
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ {
+ error ("index mask must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
+ {
+ error ("length mask must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (optimize || target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return NULL_RTX;
+ emit_insn (pat);
+ return target;
+
+ case IX86_BUILTIN_INSERTQI:
+ icode = CODE_FOR_sse4a_insertqi;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ op3 = expand_normal (arg3);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+ mode2 = insn_data[icode].operand[2].mode;
+ mode3 = insn_data[icode].operand[3].mode;
+ mode4 = insn_data[icode].operand[4].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
+ op0 = copy_to_mode_reg (mode1, op0);
+
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
+ op1 = copy_to_mode_reg (mode2, op1);
+
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
+ {
+ error ("index mask must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (! (*insn_data[icode].operand[4].predicate) (op3, mode4))
+ {
+ error ("length mask must be an immediate");
+ return gen_reg_rtx (tmode);
+ }
+ if (optimize || target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
+ if (! pat)
+ return NULL_RTX;
+ emit_insn (pat);
+ return target;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ case IX86_BUILTIN_VEC_INIT_V2SI:
+ case IX86_BUILTIN_VEC_INIT_V4HI:
+ case IX86_BUILTIN_VEC_INIT_V8QI:
+ return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
+
+ case IX86_BUILTIN_VEC_EXT_V2DF:
+ case IX86_BUILTIN_VEC_EXT_V2DI:
+ case IX86_BUILTIN_VEC_EXT_V4SF:
+ case IX86_BUILTIN_VEC_EXT_V4SI:
+ case IX86_BUILTIN_VEC_EXT_V8HI:
+ case IX86_BUILTIN_VEC_EXT_V16QI:
+ case IX86_BUILTIN_VEC_EXT_V2SI:
+ case IX86_BUILTIN_VEC_EXT_V4HI:
+ return ix86_expand_vec_ext_builtin (arglist, target);
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ case IX86_BUILTIN_VEC_SET_V2DI:
+ case IX86_BUILTIN_VEC_SET_V4SF:
+ case IX86_BUILTIN_VEC_SET_V4SI:
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ case IX86_BUILTIN_VEC_SET_V8HI:
+ case IX86_BUILTIN_VEC_SET_V4HI:
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ case IX86_BUILTIN_VEC_SET_V16QI:
+ return ix86_expand_vec_set_builtin (arglist);
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ case IX86_BUILTIN_INFQ:
+ {
+ REAL_VALUE_TYPE inf;
+ rtx tmp;
+
+ real_inf (&inf);
+ tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
+
+ tmp = validize_mem (force_const_mem (mode, tmp));
+
+ if (target == 0)
+ target = gen_reg_rtx (mode);
+
+ emit_move_insn (target, tmp);
+ return target;
+ }
+
+ case IX86_BUILTIN_FABSQ:
+ return ix86_expand_unop_builtin (CODE_FOR_abstf2, arglist, target, 0);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ default:
+ break;
+ }
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ for (i = 0, d = bdesc_sse_3arg;
+ i < ARRAY_SIZE (bdesc_sse_3arg);
+ i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_4_operands_builtin (d->icode,
+ arglist,
+ target);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ {
+ /* Compares are treated specially. */
+ if (d->icode == CODE_FOR_sse_maskcmpv4sf3
+ || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
+ || d->icode == CODE_FOR_sse2_maskcmpv2df3
+ || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
+ return ix86_expand_sse_compare (d, arglist, target);
+
+ return ix86_expand_binop_builtin (d->icode, arglist, target);
+ }
+
+ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
+
+ for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_comi (d, arglist, target);
+
+ /* APPLE LOCAL begin 4299257 */
+ for (i = 0, d = bdesc_ucomi; i < ARRAY_SIZE (bdesc_ucomi); i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_ucomi (d, arglist, target);
+ /* APPLE LOCAL end 4299257 */
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ for (i = 0, d = bdesc_ptest; i < ARRAY_SIZE (bdesc_ptest); i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_ptest (d, arglist, target);
+
+ for (i = 0, d = bdesc_crc32; i < ARRAY_SIZE (bdesc_crc32); i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_crc32 (d->icode, arglist, target);
+
+ for (i = 0, d = bdesc_pcmpestr;
+ i < ARRAY_SIZE (bdesc_pcmpestr);
+ i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_pcmpestr (d, arglist, target);
+
+ for (i = 0, d = bdesc_pcmpistr;
+ i < ARRAY_SIZE (bdesc_pcmpistr);
+ i++, d++)
+ if (d->code == fcode)
+ return ix86_expand_sse_pcmpistr (d, arglist, target);
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+
+ gcc_unreachable ();
+}
+
+/* Store OPERAND to memory after reload is completed. This means
+ that we can't easily use assign_stack_local. */
+rtx
+ix86_force_to_memory (enum machine_mode mode, rtx operand)
+{
+ rtx result;
+
+ gcc_assert (reload_completed);
+ if (TARGET_RED_ZONE)
+ {
+ result = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (-RED_ZONE_SIZE)));
+ emit_move_insn (result, operand);
+ }
+ else if (!TARGET_RED_ZONE && TARGET_64BIT)
+ {
+ switch (mode)
+ {
+ case HImode:
+ case SImode:
+ operand = gen_lowpart (DImode, operand);
+ /* FALLTHRU */
+ case DImode:
+ emit_insn (
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (DImode,
+ gen_rtx_PRE_DEC (DImode,
+ stack_pointer_rtx)),
+ operand));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ result = gen_rtx_MEM (mode, stack_pointer_rtx);
+ }
+ else
+ {
+ switch (mode)
+ {
+ case DImode:
+ {
+ rtx operands[2];
+ split_di (&operand, 1, operands, operands + 1);
+ emit_insn (
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx_PRE_DEC (Pmode,
+ stack_pointer_rtx)),
+ operands[1]));
+ emit_insn (
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx_PRE_DEC (Pmode,
+ stack_pointer_rtx)),
+ operands[0]));
+ }
+ break;
+ case HImode:
+ /* Store HImodes as SImodes. */
+ operand = gen_lowpart (SImode, operand);
+ /* FALLTHRU */
+ case SImode:
+ emit_insn (
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (GET_MODE (operand),
+ gen_rtx_PRE_DEC (SImode,
+ stack_pointer_rtx)),
+ operand));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ result = gen_rtx_MEM (mode, stack_pointer_rtx);
+ }
+ return result;
+}
+
+/* Free the operand from memory. */
+void
+ix86_free_from_memory (enum machine_mode mode)
+{
+ if (!TARGET_RED_ZONE)
+ {
+ int size;
+
+ if (mode == DImode || TARGET_64BIT)
+ size = 8;
+ else
+ size = 4;
+ /* Use LEA to deallocate stack space. In peephole2 it will be converted
+ to a pop or add instruction if registers are available. */
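+ /* For example (illustrative), on 32-bit targets the insn emitted
+ below prints as "leal 4(%esp), %esp"; peephole2 may rewrite it as
+ "addl $4, %esp" or a pop into a free scratch register. */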
+ emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ GEN_INT (size))));
+ }
+}
+
+/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
+ QImode must go into class Q_REGS.
+ Narrow ALL_REGS to GENERAL_REGS. This lets movsf and movdf do
+ mem-to-mem moves through integer regs. */
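+/* For instance (illustrative): when asked to load a nonzero DFmode
+ CONST_DOUBLE into SSE_REGS, we return NO_REGS below, which forces the
+ constant into the constant pool so it can be loaded from memory. */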
+enum reg_class
+ix86_preferred_reload_class (rtx x, enum reg_class class)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ /* We're only allowed to return a subclass of CLASS. Many of the
+ following checks fail for NO_REGS, so eliminate that early. */
+ if (class == NO_REGS)
+ return NO_REGS;
+
+ /* All classes can load zeros. */
+ if (x == CONST0_RTX (mode))
+ return class;
+
+ /* Force constants into memory if we are loading a (nonzero) constant into
+ an MMX or SSE register. This is because there are no MMX/SSE instructions
+ to load from a constant. */
+ if (CONSTANT_P (x)
+ && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
+ return NO_REGS;
+
+ /* APPLE LOCAL begin */
+ /* MERGE FIXME - ensure that 3501055 is fixed. */
+ /* MERGE FIXME - ensure that 4206991 is fixed. */
+ /* APPLE LOCAL end */
+ /* Prefer SSE regs only, if we can use them for math. */
+ if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
+ return SSE_CLASS_P (class) ? class : NO_REGS;
+
+ /* Floating-point constants need more complex checks. */
+ if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
+ {
+ /* General regs can load everything. */
+ if (reg_class_subset_p (class, GENERAL_REGS))
+ return class;
+
+ /* Floats can load 0 and 1 plus some others. Note that we eliminated
+ zero above. We only want to wind up preferring 80387 registers if
+ we plan on doing computation with them. */
+ if (TARGET_80387
+ && standard_80387_constant_p (x))
+ {
+ /* Limit class to non-sse. */
+ if (class == FLOAT_SSE_REGS)
+ return FLOAT_REGS;
+ if (class == FP_TOP_SSE_REGS)
+ return FP_TOP_REG;
+ if (class == FP_SECOND_SSE_REGS)
+ return FP_SECOND_REG;
+ if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
+ return class;
+ }
+
+ return NO_REGS;
+ }
+
+ /* Generally when we see PLUS here, it's the function invariant
+ (plus soft-fp const_int), which can only be computed into general
+ regs. */
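+ /* E.g. (plus (reg:SI sfp) (const_int 8)) produced by frame-pointer
+ elimination (illustrative RTL shape). */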
+ if (GET_CODE (x) == PLUS)
+ return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
+
+ /* QImode constants are easy to load, but non-constant QImode data
+ must go into Q_REGS. */
+ if (GET_MODE (x) == QImode && !CONSTANT_P (x))
+ {
+ if (reg_class_subset_p (class, Q_REGS))
+ return class;
+ if (reg_class_subset_p (Q_REGS, class))
+ return Q_REGS;
+ return NO_REGS;
+ }
+
+ return class;
+}
+
+/* Discourage putting floating-point values in SSE registers unless
+ SSE math is being used, and likewise for the 387 registers. */
+enum reg_class
+ix86_preferred_output_reload_class (rtx x, enum reg_class class)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ /* Restrict the output reload class to the register bank that we are doing
+ math on. If we would prefer not to return a subset of CLASS, reject this
+ alternative: if reload cannot do this, it will still use its choice. */
+ if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
+ return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
+
+ if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
+ {
+ if (class == FP_TOP_SSE_REGS)
+ return FP_TOP_REG;
+ else if (class == FP_SECOND_SSE_REGS)
+ return FP_SECOND_REG;
+ else
+ return FLOAT_CLASS_P (class) ? class : NO_REGS;
+ }
+
+ return class;
+}
+
+/* If we are copying between general and FP registers, we need a memory
+ location. The same is true for SSE and MMX registers.
+
+ The macro can't work reliably when one of the CLASSES is a class containing
+ registers from multiple units (SSE, MMX, integer). We avoid this by never
+ combining those units in a single alternative in the machine description.
+ Ensure that this constraint holds to avoid unexpected surprises.
+
+ When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
+ enforce these sanity checks. */
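+
+/* For instance (illustrative): a DImode move between GENERAL_REGS and
+ MMX_REGS is reported as needing a memory intermediate even though
+ movd/movq forms exist; see the ??? note in the body below. */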
+
+int
+ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
+ enum machine_mode mode, int strict)
+{
+ if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
+ || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
+ || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
+ || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
+ || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
+ || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
+ {
+ gcc_assert (!strict);
+ return true;
+ }
+
+ if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
+ return true;
+
+ /* ??? This is a lie. We do have moves between mmx/general, and for
+ mmx/sse2. But by saying we need secondary memory we discourage the
+ register allocator from using the mmx registers unless needed. */
+ if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
+ return true;
+
+ if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
+ {
+ /* SSE1 doesn't have any direct moves from other classes. */
+ if (!TARGET_SSE2)
+ return true;
+
+ /* If the target says that inter-unit moves are more expensive
+ than moving through memory, then don't generate them. */
+ if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
+ return true;
+
+ /* Between SSE and general, we have moves no larger than word size. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return true;
+
+ /* ??? For the cost of one register reformat penalty, we could use
+ the same instructions to move SFmode and DFmode data, but the
+ relevant move patterns don't support those alternatives. */
+ if (mode == SFmode || mode == DFmode)
+ return true;
+ }
+
+ return false;
+}
+
+/* Return true if the registers in CLASS cannot represent the change from
+ modes FROM to TO. */
+
+bool
+ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
+ enum reg_class class)
+{
+ if (from == to)
+ return false;
+
+ /* x87 registers can't do subreg at all, as all values are reformatted
+ to extended precision. */
+ if (MAYBE_FLOAT_CLASS_P (class))
+ return true;
+
+ if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
+ {
+ /* Vector registers do not support QI or HImode loads. If we don't
+ disallow a change to these modes, reload will assume it's ok to
+ drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
+ the vec_dupv4hi pattern. */
+ if (GET_MODE_SIZE (from) < 4)
+ return true;
+
+ /* Vector registers do not support subreg with nonzero offsets, which
+ are otherwise valid for integer registers. Since we can't see
+ whether we have a nonzero offset from here, prohibit all
+ nonparadoxical subregs changing size. */
+ if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
+ return true;
+ }
+
+ return false;
+}
+
+/* Return the cost of moving data from a register in class CLASS1 to
+ one in class CLASS2.
+
+ It is not required that the cost always equal 2 when CLASS1 is the same as
+ CLASS2; on some machines it is expensive to move between registers if they
+ are not general registers. */
+
+int
+ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
+ enum reg_class class2)
+{
+ /* In case we require secondary memory, compute the cost of the store
+ followed by the load. To avoid bad register allocation choices, we need
+ this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
+
+ if (ix86_secondary_memory_needed (class1, class2, mode, 0))
+ {
+ int cost = 1;
+
+ cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
+ MEMORY_MOVE_COST (mode, class1, 1));
+ cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
+ MEMORY_MOVE_COST (mode, class2, 1));
+
+ /* In case of copying from a general purpose register we may emit multiple
+ stores followed by a single load, causing a memory-size-mismatch stall.
+ Count this as an arbitrarily high cost of 20. */
+ if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
+ cost += 20;
+
+ /* In the case of FP/MMX moves, the registers actually overlap, and we
+ have to switch modes in order to treat them differently. */
+ if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
+ || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
+ cost += 20;
+
+ return cost;
+ }
+
+ /* Moves between SSE/MMX and integer unit are expensive. */
+ if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
+ || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
+ return ix86_cost->mmxsse_to_integer;
+ if (MAYBE_FLOAT_CLASS_P (class1))
+ return ix86_cost->fp_move;
+ if (MAYBE_SSE_CLASS_P (class1))
+ return ix86_cost->sse_move;
+ if (MAYBE_MMX_CLASS_P (class1))
+ return ix86_cost->mmx_move;
+ return 2;
+}
+
+/* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
+
+bool
+ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* Flags and only flags can hold CCmode values, and they can hold
+ nothing else. */
+ if (CC_REGNO_P (regno))
+ return GET_MODE_CLASS (mode) == MODE_CC;
+ if (GET_MODE_CLASS (mode) == MODE_CC
+ || GET_MODE_CLASS (mode) == MODE_RANDOM
+ || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
+ return 0;
+ if (FP_REGNO_P (regno))
+ return VALID_FP_MODE_P (mode);
+ if (SSE_REGNO_P (regno))
+ {
+ /* We implement the move patterns for all vector modes into and
+ out of SSE registers, even when no operation instructions
+ are available. */
+ return (VALID_SSE_REG_MODE (mode)
+ || VALID_SSE2_REG_MODE (mode)
+ || VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
+ if (MMX_REGNO_P (regno))
+ {
+ /* We implement the move patterns for 3DNOW modes even in MMX mode,
+ so if the register is available at all, then we can move data of
+ the given mode into or out of it. */
+ return (VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
+
+ if (mode == QImode)
+ {
+ /* Take care with QImode values: they can live in non-QI regs, but
+ then they cause partial register stalls. */
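+ /* E.g. in 32-bit mode only %eax, %edx, %ecx and %ebx (hard regs 0-3)
+ have QImode subregisters (%al, %dl, ...); with REX prefixes in
+ 64-bit mode every general register does. */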
+ if (regno < 4 || TARGET_64BIT)
+ return 1;
+ if (!TARGET_PARTIAL_REG_STALL)
+ return 1;
+ return reload_in_progress || reload_completed;
+ }
+ /* We handle both integers and floats in the general purpose registers. */
+ else if (VALID_INT_MODE_P (mode))
+ return 1;
+ else if (VALID_FP_MODE_P (mode))
+ return 1;
+ /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
+ on to use that value in smaller contexts, this can easily force a
+ pseudo to be allocated to GENERAL_REGS. Since this is no worse than
+ supporting DImode, allow it. */
+ else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
+ return 1;
+
+ return 0;
+}
+
+/* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
+ tieable integer mode. */
+
+static bool
+ix86_tieable_integer_mode_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case HImode:
+ case SImode:
+ return true;
+
+ case QImode:
+ return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
+
+ case DImode:
+ /* APPLE LOCAL 5695218 convert int to logical bool */
+ return !!TARGET_64BIT;
+
+ default:
+ return false;
+ }
+}
+
+/* Return true if MODE1 is accessible in a register that can hold MODE2
+ without copying. That is, all register classes that can hold MODE2
+ can also hold MODE1. */
+
+bool
+ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
+{
+ if (mode1 == mode2)
+ return true;
+
+ if (ix86_tieable_integer_mode_p (mode1)
+ && ix86_tieable_integer_mode_p (mode2))
+ return true;
+
+ /* MODE2 being XFmode implies fp stack or general regs, which means we
+ can tie any smaller floating point modes to it. Note that we do not
+ tie this with TFmode. */
+ if (mode2 == XFmode)
+ return mode1 == SFmode || mode1 == DFmode;
+
+ /* MODE2 being DFmode implies fp stack, general or sse regs, which means
+ that we can tie it with SFmode. */
+ if (mode2 == DFmode)
+ return mode1 == SFmode;
+
+ /* If MODE2 is only appropriate for an SSE register, then tie with
+ any other mode acceptable to SSE registers. */
+ if (GET_MODE_SIZE (mode2) >= 8
+ && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
+ return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
+
+ /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
+ with any other mode acceptable to MMX registers. */
+ if (GET_MODE_SIZE (mode2) == 8
+ && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
+ return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
+
+ return false;
+}
+
+/* Return the cost of moving data of mode M between a
+ register and memory. A value of 2 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ If moving between registers and memory is more expensive than
+ between two registers, you should define this macro to express the
+ relative cost.
+
+ Also model the increased cost of moving QImode registers in non-Q_REGS
+ classes.
+ */
+int
+ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
+{
+ if (FLOAT_CLASS_P (class))
+ {
+ int index;
+ switch (mode)
+ {
+ case SFmode:
+ index = 0;
+ break;
+ case DFmode:
+ index = 1;
+ break;
+ case XFmode:
+ index = 2;
+ break;
+ default:
+ return 100;
+ }
+ return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
+ }
+ if (SSE_CLASS_P (class))
+ {
+ int index;
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 4:
+ index = 0;
+ break;
+ case 8:
+ index = 1;
+ break;
+ case 16:
+ index = 2;
+ break;
+ default:
+ return 100;
+ }
+ return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
+ }
+ if (MMX_CLASS_P (class))
+ {
+ int index;
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 4:
+ index = 0;
+ break;
+ case 8:
+ index = 1;
+ break;
+ default:
+ return 100;
+ }
+ return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
+ }
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 1:
+ if (in)
+ return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
+ : ix86_cost->movzbl_load);
+ else
+ return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
+ : ix86_cost->int_store[0] + 4);
+ break;
+ case 2:
+ return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
+ default:
+ /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
+ if (mode == TFmode)
+ mode = XFmode;
+ return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
+ * (((int) GET_MODE_SIZE (mode)
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
+ }
+}
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
+ *total = 3;
+ else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
+ *total = 2;
+ else if (flag_pic && SYMBOLIC_CONST (x)
+ && (!TARGET_64BIT
+ || (GET_CODE (x) != LABEL_REF
+ && (GET_CODE (x) != SYMBOL_REF
+ || !SYMBOL_REF_LOCAL_P (x)))))
+ *total = 1;
+ else
+ *total = 0;
+ return true;
+
+ case CONST_DOUBLE:
+ if (mode == VOIDmode)
+ *total = 0;
+ else
+ switch (standard_80387_constant_p (x))
+ {
+ case 1: /* 0.0 */
+ *total = 1;
+ break;
+ default: /* Other constants */
+ *total = 2;
+ break;
+ case 0:
+ case -1:
+ /* Start with (MEM (SYMBOL_REF)), since that's where
+ it'll probably end up. Add a penalty for size. */
+ *total = (COSTS_N_INSNS (1)
+ + (flag_pic != 0 && !TARGET_64BIT)
+ + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
+ break;
+ }
+ return true;
+
+ case ZERO_EXTEND:
+ /* Zero extension is often completely free on x86_64, so make
+ it as cheap as possible. */
+ if (TARGET_64BIT && mode == DImode
+ && GET_MODE (XEXP (x, 0)) == SImode)
+ *total = 1;
+ else if (TARGET_ZERO_EXTEND_WITH_AND)
+ *total = ix86_cost->add;
+ else
+ *total = ix86_cost->movzx;
+ return false;
+
+ case SIGN_EXTEND:
+ *total = ix86_cost->movsx;
+ return false;
+
+ case ASHIFT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
+ {
+ HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
+ if (value == 1)
+ {
+ *total = ix86_cost->add;
+ return false;
+ }
+ if ((value == 2 || value == 3)
+ && ix86_cost->lea <= ix86_cost->shift_const)
+ {
+ *total = ix86_cost->lea;
+ return false;
+ }
+ }
+ /* FALLTHRU */
+
+ case ROTATE:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATERT:
+ if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ if (INTVAL (XEXP (x, 1)) > 32)
+ *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
+ else
+ *total = ix86_cost->shift_const * 2;
+ }
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == AND)
+ *total = ix86_cost->shift_var * 2;
+ else
+ *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
+ }
+ }
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ *total = ix86_cost->shift_const;
+ else
+ *total = ix86_cost->shift_var;
+ }
+ return false;
+
+ case MULT:
+ if (FLOAT_MODE_P (mode))
+ {
+ *total = ix86_cost->fmul;
+ return false;
+ }
+ else
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ int nbits;
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
+ for (nbits = 0; value != 0; value &= value - 1)
+ nbits++;
+ }
+ else
+ /* This is arbitrary. */
+ nbits = 7;
+
+ /* Compute costs correctly for widening multiplication. */
+ if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
+ && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
+ == GET_MODE_SIZE (mode))
+ {
+ int is_mulwiden = 0;
+ enum machine_mode inner_mode = GET_MODE (op0);
+
+ if (GET_CODE (op0) == GET_CODE (op1))
+ is_mulwiden = 1, op1 = XEXP (op1, 0);
+ else if (GET_CODE (op1) == CONST_INT)
+ {
+ if (GET_CODE (op0) == SIGN_EXTEND)
+ is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
+ == INTVAL (op1);
+ else
+ is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
+ }
+
+ if (is_mulwiden)
+ op0 = XEXP (op0, 0), mode = GET_MODE (op0);
+ }
+
+ *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
+ + nbits * ix86_cost->mult_bit
+ + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
+
+ return true;
+ }
+
+ case DIV:
+ case UDIV:
+ case MOD:
+ case UMOD:
+ if (FLOAT_MODE_P (mode))
+ *total = ix86_cost->fdiv;
+ else
+ *total = ix86_cost->divide[MODE_INDEX (mode)];
+ return false;
+
+ case PLUS:
+ if (FLOAT_MODE_P (mode))
+ *total = ix86_cost->fadd;
+ else if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
+ {
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
+ if (val == 2 || val == 4 || val == 8)
+ {
+ *total = ix86_cost->lea;
+ *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
+ *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
+ outer_code);
+ *total += rtx_cost (XEXP (x, 1), outer_code);
+ return true;
+ }
+ }
+ else if (GET_CODE (XEXP (x, 0)) == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
+ if (val == 2 || val == 4 || val == 8)
+ {
+ *total = ix86_cost->lea;
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
+ *total += rtx_cost (XEXP (x, 1), outer_code);
+ return true;
+ }
+ }
+ else if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ *total = ix86_cost->lea;
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
+ *total += rtx_cost (XEXP (x, 1), outer_code);
+ return true;
+ }
+ }
+ /* FALLTHRU */
+
+ case MINUS:
+ if (FLOAT_MODE_P (mode))
+ {
+ *total = ix86_cost->fadd;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case AND:
+ case IOR:
+ case XOR:
+ if (!TARGET_64BIT && mode == DImode)
+ {
+ *total = (ix86_cost->add * 2
+ + (rtx_cost (XEXP (x, 0), outer_code)
+ << (GET_MODE (XEXP (x, 0)) != DImode))
+ + (rtx_cost (XEXP (x, 1), outer_code)
+ << (GET_MODE (XEXP (x, 1)) != DImode)));
+ return true;
+ }
+ /* FALLTHRU */
+
+ case NEG:
+ if (FLOAT_MODE_P (mode))
+ {
+ *total = ix86_cost->fchs;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case NOT:
+ if (!TARGET_64BIT && mode == DImode)
+ *total = ix86_cost->add * 2;
+ else
+ *total = ix86_cost->add;
+ return false;
+
+ case COMPARE:
+ if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
+ && XEXP (XEXP (x, 0), 1) == const1_rtx
+ && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
+ && XEXP (x, 1) == const0_rtx)
+ {
+ /* This kind of construct is implemented using test[bwl].
+ Treat it as if we had an AND. */
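+ /* E.g. (compare (zero_extract x 1 13) (const_int 0)) can be emitted
+ as "testl $0x2000, x" (illustrative operand printing). */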
+ *total = (ix86_cost->add
+ + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
+ + rtx_cost (const1_rtx, outer_code));
+ return true;
+ }
+ return false;
+
+ case FLOAT_EXTEND:
+ if (!TARGET_SSE_MATH
+ || mode == XFmode
+ || (mode == DFmode && !TARGET_SSE2))
+ /* For standard 80387 constants, raise the cost to prevent
+ compress_float_constant() from generating a load from memory. */
+ switch (standard_80387_constant_p (XEXP (x, 0)))
+ {
+ case -1:
+ case 0:
+ *total = 0;
+ break;
+ case 1: /* 0.0 */
+ *total = 1;
+ break;
+ default:
+ *total = (x86_ext_80387_constants & TUNEMASK
+ || optimize_size
+ ? 1 : 0);
+ }
+ return false;
+
+ case ABS:
+ if (FLOAT_MODE_P (mode))
+ *total = ix86_cost->fabs;
+ return false;
+
+ case SQRT:
+ if (FLOAT_MODE_P (mode))
+ *total = ix86_cost->fsqrt;
+ return false;
+
+ case UNSPEC:
+ if (XINT (x, 1) == UNSPEC_TP)
+ *total = 0;
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+#if TARGET_MACHO
+
+static int current_machopic_label_num;
+
+/* Given a symbol name and its associated stub, write out the
+ definition of the stub. */
+
+void
+machopic_output_stub (FILE *file, const char *symb, const char *stub)
+{
+ unsigned int length;
+ char *binder_name, *symbol_name, lazy_ptr_name[32];
+ int label = ++current_machopic_label_num;
+
+ /* For 64-bit we shouldn't get here. */
+ gcc_assert (!TARGET_64BIT);
+
+ /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
+ symb = (*targetm.strip_name_encoding) (symb);
+
+ length = strlen (stub);
+ binder_name = alloca (length + 32);
+ GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
+
+ length = strlen (symb);
+ symbol_name = alloca (length + 32);
+ GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
+
+ sprintf (lazy_ptr_name, "L%d$lz", label);
+
+ /* APPLE LOCAL begin deep branch prediction pic-base */
+ /* APPLE LOCAL begin AT&T-style stub 4164563 */
+ /* Choose one of four possible sections for this stub. */
+ if (MACHOPIC_ATT_STUB)
+ switch_to_section (darwin_sections[machopic_picsymbol_stub3_section]);
+ else if (MACHOPIC_PURE)
+ /* APPLE LOCAL end AT&T-style stub 4164563 */
+ {
+ if (TARGET_DEEP_BRANCH_PREDICTION)
+ switch_to_section (darwin_sections[machopic_picsymbol_stub2_section]);
+ else
+ switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
+ }
+ else
+ /* APPLE LOCAL end deep branch prediction pic-base */
+ switch_to_section (darwin_sections[machopic_symbol_stub_section]);
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ /* APPLE LOCAL begin use %ecx in stubs 4146993 */
+ /* APPLE LOCAL begin deep branch prediction pic-base */
+ /* APPLE LOCAL begin AT&T-style stub 4164563 */
+ if (MACHOPIC_ATT_STUB)
+ {
+ fprintf (file, "\thlt ; hlt ; hlt ; hlt ; hlt\n");
+ }
+ else if (MACHOPIC_PURE)
+ /* APPLE LOCAL end AT&T-style stub 4164563 */
+ {
+ /* PIC stub. */
+ if (TARGET_DEEP_BRANCH_PREDICTION)
+ {
+ /* 25-byte PIC stub using "CALL get_pc_thunk". */
+ rtx tmp = gen_rtx_REG (SImode, 2 /* ECX */);
+ output_set_got (tmp, NULL_RTX); /* "CALL ___<cpu>.get_pc_thunk.cx". */
+ fprintf (file, "LPC$%d:\tmovl\t%s-LPC$%d(%%ecx),%%ecx\n", label, lazy_ptr_name, label);
+ }
+ else
+ {
+ /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %eax". */
+ fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%ecx\n", label, label);
+ fprintf (file, "\tmovl %s-LPC$%d(%%ecx),%%ecx\n", lazy_ptr_name, label);
+ }
+ fprintf (file, "\tjmp\t*%%ecx\n");
+ }
+ else /* 16-byte -mdynamic-no-pic stub. */
+ fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
+
+ /* APPLE LOCAL begin AT&T-style stub 4164563 */
+ /* The AT&T-style ("self-modifying") stub is not lazily bound, thus
+ it needs no stub-binding-helper. */
+ if (MACHOPIC_ATT_STUB)
+ return;
+ /* APPLE LOCAL end AT&T-style stub 4164563 */
+
+ /* The "stub_binding_helper" is a fragment that gets executed only
+ once, the first time this stub is invoked (then it becomes "dead
+ code"). It asks the dynamic linker to set the
+ lazy_symbol_pointer to point at the function we want
+ (e.g. printf) so that subsequent invocations of this stub go
+ directly to that dynamically-linked callee. Other UN*X systems
+ use similar stubs, but those are generated by the static linker
+ and never appear in assembly files. */
+ /* APPLE LOCAL end deep branch prediction pic-base */
+ fprintf (file, "%s:\n", binder_name);
+
+ /* APPLE LOCAL begin deep branch prediction pic-base * tabify insns */
+ if (MACHOPIC_PURE)
+ {
+ fprintf (file, "\tlea\t%s-%s(%%ecx),%%ecx\n", lazy_ptr_name, binder_name);
+ fprintf (file, "\tpushl\t%%ecx\n");
+ }
+ else
+ fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
+
+ fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
+ /* APPLE LOCAL end deep branch prediction pic-base * tabify insns */
+ /* APPLE LOCAL end use %ecx in stubs 4146993 */
+
+ /* APPLE LOCAL begin deep branch prediction pic-base. */
+ /* N.B. Keep the correspondence of these
+ 'symbol_ptr/symbol_ptr2/symbol_ptr3' sections consistent with the
+ old-pic/new-pic/non-pic stubs; altering this will break
+ compatibility with existing dylibs. */
+ if (MACHOPIC_PURE)
+ {
+ /* PIC stubs. */
+ if (TARGET_DEEP_BRANCH_PREDICTION)
+ /* 25-byte PIC stub using "CALL get_pc_thunk". */
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr2_section]);
+ else
+ /* 26-byte PIC stub using inline picbase: "CALL L42 ! L42: pop %ebx". */
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
+ }
+ else
+ /* 16-byte -mdynamic-no-pic stub. */
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr3_section]);
+
+ fprintf (file, "%s:\n", lazy_ptr_name);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+ fprintf (file, "\t.long\t%s\n", binder_name);
+}
+/* APPLE LOCAL end deep branch prediction pic-base */
+
+void
+darwin_x86_file_end (void)
+{
+ darwin_file_end ();
+ ix86_file_end ();
+}
+
+/* APPLE LOCAL begin 4457939 stack alignment mishandled */
+void
+ix86_darwin_init_expanders (void)
+{
+ /* <rdar://problem/4471596> stack alignment is not handled properly
+
+ Please remove this entire function when addressing this
+ Radar. Please be sure to delete the definition of INIT_EXPANDERS
+ in i386/darwin.h as well. */
+ /* The Darwin/x86_32 stack pointer will be 16-byte aligned at every
+ CALL, but the frame pointer, when used, will be offset 8 bytes from
+ 16-byte alignment (the size of the return address plus the saved
+ frame pointer). */
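+ /* Worked example (illustrative): with %esp == 0 mod 16 at the CALL,
+ the pushed return address leaves %esp == 12 mod 16 on entry, and
+ "pushl %ebp; movl %esp, %ebp" then leaves %ebp == 8 mod 16. */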
+ if (cfun && cfun->emit
+ && cfun->emit->regno_pointer_align)
+ {
+ REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY;
+ REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = BITS_PER_WORD;
+ REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = BITS_PER_WORD;
+ REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = BITS_PER_WORD;
+ }
+}
+/* APPLE LOCAL end 4457939 stack alignment mishandled */
+#endif /* TARGET_MACHO */
+
+/* Order the registers for register allocator. */
+
+void
+x86_order_regs_for_local_alloc (void)
+{
+ int pos = 0;
+ int i;
+
+ /* First allocate the local general purpose registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (GENERAL_REGNO_P (i) && call_used_regs[i])
+ reg_alloc_order [pos++] = i;
+
+ /* Global general purpose registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (GENERAL_REGNO_P (i) && !call_used_regs[i])
+ reg_alloc_order [pos++] = i;
+
+ /* x87 registers come first in case we are doing FP math
+ using them. */
+ if (!TARGET_SSE_MATH)
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ reg_alloc_order [pos++] = i;
+
+ /* SSE registers. */
+ for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
+ reg_alloc_order [pos++] = i;
+ for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
+ reg_alloc_order [pos++] = i;
+
+ /* x87 registers. */
+ if (TARGET_SSE_MATH)
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ reg_alloc_order [pos++] = i;
+
+ for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
+ reg_alloc_order [pos++] = i;
+
+ /* Initialize the rest of the array, as we do not allocate some
+ registers at all. */
+ while (pos < FIRST_PSEUDO_REGISTER)
+ reg_alloc_order [pos++] = 0;
+}
+
+/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+ix86_handle_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ else if ((is_attribute_p ("ms_struct", name)
+ && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
+ || ((is_attribute_p ("gcc_struct", name)
+ && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
+ {
+ warning (OPT_Wattributes, "%qs incompatible attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static bool
+ix86_ms_bitfield_layout_p (tree record_type)
+{
+ return ((TARGET_MS_BITFIELD_LAYOUT
+ && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
+ || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
+}
+
+/* Returns an expression indicating where the this parameter is
+ located on entry to the FUNCTION. */
+
+static rtx
+x86_this_parameter (tree function)
+{
+ tree type = TREE_TYPE (function);
+
+ if (TARGET_64BIT)
+ {
+ int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
+ return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
+ }
+
+ if (ix86_function_regparm (type, function) > 0)
+ {
+ tree parm;
+
+ parm = TYPE_ARG_TYPES (type);
+ /* Figure out whether or not the function has a variable number of
+ arguments. */
+ for (; parm; parm = TREE_CHAIN (parm))
+ if (TREE_VALUE (parm) == void_type_node)
+ break;
+ /* If not, the this parameter is in the first argument. */
+ if (parm)
+ {
+ int regno = 0;
+ if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
+ regno = 2;
+ return gen_rtx_REG (SImode, regno);
+ }
+ }
+
+ if (aggregate_value_p (TREE_TYPE (type), type))
+ return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
+ else
+ return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
+}
+
+/* Determine whether x86_output_mi_thunk can succeed. */
+
+static bool
+x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT vcall_offset, tree function)
+{
+ /* 64-bit can handle anything. */
+ if (TARGET_64BIT)
+ return true;
+
+ /* For 32-bit, everything's fine if we have one free register. */
+ if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
+ return true;
+
+ /* Need a free register for vcall_offset. */
+ if (vcall_offset)
+ return false;
+
+ /* Need a free register for GOT references. */
+ if (flag_pic && !(*targetm.binds_local_p) (function))
+ return false;
+
+ /* Otherwise ok. */
+ return true;
+}
+
+/* Output the assembler code for a thunk function. THUNK_DECL is the
+ declaration for the thunk function itself, FUNCTION is the decl for
+ the target function. DELTA is an immediate constant offset to be
+ added to THIS. If VCALL_OFFSET is nonzero, the word at
+ *(*this + vcall_offset) should be added to THIS. */
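+ /* Conceptually the thunk performs (a C sketch of the semantics, not
+ the code emitted below):
+
+ this = (char *) this + delta;
+ if (vcall_offset)
+ this = (char *) this
+ + *(ptrdiff_t *) (*(char **) this + vcall_offset);
+ return function (this, ...); (as a tail call)
+ */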
+
+static void
+x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
+ tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset, tree function)
+{
+ rtx xops[3];
+ rtx this = x86_this_parameter (function);
+ rtx this_reg, tmp;
+
+ /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
+ pull it in now and let DELTA benefit. */
+ if (REG_P (this))
+ this_reg = this;
+ else if (vcall_offset)
+ {
+ /* Put the this parameter into %eax. */
+ xops[0] = this;
+ xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
+ output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
+ }
+ else
+ this_reg = NULL_RTX;
+
+ /* Adjust the this parameter by a fixed constant. */
+ if (delta)
+ {
+ xops[0] = GEN_INT (delta);
+ xops[1] = this_reg ? this_reg : this;
+ if (TARGET_64BIT)
+ {
+ if (!x86_64_general_operand (xops[0], DImode))
+ {
+ tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
+ xops[1] = tmp;
+ output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
+ xops[0] = tmp;
+ xops[1] = this;
+ }
+ output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
+ }
+ else
+ output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
+ }
+
+ /* Adjust the this parameter by a value stored in the vtable. */
+ if (vcall_offset)
+ {
+ if (TARGET_64BIT)
+ tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
+ else
+ {
+ int tmp_regno = 2 /* ECX */;
+ if (lookup_attribute ("fastcall",
+ TYPE_ATTRIBUTES (TREE_TYPE (function))))
+ tmp_regno = 0 /* EAX */;
+ tmp = gen_rtx_REG (SImode, tmp_regno);
+ }
+
+ xops[0] = gen_rtx_MEM (Pmode, this_reg);
+ xops[1] = tmp;
+ if (TARGET_64BIT)
+ output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
+ else
+ output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
+
+ /* Adjust the this parameter. */
+ xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
+ if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
+ {
+ rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
+ xops[0] = GEN_INT (vcall_offset);
+ xops[1] = tmp2;
+ output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
+ xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
+ }
+ xops[1] = this_reg;
+ if (TARGET_64BIT)
+ output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
+ else
+ output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
+ }
+
+ /* If necessary, drop THIS back to its stack slot. */
+ if (this_reg && this_reg != this)
+ {
+ xops[0] = this_reg;
+ xops[1] = this;
+ output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
+ }
+
+ xops[0] = XEXP (DECL_RTL (function), 0);
+ if (TARGET_64BIT)
+ {
+ if (!flag_pic || (*targetm.binds_local_p) (function))
+ output_asm_insn ("jmp\t%P0", xops);
+ else
+ {
+ tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
+ tmp = gen_rtx_CONST (Pmode, tmp);
+ tmp = gen_rtx_MEM (QImode, tmp);
+ xops[0] = tmp;
+ output_asm_insn ("jmp\t%A0", xops);
+ }
+ }
+ else
+ {
+ if (!flag_pic || (*targetm.binds_local_p) (function))
+ output_asm_insn ("jmp\t%P0", xops);
+ else
+#if TARGET_MACHO
+ if (TARGET_MACHO)
+ {
+ rtx sym_ref = XEXP (DECL_RTL (function), 0);
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ if (darwin_stubs)
+ sym_ref = (gen_rtx_SYMBOL_REF
+ (Pmode,
+ machopic_indirection_name (sym_ref, /*stub_p=*/true)));
+ tmp = gen_rtx_MEM (QImode, sym_ref);
+ /* APPLE LOCAL end axe stubs 5571540 */
+ xops[0] = tmp;
+ output_asm_insn ("jmp\t%0", xops);
+ }
+ else
+#endif /* TARGET_MACHO */
+ {
+ tmp = gen_rtx_REG (SImode, 2 /* ECX */);
+ output_set_got (tmp, NULL_RTX);
+
+ xops[1] = tmp;
+ output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
+ output_asm_insn ("jmp\t{*}%1", xops);
+ }
+ }
+}
+
+static void
+x86_file_start (void)
+{
+ default_file_start ();
+#if TARGET_MACHO
+ darwin_file_start ();
+#endif
+ if (X86_FILE_START_VERSION_DIRECTIVE)
+ fputs ("\t.version\t\"01.01\"\n", asm_out_file);
+ if (X86_FILE_START_FLTUSED)
+ fputs ("\t.global\t__fltused\n", asm_out_file);
+ if (ix86_asm_dialect == ASM_INTEL)
+ fputs ("\t.intel_syntax\n", asm_out_file);
+}
+
+int
+x86_field_alignment (tree field, int computed)
+{
+ enum machine_mode mode;
+ tree type = TREE_TYPE (field);
+
+ if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
+ return computed;
+ /* APPLE LOCAL begin mac68k alignment */
+#if TARGET_MACHO
+ if (OPTION_ALIGN_MAC68K)
+ {
+ if (computed >= 128)
+ return computed;
+ return MIN (computed, 16);
+ }
+#endif
+ /* APPLE LOCAL end mac68k alignment */
+ mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
+ ? get_inner_array_type (type) : type);
+ if (mode == DFmode || mode == DCmode
+ || GET_MODE_CLASS (mode) == MODE_INT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ return MIN (32, computed);
+ return computed;
+}
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+void
+x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
+{
+ if (TARGET_64BIT)
+ if (flag_pic)
+ {
+#ifndef NO_PROFILE_COUNTERS
+ fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
+#endif
+ fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
+ }
+ else
+ {
+#ifndef NO_PROFILE_COUNTERS
+ fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
+#endif
+ fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
+ }
+ else if (flag_pic)
+ {
+#ifndef NO_PROFILE_COUNTERS
+ fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
+ LPREFIX, labelno, PROFILE_COUNT_REGISTER);
+#endif
+ fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
+ }
+ else
+ {
+#ifndef NO_PROFILE_COUNTERS
+ fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
+ PROFILE_COUNT_REGISTER);
+#endif
+ fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
+ }
+}
+
+/* We don't have exact information about the insn sizes, but we may assume
+ quite safely that we are informed about all 1-byte insns and memory
+ address sizes. This is enough to eliminate unnecessary padding in
+ 99% of cases. */
+
+static int
+min_insn_size (rtx insn)
+{
+ int l = 0;
+
+ if (!INSN_P (insn) || !active_insn_p (insn))
+ return 0;
+
+ /* Discard alignments we've emitted, and jump tables. */
+ if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
+ && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
+ return 0;
+ if (GET_CODE (insn) == JUMP_INSN
+ && (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
+ return 0;
+
+ /* Important case: calls are always 5 bytes.
+ It is common to have many calls in a row. */
+ if (GET_CODE (insn) == CALL_INSN
+ && symbolic_reference_mentioned_p (PATTERN (insn))
+ && !SIBLING_CALL_P (insn))
+ return 5;
+ if (get_attr_length (insn) <= 1)
+ return 1;
+
+ /* For normal instructions we may rely on the sizes of addresses
+ and the presence of a symbol to require 4 bytes of encoding.
+ This is not the case for jumps, where references are PC-relative. */
+ if (GET_CODE (insn) != JUMP_INSN)
+ {
+ l = get_attr_length_address (insn);
+ if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
+ l = 4;
+ }
+ if (l)
+ return 1+l;
+ else
+ return 2;
+}
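+
+/* Illustrative estimates from the rules above (a sketch, not exhaustive):
+     call _foo (symbolic, non-sibling)   -> 5 bytes
+     insn with length attribute <= 1     -> 1 byte
+     insn with a symbolic memory ref     -> 1 + 4 = 5 bytes
+     anything else                       -> 2 bytes.  */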
+
+/* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
+ 16-byte window. */
+
+static void
+ix86_avoid_jump_misspredicts (void)
+{
+ rtx insn, start = get_insns ();
+ int nbytes = 0, njumps = 0;
+ int isjump = 0;
+
+ /* Look for all minimal intervals of instructions containing 4 jumps.
+ The intervals are bounded by START and INSN. NBYTES is the total
+ size of the instructions in the interval, including INSN and not
+ including START. When NBYTES is smaller than 16 bytes, it is possible
+ that START and INSN end up in the same 16-byte window.
+
+ The smallest offset in the window at which INSN can start is the case
+ where START ends at offset 0. The offset of INSN is then
+ NBYTES - sizeof (INSN). We emit an alignment to the 16-byte window
+ with max skip 15 - NBYTES + sizeof (INSN), matching PADSIZE below.
+ */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ nbytes += min_insn_size (insn);
+ if (dump_file)
+ fprintf(dump_file, "Insn %i estimated to %i bytes\n",
+ INSN_UID (insn), min_insn_size (insn));
+ if ((GET_CODE (insn) == JUMP_INSN
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
+ || GET_CODE (insn) == CALL_INSN)
+ njumps++;
+ else
+ continue;
+
+ while (njumps > 3)
+ {
+ start = NEXT_INSN (start);
+ if ((GET_CODE (start) == JUMP_INSN
+ && GET_CODE (PATTERN (start)) != ADDR_VEC
+ && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
+ || GET_CODE (start) == CALL_INSN)
+ njumps--, isjump = 1;
+ else
+ isjump = 0;
+ nbytes -= min_insn_size (start);
+ }
+ gcc_assert (njumps >= 0);
+ if (dump_file)
+ fprintf (dump_file, "Interval %i to %i has %i bytes\n",
+ INSN_UID (start), INSN_UID (insn), nbytes);
+
+ if (njumps == 3 && isjump && nbytes < 16)
+ {
+ int padsize = 15 - nbytes + min_insn_size (insn);
+
+ if (dump_file)
+ fprintf (dump_file, "Padding insn %i by %i bytes!\n",
+ INSN_UID (insn), padsize);
+ emit_insn_before (gen_align (GEN_INT (padsize)), insn);
+ }
+ }
+}
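+
+/* Worked example: suppose START is a branch and three more branches,
+   the last being INSN, follow within NBYTES = 12 bytes, with
+   min_insn_size (INSN) == 2.  Then padsize = 15 - 12 + 2 = 5, and the
+   5-byte align emitted before INSN ensures the four branches can no
+   longer share a single 16-byte window.  */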
+
+/* AMD Athlon works faster when RET is not the destination of a conditional
+ jump or directly preceded by another jump instruction. We avoid the
+ penalty by inserting a NOP just before the RET instruction in such
+ cases. */
+static void
+ix86_pad_returns (void)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+ {
+ basic_block bb = e->src;
+ rtx ret = BB_END (bb);
+ rtx prev;
+ bool replace = false;
+
+ if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
+ || !maybe_hot_bb_p (bb))
+ continue;
+ for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
+ if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
+ break;
+ if (prev && GET_CODE (prev) == CODE_LABEL)
+ {
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (EDGE_FREQUENCY (e) && e->src->index >= 0
+ && !(e->flags & EDGE_FALLTHRU))
+ replace = true;
+ }
+ if (!replace)
+ {
+ prev = prev_active_insn (ret);
+ if (prev
+ && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
+ || GET_CODE (prev) == CALL_INSN))
+ replace = true;
+ /* Empty functions get a branch mispredict even when the jump destination
+ is not visible to us. */
+ if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
+ replace = true;
+ }
+ if (replace)
+ {
+ emit_insn_before (gen_return_internal_long (), ret);
+ delete_insn (ret);
+ }
+ }
+}
+
+/* Implement machine-specific optimizations. We implement padding of returns
+ for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
+static void
+ix86_reorg (void)
+{
+ if (TARGET_PAD_RETURNS && optimize && !optimize_size)
+ ix86_pad_returns ();
+ if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
+ ix86_avoid_jump_misspredicts ();
+}
+
+/* Return nonzero when a QImode register that must be represented via a REX
+ prefix is used. */
+bool
+x86_extended_QIreg_mentioned_p (rtx insn)
+{
+ int i;
+ extract_insn_cached (insn);
+ for (i = 0; i < recog_data.n_operands; i++)
+ if (REG_P (recog_data.operand[i])
+ && REGNO (recog_data.operand[i]) >= 4)
+ return true;
+ return false;
+}
+
+/* Return nonzero when P points to a register encoded via a REX prefix.
+ Called via for_each_rtx. */
+static int
+extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
+{
+ unsigned int regno;
+ if (!REG_P (*p))
+ return 0;
+ regno = REGNO (*p);
+ return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
+}
+
+/* Return true when INSN mentions a register that must be encoded using a
+ REX prefix. */
+bool
+x86_extended_reg_mentioned_p (rtx insn)
+{
+ return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
+}
+
+/* Generate an unsigned DImode/SImode to FP conversion. This is the same code
+ optabs would emit if we didn't have TFmode patterns. */
+
+void
+x86_emit_floatuns (rtx operands[2])
+{
+ rtx neglab, donelab, i0, i1, f0, in, out;
+ enum machine_mode mode, inmode;
+
+ inmode = GET_MODE (operands[1]);
+ /* APPLE LOCAL begin 4176531 4424891 */
+ mode = GET_MODE (operands[0]);
+ if (!TARGET_64BIT && mode == DFmode && !optimize_size)
+ {
+ switch (inmode)
+ {
+ case SImode:
+ ix86_expand_convert_uns_SI2DF_sse (operands);
+ break;
+ case DImode:
+ ix86_expand_convert_uns_DI2DF_sse (operands);
+ break;
+ default:
+ abort ();
+ break;
+ }
+ return;
+ }
+ /* APPLE LOCAL end 4176531 4424891 */
+
+ out = operands[0];
+ in = force_reg (inmode, operands[1]);
+ /* APPLE LOCAL begin one line deletion 4424891 */
+ /* APPLE LOCAL end one line deletion 4424891 */
+ neglab = gen_label_rtx ();
+ donelab = gen_label_rtx ();
+ i1 = gen_reg_rtx (Pmode);
+ f0 = gen_reg_rtx (mode);
+
+ emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
+
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
+ emit_jump_insn (gen_jump (donelab));
+ emit_barrier ();
+
+ emit_label (neglab);
+
+ i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
+ i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
+ i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
+ expand_float (f0, i0, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
+
+ emit_label (donelab);
+}
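+
+/* In C terms, for unsigned X the sequence above computes (a sketch):
+
+     if ((signed) X >= 0)
+       result = (fp) X;
+     else
+       result = 2 * (fp) ((X >> 1) | (X & 1));
+
+   OR-ing the low bit back in keeps the final rounding correct.  */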
+
+/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
+ with all elements equal to VAL. Return true if successful. */
+
+static bool
+ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
+ rtx target, rtx val)
+{
+ enum machine_mode smode, wsmode, wvmode;
+ rtx x;
+
+ switch (mode)
+ {
+ case V2SImode:
+ case V2SFmode:
+ if (!mmx_ok)
+ return false;
+ /* FALLTHRU */
+
+ case V2DFmode:
+ case V2DImode:
+ case V4SFmode:
+ case V4SImode:
+ val = force_reg (GET_MODE_INNER (mode), val);
+ x = gen_rtx_VEC_DUPLICATE (mode, val);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+ return true;
+
+ case V4HImode:
+ if (!mmx_ok)
+ return false;
+ if (TARGET_SSE || TARGET_3DNOW_A)
+ {
+ val = gen_lowpart (SImode, val);
+ x = gen_rtx_TRUNCATE (HImode, val);
+ x = gen_rtx_VEC_DUPLICATE (mode, x);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+ return true;
+ }
+ else
+ {
+ smode = HImode;
+ wsmode = SImode;
+ wvmode = V2SImode;
+ goto widen;
+ }
+
+ case V8QImode:
+ if (!mmx_ok)
+ return false;
+ smode = QImode;
+ wsmode = HImode;
+ wvmode = V4HImode;
+ goto widen;
+ case V8HImode:
+ if (TARGET_SSE2)
+ {
+ rtx tmp1, tmp2;
+ /* Extend HImode to SImode using a paradoxical SUBREG. */
+ tmp1 = gen_reg_rtx (SImode);
+ emit_move_insn (tmp1, gen_lowpart (SImode, val));
+ /* Insert the SImode value as low element of V4SImode vector. */
+ tmp2 = gen_reg_rtx (V4SImode);
+ tmp1 = gen_rtx_VEC_MERGE (V4SImode,
+ gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
+ CONST0_RTX (V4SImode),
+ const1_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
+ /* Cast the V4SImode vector back to a V8HImode vector. */
+ tmp1 = gen_reg_rtx (V8HImode);
+ emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
+ /* Duplicate the low short through the whole low SImode word. */
+ emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
+ /* Cast the V8HImode vector back to a V4SImode vector. */
+ tmp2 = gen_reg_rtx (V4SImode);
+ emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
+ /* Replicate the low element of the V4SImode vector. */
+ emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
+ /* Cast the V4SImode vector back to V8HImode, and store in target. */
+ emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
+ return true;
+ }
+ smode = HImode;
+ wsmode = SImode;
+ wvmode = V4SImode;
+ goto widen;
+ case V16QImode:
+ if (TARGET_SSE2)
+ {
+ rtx tmp1, tmp2;
+ /* Extend QImode to SImode using a paradoxical SUBREG. */
+ tmp1 = gen_reg_rtx (SImode);
+ emit_move_insn (tmp1, gen_lowpart (SImode, val));
+ /* Insert the SImode value as low element of V4SImode vector. */
+ tmp2 = gen_reg_rtx (V4SImode);
+ tmp1 = gen_rtx_VEC_MERGE (V4SImode,
+ gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
+ CONST0_RTX (V4SImode),
+ const1_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
+ /* Cast the V4SImode vector back to a V16QImode vector. */
+ tmp1 = gen_reg_rtx (V16QImode);
+ emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
+ /* Duplicate the low byte through the whole low SImode word. */
+ emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
+ emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
+ /* Cast the V16QImode vector back to a V4SImode vector. */
+ tmp2 = gen_reg_rtx (V4SImode);
+ emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
+ /* Replicate the low element of the V4SImode vector. */
+ emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
+ /* Cast the V4SImode vector back to V16QImode, and store in target. */
+ emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
+ return true;
+ }
+ smode = QImode;
+ wsmode = HImode;
+ wvmode = V8HImode;
+ goto widen;
+ widen:
+ /* Replicate the value once into the next wider mode and recurse. */
+ val = convert_modes (wsmode, smode, val, true);
+ x = expand_simple_binop (wsmode, ASHIFT, val,
+ GEN_INT (GET_MODE_BITSIZE (smode)),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
+
+ x = gen_reg_rtx (wvmode);
+ if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
+ gcc_unreachable ();
+ emit_move_insn (target, gen_lowpart (mode, x));
+ return true;
+
+ default:
+ return false;
+ }
+}
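+
+/* Example of the widening path above: to broadcast a QImode value B into
+   V8QImode without SSE2, first form the HImode value (B << 8) | B, then
+   recurse to duplicate that into V4HImode, and finally reinterpret the
+   result as V8QImode via gen_lowpart.  */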
+
+/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
+ whose ONE_VAR element is VAR and whose other elements are zero. Return
+ true if successful. */
+
+static bool
+ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
+ rtx target, rtx var, int one_var)
+{
+ enum machine_mode vsimode;
+ rtx new_target;
+ rtx x, tmp;
+
+ switch (mode)
+ {
+ case V2SFmode:
+ case V2SImode:
+ if (!mmx_ok)
+ return false;
+ /* FALLTHRU */
+
+ case V2DFmode:
+ case V2DImode:
+ if (one_var != 0)
+ return false;
+ var = force_reg (GET_MODE_INNER (mode), var);
+ x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+ return true;
+
+ case V4SFmode:
+ case V4SImode:
+ if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
+ new_target = gen_reg_rtx (mode);
+ else
+ new_target = target;
+ var = force_reg (GET_MODE_INNER (mode), var);
+ x = gen_rtx_VEC_DUPLICATE (mode, var);
+ x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
+ if (one_var != 0)
+ {
+ /* We need to shuffle the value to the correct position, so
+ create a new pseudo to store the intermediate result. */
+
+ /* With SSE2, we can use the integer shuffle insns. */
+ if (mode != V4SFmode && TARGET_SSE2)
+ {
+ emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
+ GEN_INT (1),
+ GEN_INT (one_var == 1 ? 0 : 1),
+ GEN_INT (one_var == 2 ? 0 : 1),
+ GEN_INT (one_var == 3 ? 0 : 1)));
+ if (target != new_target)
+ emit_move_insn (target, new_target);
+ return true;
+ }
+
+ /* Otherwise convert the intermediate result to V4SFmode and
+ use the SSE1 shuffle instructions. */
+ if (mode != V4SFmode)
+ {
+ tmp = gen_reg_rtx (V4SFmode);
+ emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
+ }
+ else
+ tmp = new_target;
+
+ emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
+ GEN_INT (1),
+ GEN_INT (one_var == 1 ? 0 : 1),
+ GEN_INT (one_var == 2 ? 0+4 : 1+4),
+ GEN_INT (one_var == 3 ? 0+4 : 1+4)));
+
+ if (mode != V4SFmode)
+ emit_move_insn (target, gen_lowpart (V4SImode, tmp));
+ else if (tmp != target)
+ emit_move_insn (target, tmp);
+ }
+ else if (target != new_target)
+ emit_move_insn (target, new_target);
+ return true;
+
+ case V8HImode:
+ case V16QImode:
+ vsimode = V4SImode;
+ goto widen;
+ case V4HImode:
+ case V8QImode:
+ if (!mmx_ok)
+ return false;
+ vsimode = V2SImode;
+ goto widen;
+ widen:
+ if (one_var != 0)
+ return false;
+
+ /* Zero extend the variable element to SImode and recurse. */
+ var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
+
+ x = gen_reg_rtx (vsimode);
+ if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
+ var, one_var))
+ gcc_unreachable ();
+
+ emit_move_insn (target, gen_lowpart (mode, x));
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
+ consisting of the values in VALS. It is known that all elements
+ except ONE_VAR are constants. Return true if successful. */
+
+static bool
+ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
+ rtx target, rtx vals, int one_var)
+{
+ rtx var = XVECEXP (vals, 0, one_var);
+ enum machine_mode wmode;
+ rtx const_vec, x;
+
+ const_vec = copy_rtx (vals);
+ XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
+ const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
+
+ switch (mode)
+ {
+ case V2DFmode:
+ case V2DImode:
+ case V2SFmode:
+ case V2SImode:
+ /* For the two element vectors, it's just as easy to use
+ the general case. */
+ return false;
+
+ case V4SFmode:
+ case V4SImode:
+ case V8HImode:
+ case V4HImode:
+ break;
+
+ case V16QImode:
+ wmode = V8HImode;
+ goto widen;
+ case V8QImode:
+ wmode = V4HImode;
+ goto widen;
+ widen:
+ /* There's no way to set one QImode entry easily. Combine
+ the variable value with its adjacent constant value, and
+ promote to an HImode set. */
+ x = XVECEXP (vals, 0, one_var ^ 1);
+ if (one_var & 1)
+ {
+ var = convert_modes (HImode, QImode, var, true);
+ var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ x = GEN_INT (INTVAL (x) & 0xff);
+ }
+ else
+ {
+ var = convert_modes (HImode, QImode, var, true);
+ x = gen_int_mode (INTVAL (x) << 8, HImode);
+ }
+ if (x != const0_rtx)
+ var = expand_simple_binop (HImode, IOR, var, x, var,
+ 1, OPTAB_LIB_WIDEN);
+
+ x = gen_reg_rtx (wmode);
+ emit_move_insn (x, gen_lowpart (wmode, const_vec));
+ ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
+
+ emit_move_insn (target, gen_lowpart (mode, x));
+ return true;
+
+ default:
+ return false;
+ }
+
+ emit_move_insn (target, const_vec);
+ ix86_expand_vector_set (mmx_ok, target, var, one_var);
+ return true;
+}
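+
+/* Example of the QImode widening above: to build { 7, X, ... } with
+   ONE_VAR == 1, the variable byte X is combined with its even neighbor
+   into the HImode value (X << 8) | 7, and a single HImode vector_set at
+   element 0 patches the constant vector loaded from the pool.  */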
+
+/* A subroutine of ix86_expand_vector_init. Handle the most general case:
+ all values variable, and none identical. */
+
+static void
+ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
+ rtx target, rtx vals)
+{
+ enum machine_mode half_mode = GET_MODE_INNER (mode);
+ rtx op0 = NULL, op1 = NULL;
+ bool use_vec_concat = false;
+
+ switch (mode)
+ {
+ case V2SFmode:
+ case V2SImode:
+ if (!mmx_ok && !TARGET_SSE)
+ break;
+ /* FALLTHRU */
+
+ case V2DFmode:
+ case V2DImode:
+ /* For the two element vectors, we always implement VEC_CONCAT. */
+ op0 = XVECEXP (vals, 0, 0);
+ op1 = XVECEXP (vals, 0, 1);
+ use_vec_concat = true;
+ break;
+
+ case V4SFmode:
+ half_mode = V2SFmode;
+ goto half;
+ case V4SImode:
+ half_mode = V2SImode;
+ goto half;
+ half:
+ {
+ rtvec v;
+
+ /* For V4SF and V4SI, we implement a concat of two V2 vectors.
+ Recurse to load the two halves. */
+
+ op0 = gen_reg_rtx (half_mode);
+ v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
+ ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
+
+ op1 = gen_reg_rtx (half_mode);
+ v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
+ ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
+
+ use_vec_concat = true;
+ }
+ break;
+
+ case V8HImode:
+ case V16QImode:
+ case V4HImode:
+ case V8QImode:
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (use_vec_concat)
+ {
+ if (!register_operand (op0, half_mode))
+ op0 = force_reg (half_mode, op0);
+ if (!register_operand (op1, half_mode))
+ op1 = force_reg (half_mode, op1);
+
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_VEC_CONCAT (mode, op0, op1)));
+ }
+ else
+ {
+ int i, j, n_elts, n_words, n_elt_per_word;
+ enum machine_mode inner_mode;
+ rtx words[4], shift;
+
+ inner_mode = GET_MODE_INNER (mode);
+ n_elts = GET_MODE_NUNITS (mode);
+ n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ n_elt_per_word = n_elts / n_words;
+ shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
+
+ for (i = 0; i < n_words; ++i)
+ {
+ rtx word = NULL_RTX;
+
+ for (j = 0; j < n_elt_per_word; ++j)
+ {
+ rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
+ elt = convert_modes (word_mode, inner_mode, elt, true);
+
+ if (j == 0)
+ word = elt;
+ else
+ {
+ word = expand_simple_binop (word_mode, ASHIFT, word, shift,
+ word, 1, OPTAB_LIB_WIDEN);
+ word = expand_simple_binop (word_mode, IOR, word, elt,
+ word, 1, OPTAB_LIB_WIDEN);
+ }
+ }
+
+ words[i] = word;
+ }
+
+ if (n_words == 1)
+ emit_move_insn (target, gen_lowpart (mode, words[0]));
+ else if (n_words == 2)
+ {
+ rtx tmp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
+ emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
+ emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
+ emit_move_insn (target, tmp);
+ }
+ else if (n_words == 4)
+ {
+ rtx tmp = gen_reg_rtx (V4SImode);
+ vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
+ ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
+ emit_move_insn (target, gen_lowpart (mode, tmp));
+ }
+ else
+ gcc_unreachable ();
+ }
+}
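+
+/* Example of the word-building loop above: for V8HImode
+   { a, b, c, d, e, f, g, h } on a 32-bit target, each SImode word is
+   assembled as (odd_elt << 16) | even_elt, and the resulting four words
+   are combined recursively as a V4SImode initializer.  */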
+
+/* Initialize vector TARGET via VALS. Suppress the use of MMX
+ instructions unless MMX_OK is true. */
+
+void
+ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true, all_const_zero = true;
+ int i;
+ rtx x;
+
+ for (i = 0; i < n_elts; ++i)
+ {
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ n_var++, one_var = i;
+ else if (x != CONST0_RTX (inner_mode))
+ all_const_zero = false;
+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
+ }
+
+ /* Constants are best loaded from the constant pool. */
+ if (n_var == 0)
+ {
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
+
+ /* If all values are identical, broadcast the value. */
+ if (all_same
+ && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
+ XVECEXP (vals, 0, 0)))
+ return;
+
+ /* Values where only one field is non-constant are best loaded from
+ the pool and overwritten via move later. */
+ if (n_var == 1)
+ {
+ if (all_const_zero
+ && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
+ XVECEXP (vals, 0, one_var),
+ one_var))
+ return;
+
+ if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
+ return;
+ }
+
+ ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
+}
+
+void
+ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ bool use_vec_merge = false;
+ rtx tmp;
+
+ switch (mode)
+ {
+ case V2SFmode:
+ case V2SImode:
+ if (mmx_ok)
+ {
+ tmp = gen_reg_rtx (GET_MODE_INNER (mode));
+ ix86_expand_vector_extract (true, tmp, target, 1 - elt);
+ if (elt == 0)
+ tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
+ else
+ tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ return;
+ }
+ break;
+
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ case V2DImode:
+ use_vec_merge = TARGET_SSE4_1;
+ if (use_vec_merge)
+ break;
+ /* FALLTHRU */
+
+ case V2DFmode:
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ {
+ rtx op0, op1;
+
+ /* For the two element vectors, we implement a VEC_CONCAT with
+ the extraction of the other element. */
+
+ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
+ tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
+
+ if (elt == 0)
+ op0 = val, op1 = tmp;
+ else
+ op0 = tmp, op1 = val;
+
+ tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ }
+ return;
+
+ case V4SFmode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_merge = TARGET_SSE4_1;
+ if (use_vec_merge)
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ switch (elt)
+ {
+ case 0:
+ use_vec_merge = true;
+ break;
+
+ case 1:
+ /* tmp = target = A B C D */
+ tmp = copy_to_reg (target);
+ /* target = A A B B */
+ emit_insn (gen_sse_unpcklps (target, target, target));
+ /* target = X A B B */
+ ix86_expand_vector_set (false, target, val, 0);
+ /* target = A X C D */
+ emit_insn (gen_sse_shufps_1 (target, target, tmp,
+ GEN_INT (1), GEN_INT (0),
+ GEN_INT (2+4), GEN_INT (3+4)));
+ return;
+
+ case 2:
+ /* tmp = target = A B C D */
+ tmp = copy_to_reg (target);
+ /* tmp = X B C D */
+ ix86_expand_vector_set (false, tmp, val, 0);
+ /* target = A B X D */
+ emit_insn (gen_sse_shufps_1 (target, target, tmp,
+ GEN_INT (0), GEN_INT (1),
+ GEN_INT (0+4), GEN_INT (3+4)));
+ return;
+
+ case 3:
+ /* tmp = target = A B C D */
+ tmp = copy_to_reg (target);
+ /* tmp = X B C D */
+ ix86_expand_vector_set (false, tmp, val, 0);
+ /* target = A B C X */
+ emit_insn (gen_sse_shufps_1 (target, target, tmp,
+ GEN_INT (0), GEN_INT (1),
+ GEN_INT (2+4), GEN_INT (0+4)));
+ return;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case V4SImode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_merge = TARGET_SSE4_1;
+ if (use_vec_merge)
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ /* Element 0 handled by vec_merge below. */
+ if (elt == 0)
+ {
+ use_vec_merge = true;
+ break;
+ }
+
+ if (TARGET_SSE2)
+ {
+ /* With SSE2, use integer shuffles to swap element 0 and ELT,
+ store into element 0, then shuffle them back. */
+
+ rtx order[4];
+
+ order[0] = GEN_INT (elt);
+ order[1] = const1_rtx;
+ order[2] = const2_rtx;
+ order[3] = GEN_INT (3);
+ order[elt] = const0_rtx;
+
+ emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
+ order[1], order[2], order[3]));
+
+ ix86_expand_vector_set (false, target, val, 0);
+
+ emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
+ order[1], order[2], order[3]));
+ }
+ else
+ {
+ /* For SSE1, we have to reuse the V4SF code. */
+ ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
+ gen_lowpart (SFmode, val), elt);
+ }
+ return;
+
+ case V8HImode:
+ use_vec_merge = TARGET_SSE2;
+ break;
+ case V4HImode:
+ use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
+ break;
+
+ case V16QImode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_merge = TARGET_SSE4_1;
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ case V8QImode:
+ default:
+ break;
+ }
+
+ if (use_vec_merge)
+ {
+ tmp = gen_rtx_VEC_DUPLICATE (mode, val);
+ tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ }
+ else
+ {
+ rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
+
+ emit_move_insn (mem, target);
+
+ tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
+ emit_move_insn (tmp, val);
+
+ emit_move_insn (target, mem);
+ }
+}
+
+void
+ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
+{
+ enum machine_mode mode = GET_MODE (vec);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ bool use_vec_extr = false;
+ rtx tmp;
+
+ switch (mode)
+ {
+ case V2SImode:
+ case V2SFmode:
+ if (!mmx_ok)
+ break;
+ /* FALLTHRU */
+
+ case V2DFmode:
+ case V2DImode:
+ use_vec_extr = true;
+ break;
+
+ case V4SFmode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_extr = TARGET_SSE4_1;
+ if (use_vec_extr)
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ switch (elt)
+ {
+ case 0:
+ tmp = vec;
+ break;
+
+ case 1:
+ case 3:
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
+ GEN_INT (elt), GEN_INT (elt),
+ GEN_INT (elt+4), GEN_INT (elt+4)));
+ break;
+
+ case 2:
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_sse_unpckhps (tmp, vec, vec));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ vec = tmp;
+ use_vec_extr = true;
+ elt = 0;
+ break;
+
+ case V4SImode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_extr = TARGET_SSE4_1;
+ if (use_vec_extr)
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ if (TARGET_SSE2)
+ {
+ switch (elt)
+ {
+ case 0:
+ tmp = vec;
+ break;
+
+ case 1:
+ case 3:
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_sse2_pshufd_1 (tmp, vec,
+ GEN_INT (elt), GEN_INT (elt),
+ GEN_INT (elt), GEN_INT (elt)));
+ break;
+
+ case 2:
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ vec = tmp;
+ use_vec_extr = true;
+ elt = 0;
+ }
+ else
+ {
+ /* For SSE1, we have to reuse the V4SF code. */
+ ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
+ gen_lowpart (V4SFmode, vec), elt);
+ return;
+ }
+ break;
+
+ case V8HImode:
+ use_vec_extr = TARGET_SSE2;
+ break;
+ case V4HImode:
+ use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
+ break;
+
+ case V16QImode:
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ use_vec_extr = TARGET_SSE4_1;
+ break;
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+ case V8QImode:
+ /* ??? Could extract the appropriate HImode element and shift. */
+ default:
+ break;
+ }
+
+ if (use_vec_extr)
+ {
+ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
+ tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
+
+ /* Let the rtl optimizers know about the zero extension performed. */
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ if (inner_mode == QImode || inner_mode == HImode)
+ {
+ tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
+ target = gen_lowpart (SImode, target);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
+ }
+ else
+ {
+ rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
+
+ emit_move_insn (mem, vec);
+
+ tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
+ emit_move_insn (target, tmp);
+ }
+}
+
+/* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
+ pattern to reduce; DEST is the destination; IN is the input vector. */
+
+void
+ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
+{
+ rtx tmp1, tmp2, tmp3;
+
+ tmp1 = gen_reg_rtx (V4SFmode);
+ tmp2 = gen_reg_rtx (V4SFmode);
+ tmp3 = gen_reg_rtx (V4SFmode);
+
+ emit_insn (gen_sse_movhlps (tmp1, in, in));
+ emit_insn (fn (tmp2, tmp1, in));
+
+ emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
+ GEN_INT (1), GEN_INT (1),
+ GEN_INT (1+4), GEN_INT (1+4)));
+ emit_insn (fn (dest, tmp2, tmp3));
+}
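+
+/* Trace of the reduction above for IN = { a, b, c, d } with FN = plus:
+     movhlps:  tmp1 = { c, d, c, d }
+     fn:       tmp2 = { a+c, b+d, ... }
+     shufps:   tmp3 = broadcast of tmp2 element 1 = { b+d, ... }
+     fn:       dest element 0 = a+c+b+d.  */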
+
+/* Target hook for scalar_mode_supported_p. */
+static bool
+ix86_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Implements target hook vector_mode_supported_p. */
+static bool
+ix86_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
+ return true;
+ if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
+ return true;
+ if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
+ return true;
+ if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
+ return true;
+ return false;
+}
+
+/* Worker function for TARGET_MD_ASM_CLOBBERS.
+
+ We do this in the new i386 backend to maintain source compatibility
+ with the old cc0-based compiler. */
+
+static tree
+ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
+ tree inputs ATTRIBUTE_UNUSED,
+ tree clobbers)
+{
+ clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
+ clobbers);
+ return clobbers;
+}
+
+/* Return true if this goes in large data/bss. */
+
+static bool
+ix86_in_large_data_p (tree exp)
+{
+ if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
+ return false;
+
+ /* Functions are never large data. */
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ return false;
+
+ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+ if (strcmp (section, ".ldata") == 0
+ || strcmp (section, ".lbss") == 0)
+ return true;
+ return false;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+
+ /* If this is an incomplete type with size 0, then we can't put it
+ in data because it might be too big when completed. */
+ if (!size || size > ix86_section_threshold)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+ix86_encode_section_info (tree decl, rtx rtl, int first)
+{
+ default_encode_section_info (decl, rtl, first);
+
+ if (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
+ && ix86_in_large_data_p (decl))
+ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
+}
+
+/* Worker function for REVERSE_CONDITION. */
+
+enum rtx_code
+ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
+{
+ return (mode != CCFPmode && mode != CCFPUmode
+ ? reverse_condition (code)
+ : reverse_condition_maybe_unordered (code));
+}
+
+/* Output code to perform an x87 FP register move, from OPERANDS[1]
+ to OPERANDS[0]. */
+
+const char *
+output_387_reg_move (rtx insn, rtx *operands)
+{
+ if (REG_P (operands[1])
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ {
+ if (REGNO (operands[0]) == FIRST_STACK_REG)
+ return output_387_ffreep (operands, 0);
+ return "fstp\t%y0";
+ }
+ if (STACK_TOP_P (operands[0]))
+ return "fld%z1\t%y1";
+ return "fst\t%y0";
+}
+
+/* Output code to perform a conditional jump to LABEL, if the C2 flag in
+ the FP status register is set. */
+
+void
+ix86_emit_fp_unordered_jump (rtx label)
+{
+ rtx reg = gen_reg_rtx (HImode);
+ rtx temp;
+
+ emit_insn (gen_x86_fnstsw_1 (reg));
+
+ if (TARGET_USE_SAHF)
+ {
+ emit_insn (gen_x86_sahf_1 (reg));
+
+ temp = gen_rtx_REG (CCmode, FLAGS_REG);
+ temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
+ }
+ else
+ {
+ emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
+
+ temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
+ }
+
+ temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx);
+ temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
+ emit_jump_insn (temp);
+}
+
+/* Output code to perform a log1p XFmode calculation. */
+
+void
+ix86_emit_i387_log1p (rtx op0, rtx op1)
+{
+ rtx label1 = gen_label_rtx ();
+ rtx label2 = gen_label_rtx ();
+
+ rtx tmp = gen_reg_rtx (XFmode);
+ rtx tmp2 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_absxf2 (tmp, op1));
+ emit_insn (gen_cmpxf (tmp,
+ CONST_DOUBLE_FROM_REAL_VALUE (
+ REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
+ XFmode)));
+ emit_jump_insn (gen_bge (label1));
+
+ emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
+ emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
+ emit_jump (label2);
+
+ emit_label (label1);
+ emit_move_insn (tmp, CONST1_RTX (XFmode));
+ emit_insn (gen_addxf3 (tmp, op1, tmp));
+ emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
+ emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
+
+ emit_label (label2);
+}
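+
+/* The magic constant above is 1 - sqrt(2)/2.  For |x| below it, fyl2xp1
+   computes op0 = ln(2) * log2 (1 + x) = log1p (x) directly and
+   accurately; outside that range 1 + x is formed explicitly and fyl2x
+   is used instead, since fyl2xp1 is only specified for the smaller
+   interval.  */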
+
+/* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
+
+static void
+i386_solaris_elf_named_section (const char *name, unsigned int flags,
+ tree decl)
+{
+ /* With Binutils 2.15, the "@unwind" marker must be specified on
+ every occurrence of the ".eh_frame" section, not just the first
+ one. */
+ if (TARGET_64BIT
+ && strcmp (name, ".eh_frame") == 0)
+ {
+ fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
+ flags & SECTION_WRITE ? "aw" : "a");
+ return;
+ }
+ default_elf_asm_named_section (name, flags, decl);
+}
+
+/* APPLE LOCAL begin regparmandstackparm */
+
+/* Mark the function type *PT as using the regparmandstackparmee calling convention. */
+static void
+ix86_make_regparmandstackparmee (tree *pt)
+{
+ decl_attributes (pt,
+ tree_cons (get_identifier ("regparmandstackparmee"),
+ NULL_TREE, TYPE_ATTRIBUTES (*pt)), 0);
+}
+
+/* Splay tree mapping fndecls marked 'regparmandstackparm' to their $3SSE equivalents. */
+static splay_tree ix86_darwin_regparmandstackparm_st;
+/* Cache for regparmandstackparm fntypes. */
+static splay_tree ix86_darwin_fntype_st;
+
+/* Append "$3SSE" to an ID, returning a new IDENTIFIER_NODE. */
+static tree
+ix86_darwin_regparmandstackparm_mangle_name (tree id)
+{
+ static const char *mangle_suffix = "$3SSE";
+ unsigned int mangle_length = strlen (mangle_suffix);
+ const char *name;
+ unsigned int orig_length;
+ char *buf;
+
+ if (!id)
+ return NULL_TREE;
+
+ name = IDENTIFIER_POINTER (id);
+ orig_length = strlen (name);
+ buf = alloca (orig_length + mangle_length + 1);
+
+ strcpy (buf, name);
+ strcat (buf, mangle_suffix);
+ return get_identifier (buf); /* get_identifier copies the string, so the alloca buffer may be discarded. */
+}
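+
+/* For example, the identifier "foo" yields the new identifier "foo$3SSE";
+   the fast-call clone is emitted under the mangled name.  */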
+
+/* Given the "normal" TRAD_FNDECL marked with 'regparmandstackparm',
+ return a duplicate fndecl marked 'regparmandstackparmee' (note trailing
+ 'ee'). Enter them as a pair in the splay tree ST, if non-null;
+ looking up the TRAD_FNDECL will return the new one. */
+static tree
+ix86_darwin_regparmandstackparm_dup_fndecl (tree trad_fndecl, splay_tree st)
+{
+ tree fntype;
+ tree new_fndecl;
+
+ fntype = TREE_TYPE (trad_fndecl);
+
+ /* NEW_FNDECL will be compiled with the XMM-based calling
+ convention, and TRAD_FNDECL (the original) will be compiled with
+ the traditional stack-based calling convention. */
+ new_fndecl = copy_node (trad_fndecl);
+ DECL_STRUCT_FUNCTION (new_fndecl) = (struct function *)0;
+ allocate_struct_function (new_fndecl);
+ DECL_STRUCT_FUNCTION (new_fndecl)->function_end_locus
+ = DECL_STRUCT_FUNCTION (trad_fndecl)->function_end_locus;
+ DECL_STRUCT_FUNCTION (new_fndecl)->static_chain_decl =
+ DECL_STRUCT_FUNCTION (trad_fndecl)->static_chain_decl;
+ DECL_RESULT (new_fndecl) = copy_node (DECL_RESULT (trad_fndecl));
+ DECL_CONTEXT (DECL_RESULT (new_fndecl)) = new_fndecl;
+ SET_DECL_ASSEMBLER_NAME (new_fndecl, 0);
+ DECL_NAME (new_fndecl) = ix86_darwin_regparmandstackparm_mangle_name (DECL_NAME (trad_fndecl));
+ TYPE_ATTRIBUTES (TREE_TYPE (new_fndecl))
+ = copy_list (TYPE_ATTRIBUTES (TREE_TYPE (trad_fndecl)));
+ ix86_make_regparmandstackparmee (&TREE_TYPE (new_fndecl));
+ /* Kludge: block copied from tree-inline.c(save_body). Should
+ be refactored into a common shareable routine. */
+ {
+ tree *parg;
+
+ for (parg = &DECL_ARGUMENTS (new_fndecl);
+ *parg;
+ parg = &TREE_CHAIN (*parg))
+ {
+ tree new = copy_node (*parg);
+
+ lang_hooks.dup_lang_specific_decl (new);
+ DECL_ABSTRACT_ORIGIN (new) = DECL_ORIGIN (*parg);
+ DECL_CONTEXT (new) = new_fndecl;
+ /* Note: it may be possible to move the original parameters
+ with the function body, making this splay tree
+ unnecessary. */
+ if (st)
+ splay_tree_insert (st, (splay_tree_key) *parg, (splay_tree_value) new);
+ TREE_CHAIN (new) = TREE_CHAIN (*parg);
+ *parg = new;
+ }
+
+ if (DECL_STRUCT_FUNCTION (trad_fndecl)->static_chain_decl)
+ {
+ tree old = DECL_STRUCT_FUNCTION (trad_fndecl)->static_chain_decl;
+ tree new = copy_node (old);
+
+ lang_hooks.dup_lang_specific_decl (new);
+ DECL_ABSTRACT_ORIGIN (new) = DECL_ORIGIN (old);
+ DECL_CONTEXT (new) = new_fndecl;
+ if (st)
+ splay_tree_insert (st, (splay_tree_key) old, (splay_tree_value) new);
+ TREE_CHAIN (new) = TREE_CHAIN (old);
+ DECL_STRUCT_FUNCTION (new_fndecl)->static_chain_decl = new;
+ }
+
+ if (st)
+ splay_tree_insert (st, (splay_tree_key) DECL_RESULT (trad_fndecl),
+ (splay_tree_value) DECL_RESULT (new_fndecl));
+ }
+#if 0
+ /* Testing Kludge: If TREE_READONLY is set, cgen can and
+ occasionally will delete "pure" (no side-effect) calls to a
+ library function. Cleared here to preclude this when
+ test-building libraries. */
+ TREE_READONLY (new_fndecl) = false;
+#endif
+
+ return new_fndecl;
+}
+
+/* FNDECL has no body, but user has marked it as a regparmandstackparm
+ item. Create a corresponding regparmandstackparm decl for it, and
+ arrange for calls to be redirected to the regparmandstackparm
+ version. */
+static tree
+ix86_darwin_regparmandstackparm_extern_decl (tree trad_fndecl)
+{
+ tree new_fndecl;
+
+ /* new_fndecl = ix86_darwin_regparmandstackparm_dup_fndecl (trad_fndecl, (splay_tree)0); */
+ new_fndecl = copy_node (trad_fndecl);
+ DECL_NAME (new_fndecl) = ix86_darwin_regparmandstackparm_mangle_name (DECL_NAME (trad_fndecl));
+ DECL_STRUCT_FUNCTION (new_fndecl) = (struct function *)0;
+ SET_DECL_ASSEMBLER_NAME (new_fndecl, 0);
+ ix86_make_regparmandstackparmee (&TREE_TYPE (new_fndecl));
+ cgraph_finalize_function (new_fndecl, /* nested = */ true);
+ if (!ix86_darwin_regparmandstackparm_st)
+ ix86_darwin_regparmandstackparm_st
+ = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
+ splay_tree_insert (ix86_darwin_regparmandstackparm_st,
+ (splay_tree_key) trad_fndecl, (splay_tree_value) new_fndecl);
+ return new_fndecl;
+}
+
+/* Invoked after all functions have been seen and digested, but before
+ any inlining decisions have been made. Walk the callgraph, seeking
+ calls to functions that have regparmandstackparm variants. Rewrite the
+ calls, directing them to the new 'regparmandstackparmee' versions. */
+void
+ix86_darwin_redirect_calls(void)
+{
+ struct cgraph_node *fastcall_node, *node;
+ struct cgraph_edge *edge, *next_edge;
+ tree addr, fastcall_decl, orig_fntype;
+ splay_tree_node call_stn, type_stn;
+
+ if (!flag_unit_at_a_time)
+ return;
+
+ if (!ix86_darwin_fntype_st)
+ ix86_darwin_fntype_st = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
+
+ if (!ix86_darwin_regparmandstackparm_st)
+ ix86_darwin_regparmandstackparm_st
+ = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
+
+ /* Extern decls marked "regparmandstackparm" beget regparmandstackparmee
+ decls. */
+ for (node = cgraph_nodes; node; node = node->next)
+ if (!DECL_SAVED_TREE (node->decl)
+ && lookup_attribute ("regparmandstackparm",
+ TYPE_ATTRIBUTES (TREE_TYPE (node->decl)))
+ && !lookup_attribute ("regparmandstackparmee",
+ TYPE_ATTRIBUTES (TREE_TYPE (node->decl))))
+ {
+ fastcall_decl = ix86_darwin_regparmandstackparm_extern_decl (node->decl);
+ splay_tree_insert (ix86_darwin_regparmandstackparm_st,
+ (splay_tree_key) node->decl,
+ (splay_tree_value) fastcall_decl);
+ }
+
+ /* Walk the callgraph, rewriting calls as we go. */
+ for (node = cgraph_nodes; node; node = node->next)
+ {
+ call_stn = splay_tree_lookup (ix86_darwin_regparmandstackparm_st,
+ (splay_tree_key)node->decl);
+ /* If this function was in our splay-tree, we previously created
+ a regparmandstackparm version of it. */
+ if (call_stn)
+ {
+ fastcall_decl = (tree)call_stn->value;
+ fastcall_node = cgraph_node (fastcall_decl);
+ /* Redirect all calls to this fn to the regparmandstackparm
+ version. */
+ for (edge = next_edge = node->callers; edge; edge = next_edge)
+ {
+ tree call, stmt;
+ next_edge = next_edge->next_caller;
+ cgraph_redirect_edge_callee (edge, fastcall_node);
+ /* APPLE LOCAL */
+ /* MERGE FIXME call_expr -> call_stmt */
+ stmt = edge->call_stmt;
+ call = get_call_expr_in (stmt);
+ addr = TREE_OPERAND (call, 0);
+ TREE_OPERAND (addr, 0) = fastcall_decl;
+ orig_fntype = TREE_TYPE (addr);
+ /* Likewise, revise the TYPE of the ADDR node between
+ the CALL_EXPR and the FNDECL. This type determines
+ the parameters and calling convention applied to this
+ CALL_EXPR. */
+ type_stn = splay_tree_lookup (ix86_darwin_fntype_st, (splay_tree_value)orig_fntype);
+ if (type_stn)
+ TREE_TYPE (addr) = (tree)type_stn->value;
+ else
+ {
+ ix86_make_regparmandstackparmee (&TREE_TYPE (addr));
+ splay_tree_insert (ix86_darwin_fntype_st,
+ (splay_tree_key)orig_fntype,
+ (splay_tree_value)TREE_TYPE (addr));
+ }
+ }
+ }
+ }
+}
+
+/* Information necessary to re-context a function body. */
+typedef struct {
+ tree old_context;
+ tree new_context;
+ splay_tree decl_map;
+} recontext_data;
+
+/* Visit every node of a function body; if it points at the
+ OLD_CONTEXT, re-direct it to the NEW_CONTEXT. Invoked via
+ walk_tree. DECL_MAP is a splay tree that maps the original
+ parameters to new ones. */
+static tree
+ix86_darwin_re_context_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data)
+{
+ tree t;
+ recontext_data *rcd;
+ enum tree_code_class class;
+ splay_tree_node n;
+
+ if (!tp)
+ return NULL_TREE;
+
+ t = *tp;
+ if (!t)
+ return NULL_TREE;
+
+ rcd = (recontext_data *)data;
+ n = splay_tree_lookup (rcd->decl_map, (splay_tree_key) t);
+ if (n)
+ {
+ *tp = (tree)n->value;
+ return NULL_TREE;
+ }
+
+ class = TREE_CODE_CLASS (TREE_CODE (t));
+ if (class != tcc_declaration)
+ return NULL_TREE;
+
+ if (DECL_CONTEXT (t) == rcd->old_context)
+ DECL_CONTEXT (t) = rcd->new_context;
+
+ return NULL_TREE;
+}
+
+/* Walk a function body, updating every pointer to OLD_CONTEXT to
+ NEW_CONTEXT. TP is the top of the function body, and ST is a splay
+ tree of replacements for the parameters. */
+static tree
+ix86_darwin_re_context (tree *tp, tree old_context, tree new_context, splay_tree st)
+{
+ recontext_data rcd;
+ tree ret;
+
+ rcd.old_context = old_context;
+ rcd.new_context = new_context;
+ rcd.decl_map = st;
+
+ ret = walk_tree (tp, ix86_darwin_re_context_1,
+ (void *)&rcd, (struct pointer_set_t *)0);
+ return ret;
+}
+
+/* Given TRAD_FNDECL, create a regparmandstackparm variant and hang the
+ DECL_SAVED_TREE body there. Create a new, one-statement body for
+ TRAD_FNDECL that calls the new one. If the return types are
+ compatible (e.g. non-FP), the call can usually be sibcalled. The
+ inliner will often copy the body from NEW_FNDECL into TRAD_FNDECL,
+ and we do nothing to prevent this. */
+static void
+ix86_darwin_regparmandstackparm_wrapper (tree trad_fndecl)
+{
+ tree new_fndecl;
+ splay_tree st;
+ tree bind, block, call, clone_parm, modify, parmlist, rdecl, rtn, stmt_list, type;
+ tree_stmt_iterator tsi;
+
+ st = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
+ new_fndecl = ix86_darwin_regparmandstackparm_dup_fndecl (trad_fndecl, st);
+
+ for (parmlist = NULL, clone_parm = DECL_ARGUMENTS (trad_fndecl);
+ clone_parm;
+ clone_parm = TREE_CHAIN (clone_parm))
+ {
+ gcc_assert (clone_parm);
+ DECL_ABSTRACT_ORIGIN (clone_parm) = NULL;
+ parmlist = tree_cons (NULL, clone_parm, parmlist);
+ }
+
+ /* We built this list backwards; fix now. */
+ parmlist = nreverse (parmlist);
+ type = TREE_TYPE (TREE_TYPE (trad_fndecl));
+ call = build_function_call (new_fndecl, parmlist);
+ TREE_TYPE (call) = type;
+ if (type == void_type_node)
+ rtn = call;
+ else if (0 && ix86_return_in_memory (type))
+ {
+ /* Return without a RESULT_DECL: RETURN_EXPR (CALL). */
+ rtn = make_node (RETURN_EXPR);
+ TREE_OPERAND (rtn, 0) = call;
+ TREE_TYPE (rtn) = type;
+ }
+ else /* RETURN_EXPR(MODIFY(RESULT_DECL, CALL)). */
+ {
+ rdecl = make_node (RESULT_DECL);
+ TREE_TYPE (rdecl) = type;
+ DECL_MODE (rdecl) = TYPE_MODE (type);
+ DECL_RESULT (trad_fndecl) = rdecl;
+ DECL_CONTEXT (rdecl) = trad_fndecl;
+ modify = build_modify_expr (rdecl, NOP_EXPR, call);
+ TREE_TYPE (modify) = type;
+ rtn = make_node (RETURN_EXPR);
+ TREE_OPERAND (rtn, 0) = modify;
+ TREE_TYPE (rtn) = type;
+ }
+ stmt_list = alloc_stmt_list ();
+ tsi = tsi_start (stmt_list);
+ tsi_link_after (&tsi, rtn, TSI_NEW_STMT);
+
+ /* This wrapper consists of "return <my_name>$3SSE (<my_arguments>);"
+ thus it has no local variables. */
+ block = make_node (BLOCK);
+ TREE_USED (block) = true;
+ bind = make_node (BIND_EXPR);
+ BIND_EXPR_BLOCK (bind) = block;
+ BIND_EXPR_BODY (bind) = stmt_list;
+ TREE_TYPE (bind) = void_type_node;
+ TREE_SIDE_EFFECTS (bind) = true;
+
+ DECL_SAVED_TREE (trad_fndecl) = bind;
+
+ /* DECL_ABSTRACT_ORIGIN (new_fndecl) = NULL; *//* ? */
+
+ ix86_darwin_re_context (&new_fndecl, trad_fndecl, new_fndecl, st);
+ ix86_darwin_re_context (&DECL_SAVED_TREE (new_fndecl), trad_fndecl, new_fndecl, st);
+ splay_tree_delete (st);
+ gimplify_function_tree (new_fndecl);
+ cgraph_finalize_function (new_fndecl, /* nested = */ true);
+ gimplify_function_tree (trad_fndecl);
+ if (!ix86_darwin_regparmandstackparm_st)
+ ix86_darwin_regparmandstackparm_st
+ = splay_tree_new (splay_tree_compare_pointers, NULL, NULL);
+ splay_tree_insert (ix86_darwin_regparmandstackparm_st,
+ (splay_tree_key) trad_fndecl, (splay_tree_value) new_fndecl);
+}
+
+/* Entry point into the regparmandstackparm machinery. FNDECL might be
+ marked 'regparmandstackparm'; if it is, create the fast version, etc. */
+void
+ix86_darwin_handle_regparmandstackparm (tree fndecl)
+{
+ static unsigned int already_running = 0;
+
+ /* Guard against a null FNDECL and against recursive invocation. */
+ if (!fndecl || already_running)
+ return;
+
+ already_running++;
+
+ if (lookup_attribute ("regparmandstackparm", TYPE_ATTRIBUTES (TREE_TYPE (fndecl)))
+ && !lookup_attribute ("regparmandstackparmee", TYPE_ATTRIBUTES (TREE_TYPE (fndecl))))
+ {
+ if (DECL_STRUCT_FUNCTION (fndecl) && DECL_STRUCT_FUNCTION (fndecl)->stdarg)
+ error ("regparmandstackparm is incompatible with varargs");
+ else if (DECL_SAVED_TREE (fndecl))
+ ix86_darwin_regparmandstackparm_wrapper (fndecl);
+ }
+
+ already_running--;
+}
+/* APPLE LOCAL end regparmandstackparm */
+
+/* APPLE LOCAL begin CW asm blocks */
+#include <ctype.h>
+#include "config/asm.h"
+
+/* Additional register names accepted for inline assembly that would not
+ otherwise be recognized as registers. This table must be sorted for
+ bsearch. */
+static const char *iasm_additional_names[] = {
+ "AH", "AL", "AX", "BH", "BL", "BP", "BX", "CH", "CL", "CX", "DH",
+ "DI", "DL", "DX", "EAX", "EBP", "EBX", "ECX", "EDI", "EDX", "ESI",
+ "ESP", "MM0", "MM1", "MM2", "MM3", "MM4", "MM5", "MM6", "MM7", "R10",
+ "R11", "R12", "R13", "R14", "R15", "R8", "R9", "RAX", "RBP", "RBX",
+ "RCX", "RDI", "RDX", "RSI", "RSP", "SI", "SP", "ST", "ST(1)", "ST(2)",
+ "ST(3)", "ST(4)", "ST(5)", "ST(6)", "ST(7)", "XMM0", "XMM1", "XMM10",
+ "XMM11", "XMM12", "XMM13", "XMM14", "XMM15", "XMM2", "XMM3", "XMM4",
+ "XMM5", "XMM6", "XMM7", "XMM8", "XMM9" };
+
+/* Comparison function for bsearch to find additional register names. */
+static int
+iasm_reg_comp (const void *a, const void *b)
+{
+ char *const*x = a;
+ char *const*y = b;
+ int c = strcasecmp (*x, *y);
+ return c;
+}
+
+/* Translate some register names seen in CW asm into GCC standard
+ forms. */
+
+const char *
+i386_iasm_register_name (const char *regname, char *buf)
+{
+ const char **r;
+
+ /* If we can find the named register, return it. */
+ if (decode_reg_name (regname) >= 0)
+ {
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ return regname;
+ sprintf (buf, "%%%s", regname);
+ return buf;
+ }
+
+ /* If the name matches an entry in iasm_additional_names, try its
+ lower-case form. */
+ r = bsearch (&regname, iasm_additional_names,
+ sizeof (iasm_additional_names) / sizeof (iasm_additional_names[0]),
+ sizeof (iasm_additional_names[0]), iasm_reg_comp);
+ if (r)
+ {
+ char *p;
+ const char *q;
+ q = regname = *r;
+ p = buf;
+ if (ASSEMBLER_DIALECT != ASM_INTEL)
+ *p++ = '%';
+ regname = p;
+ while ((*p++ = tolower (*q++)))
+ ;
+ if (decode_reg_name (regname) >= 0)
+ return buf;
+ }
+
+ return NULL;
+}
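+
+/* Examples (illustrative): for AT&T output, "eax" is returned as "%eax",
+   and the table entry matching "EAX" is lower-cased to "%eax"; for Intel
+   output the '%' prefix is omitted.  A NULL return means the name is not
+   a register at all.  */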
+
+/* Return true iff the opcode wants memory to be stable. We arrange
+ for a memory clobber in these instances. */
+bool
+iasm_memory_clobber (const char *ARG_UNUSED (opcode))
+{
+ return true;
+}
+
+/* Return true iff the operands need swapping. */
+
+bool
+iasm_x86_needs_swapping (const char *opcode)
+{
+ /* Don't swap if the output format is the same as the input format. */
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ return false;
+
+ /* These don't need swapping. */
+ if (strcasecmp (opcode, "bound") == 0)
+ return false;
+ if (strcasecmp (opcode, "invlpga") == 0)
+ return false;
+ if (opcode[0] == ' ' && iasm_is_pseudo (opcode+1))
+ return false;
+
+ return true;
+}
+
+/* Swap operands given in MS-style asm ordering when the output is in
+ AT&T syntax. */
+
+static tree
+iasm_x86_swap_operands (const char *opcode, tree args)
+{
+ int noperands;
+
+ if (iasm_x86_needs_swapping (opcode) == false)
+ return args;
+
+#if 0
+ /* GAS also checks the type of the arguments to determine if they
+ need swapping. */
+ if ((argtype[0]&Imm) && (argtype[1]&Imm))
+ return args;
+#endif
+ noperands = list_length (args);
+ if (noperands == 2 || noperands == 3)
+ {
+ /* Swap first and last (1 and 2 or 1 and 3). */
+ return nreverse (args);
+ }
+ return args;
+}
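+
+/* For example, the MS-style "mov eax, 42" arrives here as the argument
+   list (eax, 42); nreverse yields (42, eax), matching the AT&T form
+   "mov $42, %eax".  For 3-operand insns, reversing the list swaps only
+   the first and last operands.  */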
+
+/* Map a register name to a high level tree type for a VAR_DECL of
+ that type, whose RTL will refer to the given register. */
+
+static tree
+iasm_type_for (tree arg)
+{
+ tree type = NULL_TREE;
+
+ if (IDENTIFIER_LENGTH (arg) > 2
+ && IDENTIFIER_POINTER (arg)[0] == '%')
+ {
+ enum machine_mode mode = VOIDmode;
+ if (IDENTIFIER_POINTER (arg)[1] == 'e')
+ mode = SImode;
+ else if (/* IDENTIFIER_POINTER (arg)[2] == 'h'
+ || */ IDENTIFIER_POINTER (arg)[2] == 'l')
+ mode = QImode;
+ else if (IDENTIFIER_POINTER (arg)[2] == 'x')
+ mode = HImode;
+ else if (IDENTIFIER_POINTER (arg)[1] == 'r')
+ mode = DImode;
+ else if (IDENTIFIER_POINTER (arg)[1] == 'x')
+ mode = SFmode;
+ else if (IDENTIFIER_POINTER (arg)[1] == 'm')
+ mode = SFmode;
+
+ if (mode != VOIDmode)
+ type = lang_hooks.types.type_for_mode (mode, 1);
+ }
+
+ return type;
+}
+
+/* We raise a named register into a VAR_DECL of an appropriate type that
+ refers to the register, so that reload doesn't run out of registers. */
+
+tree
+iasm_raise_reg (tree arg)
+{
+ int regno = decode_reg_name (IDENTIFIER_POINTER (arg));
+ if (regno >= 0)
+ {
+ tree decl = NULL_TREE;
+
+ decl = lookup_name (arg);
+ if (decl == error_mark_node)
+ decl = 0;
+ if (decl == 0)
+ {
+ tree type = iasm_type_for (arg);
+ if (type)
+ {
+ decl = build_decl (VAR_DECL, arg, type);
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_REGISTER (decl) = 1;
+ C_DECL_REGISTER (decl) = 1;
+ DECL_HARD_REGISTER (decl) = 1;
+ set_user_assembler_name (decl, IDENTIFIER_POINTER (arg));
+ decl = lang_hooks.decls.pushdecl (decl);
+ }
+ }
+
+ if (decl)
+ return decl;
+ }
+
+ return arg;
+}
+
+/* Allow constants and readonly variables to be used in instructions
+ in places that require constants. */
+
+static tree
+iasm_default_conv (tree e)
+{
+ if (e == NULL_TREE)
+ return e;
+
+ if (TREE_CODE (e) == CONST_DECL)
+ e = DECL_INITIAL (e);
+
+ if (DECL_P (e) && DECL_MODE (e) != BLKmode)
+ e = decl_constant_value (e);
+ return e;
+}
+
+/* Return true iff the operand is suitable as the offset for a memory
+ instruction. */
+
+static bool
+iasm_is_offset (tree v)
+{
+ if (TREE_CODE (v) == INTEGER_CST)
+ return true;
+ if (TREE_CODE (v) == ADDR_EXPR)
+ {
+ v = TREE_OPERAND (v, 0);
+ if (TREE_CODE (v) == VAR_DECL
+ && TREE_STATIC (v)
+ && MEM_P (DECL_RTL (v)))
+ {
+ note_alternative_entry_points ();
+ return true;
+ }
+ if (TREE_CODE (v) == LABEL_DECL)
+ return true;
+ return false;
+ }
+ if (TREE_CODE (v) == VAR_DECL
+ && TREE_STATIC (v)
+ && MEM_P (DECL_RTL (v)))
+ {
+ note_alternative_entry_points ();
+ return true;
+ }
+ if ((TREE_CODE (v) == MINUS_EXPR
+ || TREE_CODE (v) == PLUS_EXPR)
+ && iasm_is_offset (TREE_OPERAND (v, 0))
+ && iasm_is_offset (TREE_OPERAND (v, 1)))
+ return true;
+ if (TREE_CODE (v) == NEGATE_EXPR
+ && iasm_is_offset (TREE_OPERAND (v, 0)))
+ return true;
+
+ return false;
+}
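+
+/* Offsets in this sense are integer constants, addresses of static
+   variables and labels, and +/- combinations of those, e.g. "4",
+   "&static_var + 8", or "label2 - label1".  */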
+
+/* Combine two types for [] expressions. */
+
+static tree
+iasm_combine_type (tree type0, tree type1)
+{
+ if (type0 == void_type_node
+ || type0 == NULL_TREE)
+ {
+ if (type1 == void_type_node)
+ return NULL_TREE;
+ return type1;
+ }
+
+ if (type1 == void_type_node
+ || type1 == NULL_TREE)
+ return type0;
+
+ if (type0 == type1)
+ return type0;
+
+ error ("too many types in []");
+
+ return type0;
+}
+
+/* We canonicalize the input form of bracket expressions, as the input
+ forms are less constrained than what the assembler will accept.
+
+ TOP is the top of the canonical tree we're generating and
+ TREE_OPERAND (top, 0) is the offset portion of the expression. ARGP
+ points to the current part of the tree we're walking.
+
+ The transformations we do:
+
+ (A+O) ==> A
+ (A-O) ==> A
+ (O+A) ==> A
+
+ where O are offset expressions. */
+
+static tree
+iasm_canonicalize_bracket_1 (tree* argp, tree top)
+{
+ tree arg = *argp;
+ tree offset = TREE_OPERAND (top, 0);
+ tree arg0, arg1;
+ tree rtype = NULL_TREE;
+
+ *argp = arg = iasm_default_conv (arg);
+
+ switch (TREE_CODE (arg))
+ {
+ case NOP_EXPR:
+ if (TREE_CODE (TREE_TYPE (arg)) == IDENTIFIER_NODE)
+ {
+ *argp = TREE_OPERAND (arg, 0);
+ return TREE_TYPE (arg);
+ }
+ break;
+
+ case BRACKET_EXPR:
+ rtype = TREE_TYPE (arg);
+ /* fall thru */
+ case PLUS_EXPR:
+ arg0 = TREE_OPERAND (arg, 0);
+ arg1 = TREE_OPERAND (arg, 1);
+
+ arg0 = iasm_default_conv (arg0);
+ arg1 = iasm_default_conv (arg1);
+
+ if (iasm_is_offset (arg0))
+ {
+ if (offset != integer_zero_node)
+ arg0 = build2 (PLUS_EXPR, void_type_node, arg0, offset);
+ TREE_OPERAND (top, 0) = arg0;
+
+ *argp = arg1;
+ if (arg1)
+ return iasm_combine_type (rtype, iasm_canonicalize_bracket_1 (argp, top));
+ }
+ else if (arg1 && iasm_is_offset (arg1))
+ {
+ if (offset != integer_zero_node)
+ arg1 = build2 (PLUS_EXPR, void_type_node, arg1, offset);
+ TREE_OPERAND (top, 0) = arg1;
+ *argp = arg0;
+ return iasm_combine_type (rtype, iasm_canonicalize_bracket_1 (argp, top));
+ }
+ else
+ {
+ rtype = iasm_combine_type (rtype,
+ iasm_canonicalize_bracket_1 (&TREE_OPERAND (arg, 0), top));
+
+ if (arg1)
+ rtype = iasm_combine_type (rtype,
+ iasm_canonicalize_bracket_1 (&TREE_OPERAND (arg, 1), top));
+ if (TREE_OPERAND (arg, 0) == NULL_TREE)
+ {
+ if (TREE_OPERAND (arg, 1))
+ {
+ TREE_OPERAND (arg, 0) = TREE_OPERAND (arg, 1);
+ TREE_OPERAND (arg, 1) = NULL_TREE;
+ }
+ else
+ *argp = NULL_TREE;
+ }
+ else if (TREE_OPERAND (arg, 1) == NULL_TREE && rtype == NULL_TREE)
+ *argp = TREE_OPERAND (arg, 0);
+ if (TREE_CODE (arg) == PLUS_EXPR
+ && TREE_TYPE (arg) == NULL_TREE
+ && TREE_TYPE (TREE_OPERAND (arg, 0))
+ && TREE_TYPE (TREE_OPERAND (arg, 1))
+ && (POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 1)))
+ || POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))))
+ {
+ tree type = TREE_TYPE (TREE_OPERAND (arg, 1));
+ if (INTEGRAL_TYPE_P (type))
+ type = TREE_TYPE (TREE_OPERAND (arg, 0));
+ TREE_TYPE (arg) = type;
+ }
+ if (TREE_CODE (arg) == PLUS_EXPR
+ && TREE_TYPE (arg) == NULL_TREE
+ && TREE_TYPE (TREE_OPERAND (arg, 0))
+ && TREE_TYPE (TREE_OPERAND (arg, 0)) == TREE_TYPE (TREE_OPERAND (arg, 1)))
+ {
+ tree type = TREE_TYPE (TREE_OPERAND (arg, 0));
+ TREE_TYPE (arg) = type;
+ }
+ }
+ return rtype;
+
+ case MINUS_EXPR:
+ rtype = iasm_canonicalize_bracket_1 (&TREE_OPERAND (arg, 0), top);
+ arg0 = TREE_OPERAND (arg, 0);
+ arg1 = TREE_OPERAND (arg, 1);
+ arg1 = iasm_default_conv (arg1);
+ if (iasm_is_offset (arg1))
+ {
+ offset = TREE_OPERAND (top, 0);
+ if (offset == integer_zero_node)
+ arg1 = fold (build1 (NEGATE_EXPR,
+ TREE_TYPE (arg1),
+ arg1));
+ else
+ arg1 = build2 (MINUS_EXPR, void_type_node, offset, arg1);
+ TREE_OPERAND (top, 0) = arg1;
+ *argp = arg0;
+	  return iasm_combine_type (rtype, iasm_canonicalize_bracket_1 (argp, top));
+ }
+ return rtype;
+
+ case PARM_DECL:
+ case VAR_DECL:
+ {
+ *argp = iasm_addr (arg);
+ break;
+ }
+
+ case IDENTIFIER_NODE:
+ {
+ *argp = iasm_raise_reg (arg);
+ break;
+ }
+
+ case MULT_EXPR:
+ if (TREE_TYPE (arg) == NULL_TREE)
+ {
+ if (TREE_CODE (TREE_OPERAND (arg, 1)) == IDENTIFIER_NODE)
+ TREE_OPERAND (arg, 1) = iasm_raise_reg (TREE_OPERAND (arg, 1));
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == IDENTIFIER_NODE)
+ TREE_OPERAND (arg, 0) = iasm_raise_reg (TREE_OPERAND (arg, 0));
+ if (TREE_TYPE (TREE_OPERAND (arg, 0))
+ && TREE_TYPE (TREE_OPERAND (arg, 1)))
+ TREE_TYPE (arg) = TREE_TYPE (TREE_OPERAND (arg, 0));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+/* Form an indirection for an inline asm address expression operand.
+ We give a warning when we think the optimizer might have to be used
+ to reform complex addresses, &stack_var + %eax + 4 for example,
+ after gimplification rips the address apart. */
+
+static tree
+iasm_indirect (tree addr)
+{
+ if (TREE_CODE (addr) == ADDR_EXPR
+ && TREE_CODE (TREE_TYPE (TREE_OPERAND (addr, 0))) != ARRAY_TYPE
+ /* && TREE_CODE (TREE_OPERAND (addr, 0)) == ARRAY_REF */)
+ return TREE_OPERAND (addr, 0);
+
+ addr = fold (build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr));
+
+ if (! optimize && TREE_CODE (addr) == INDIRECT_REF)
+ warning (0, "addressing mode too complex when not optimizing, will consume extra register(s)");
+
+ return addr;
+}
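+
+/* For example, indirecting &x for a scalar x yields x itself, while
+   other addresses become an INDIRECT_REF that the optimizer may have
+   to fold back into a single addressing mode.  */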
+
+/* Form an address addition for an inline asm address expression.  We
+ try and form ARRAY_REFs, as they will go through gimplification
+ without being ripped apart. */
+
+static tree
+iasm_add (tree addr, tree off)
+{
+ if (integer_zerop (off))
+ return addr;
+
+ /* We have to convert the offset to an int type, as we rip apart
+ trees whose type has been converted to a pointer type for the
+ offset already. */
+ return pointer_int_sum (PLUS_EXPR, addr, convert (integer_type_node, off));
+}
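+
+/* For example, iasm_add (p, 8) for a pointer p forms the sum p + 8
+   via pointer_int_sum, a shape chosen to survive gimplification
+   without being ripped apart.  */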
+
+/* We canonicalize the input form of bracket expressions, as the input
+   forms are less constrained than what the assembler will accept.  */
+
+static tree
+iasm_canonicalize_bracket (tree arg)
+{
+ tree rtype;
+
+ gcc_assert (TREE_CODE (arg) == BRACKET_EXPR);
+
+ /* Let the normal operand printer output this without trying to
+ decompose it into parts so that things like (%esp + 20) + 4 can
+ be output as 24(%esp) by the optimizer instead of 4(%0) and
+ burning an "R" with (%esp + 20). */
+ if (TREE_OPERAND (arg, 1) == NULL_TREE
+ && TREE_TYPE (TREE_OPERAND (arg, 0))
+ && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0))))
+ {
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL
+ || TREE_CODE (TREE_OPERAND (arg, 0)) == PARM_DECL)
+ return arg;
+ return iasm_indirect (TREE_OPERAND (arg, 0));
+ }
+
+  /* Ensure that operand 0 is an offset.  */
+ if (TREE_OPERAND (arg, 0)
+ && iasm_is_offset (TREE_OPERAND (arg, 0)))
+ {
+      /* We win if operand 0 is already an offset.  */
+ }
+ else if (TREE_OPERAND (arg, 1) == NULL_TREE)
+ {
+      /* Move operand 0 to 1 if 1 is empty and 0 isn't already an offset.  */
+ TREE_OPERAND (arg, 1) = TREE_OPERAND (arg, 0);
+ TREE_OPERAND (arg, 0) = integer_zero_node;
+ }
+ else
+ {
+ tree swp;
+      /* Just have to force it now.  */
+ swp = iasm_build_bracket (TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
+ TREE_OPERAND (arg, 0) = integer_zero_node;
+ TREE_OPERAND (arg, 1) = swp;
+ }
+
+ if (TREE_OPERAND (arg, 1))
+ {
+ rtype = iasm_canonicalize_bracket_1 (&TREE_OPERAND (arg, 1), arg);
+ if (rtype)
+ TREE_TYPE (arg) = iasm_combine_type (TREE_TYPE (arg), rtype);
+ }
+
+  /* For correctness, pointer types should be raised to the tree
+     level, as they denote address calculations with stack-based
+     objects, and we want print_operand to print the entire address so
+     that it can combine constants and hard registers into the address.
+     Unfortunately we might have to rely upon the optimizer to reform
+     the address after the gimplification pass rips it apart.  */
+
+ /* Handle [INTEGER_CST][ptr][op3] */
+ if (TREE_OPERAND (arg, 1)
+ && TREE_CODE (TREE_OPERAND (arg, 0)) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg, 1)) == BRACKET_EXPR
+ && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (arg, 1), 0))
+ && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (arg, 1), 0)))
+ && TREE_TYPE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (arg, 1), 0))) != void_type_node
+ && (TREE_TYPE (arg) == void_type_node
+ || (TREE_TYPE (arg) == get_identifier ("word")
+ && (TYPE_MODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (arg, 1), 0))))
+ == HImode))))
+ {
+ tree op3 = TREE_OPERAND (TREE_OPERAND (arg, 1), 1);
+ tree addr = iasm_add (TREE_OPERAND (TREE_OPERAND (arg, 1), 0),
+ TREE_OPERAND (arg, 0));
+ tree type;
+ addr = iasm_indirect (addr);
+ if (op3 == NULL_TREE)
+ return addr;
+ type = TREE_TYPE (addr);
+ type = build_pointer_type (type);
+ addr = build1 (ADDR_EXPR, type, addr);
+ addr = fold (build2 (PLUS_EXPR, type, addr, op3));
+ return iasm_indirect (addr);
+ }
+
+ /* Handle ptr + INTEGER_CST */
+ if (TREE_OPERAND (arg, 1)
+ && TREE_TYPE (arg) == void_type_node
+ && TREE_TYPE (TREE_OPERAND (arg, 1))
+ && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 1)))
+ && TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 1))) != void_type_node)
+ {
+ if (TREE_CODE (TREE_OPERAND (arg, 1)) == ADDR_EXPR)
+ {
+ if (TREE_OPERAND (arg, 0) == integer_zero_node)
+ return TREE_OPERAND (TREE_OPERAND (arg, 1), 0);
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == INTEGER_CST)
+ return iasm_indirect (iasm_add (TREE_OPERAND (arg, 1), TREE_OPERAND (arg, 0)));
+ }
+ if (TREE_CODE (TREE_OPERAND (arg, 1)) == PLUS_EXPR)
+ {
+ if (TREE_OPERAND (arg, 0) == integer_zero_node)
+ return iasm_indirect (TREE_OPERAND (arg, 1));
+ if (TREE_CODE (TREE_OPERAND (arg, 0)) == INTEGER_CST)
+ return iasm_indirect (iasm_add (TREE_OPERAND (arg, 1), TREE_OPERAND (arg, 0)));
+ }
+ }
+ return arg;
+}
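+
+/* For example (illustrative), a doubly bracketed operand such as
+   [4][p], with p a pointer, collapses into an indirection of p plus
+   4 rather than remaining a nested BRACKET_EXPR.  */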
+
+/* We canonicalize the instruction by swapping operands and rewriting
+   the opcode if the output style is AT&T syntax.  */
+
+tree
+iasm_x86_canonicalize_operands (const char **opcode_p, tree iargs, void *ep)
+{
+ iasm_md_extra_info *e = ep;
+ static char buf[40];
+ tree args = iargs;
+ int argnum = 1;
+ const char *opcode = *opcode_p;
+ bool fp_style = false;
+ bool fpi_style = false;
+
+ /* Don't transform if output format is the same as input format. */
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ return iargs;
+
+ if (strncasecmp (opcode, "f", 1) == 0)
+ fp_style = true;
+
+ if (fp_style
+ && strncasecmp (opcode+1, "i", 1) == 0)
+ fpi_style = true;
+
+ while (args)
+ {
+ tree arg = TREE_VALUE (args);
+
+ /* Handle st(3) */
+ if (TREE_CODE (arg) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (arg, 0)) == IDENTIFIER_NODE
+ && strcasecmp (IDENTIFIER_POINTER (TREE_OPERAND (arg, 0)), "%st") == 0
+ && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST)
+ {
+ int v = tree_low_cst (TREE_OPERAND (arg, 1), 0);
+
+ if (v < 0 || v > 7)
+ {
+ error ("unknown floating point register st(%d)", v);
+ v = 0;
+ }
+
+ /* Rewrite %st(0) to %st. */
+ if (v == 0)
+ TREE_VALUE (args) = TREE_OPERAND (arg, 0);
+ else
+ {
+ char buf[20];
+ sprintf (buf, "%%st(%d)", v);
+ TREE_VALUE (args) = get_identifier (buf);
+ }
+ }
+ else if (TREE_CODE (arg) == BRACKET_EXPR)
+ TREE_VALUE (args) = arg = iasm_canonicalize_bracket (arg);
+
+ switch (TREE_CODE (arg))
+ {
+ case ARRAY_REF:
+ case VAR_DECL:
+ case PARM_DECL:
+ case INDIRECT_REF:
+ if (TYPE_MODE (TREE_TYPE (arg)) == QImode)
+ e->mod[argnum-1] = 'b';
+ else if (TYPE_MODE (TREE_TYPE (arg)) == HImode)
+ e->mod[argnum-1] = fpi_style ? 's' : 'w';
+ else if (TYPE_MODE (TREE_TYPE (arg)) == SImode)
+ e->mod[argnum-1] = fpi_style ? 'l' : (fp_style ? 's' : 'l');
+ else if (TYPE_MODE (TREE_TYPE (arg)) == DImode)
+ e->mod[argnum-1] = 'q';
+ else if (TYPE_MODE (TREE_TYPE (arg)) == SFmode)
+ e->mod[argnum-1] = 's';
+ else if (TYPE_MODE (TREE_TYPE (arg)) == DFmode)
+ e->mod[argnum-1] = 'l';
+ else if (TYPE_MODE (TREE_TYPE (arg)) == XFmode)
+ e->mod[argnum-1] = 't';
+ break;
+ case BRACKET_EXPR:
+	  /* We use the TREE_TYPE to indicate the type of the operand; it
+	     is set with code like: inc dword ptr [eax].  */
+ if (TREE_CODE (TREE_TYPE (arg)) == IDENTIFIER_NODE)
+ {
+ const char *s = IDENTIFIER_POINTER (TREE_TYPE (arg));
+ if (strcasecmp (s, "byte") == 0)
+ e->mod[argnum-1] = 'b';
+ else if (strcasecmp (s, "word") == 0)
+ e->mod[argnum-1] = fpi_style ? 's' : 'w';
+ else if (strcasecmp (s, "dword") == 0)
+ e->mod[argnum-1] = fpi_style ? 'l' : (fp_style ? 's' : 'l');
+ else if (strcasecmp (s, "qword") == 0)
+ e->mod[argnum-1] = 'q';
+ else if (strcasecmp (s, "real4") == 0)
+ e->mod[argnum-1] = 's';
+ else if (strcasecmp (s, "real8") == 0)
+ e->mod[argnum-1] = 'l';
+ else if (strcasecmp (s, "real10") == 0)
+ e->mod[argnum-1] = 't';
+ else if (strcasecmp (s, "tbyte") == 0)
+ e->mod[argnum-1] = 't';
+ }
+ break;
+ case LABEL_DECL:
+ e->mod[argnum-1] = 'l';
+ break;
+ case IDENTIFIER_NODE:
+ if (IDENTIFIER_LENGTH (arg) > 2
+ && IDENTIFIER_POINTER (arg)[0] == '%')
+ {
+ if (IDENTIFIER_POINTER (arg)[1] == 'e')
+ e->mod[argnum-1] = 'l';
+ else if (IDENTIFIER_POINTER (arg)[2] == 'h'
+ || IDENTIFIER_POINTER (arg)[2] == 'l')
+ e->mod[argnum-1] = 'b';
+ else if (IDENTIFIER_POINTER (arg)[2] == 'x')
+ e->mod[argnum-1] = 'w';
+ }
+ break;
+ default:
+ break;
+ }
+ args = TREE_CHAIN (args);
+ ++argnum;
+ }
+ --argnum;
+
+ args = iasm_x86_swap_operands (opcode, iargs);
+ if (opcode[0] == ' ' && iasm_is_pseudo (opcode+1))
+ e->pseudo = true;
+
+ if (strcasecmp (opcode, "movs") == 0
+ || strcasecmp (opcode, "scas") == 0
+ || strcasecmp (opcode, "stos") == 0
+ || strcasecmp (opcode, "xlat") == 0)
+ args = NULL_TREE;
+ else if (strcasecmp (opcode, "cmovpo") == 0)
+ opcode = "cmovnp";
+ else if (strcasecmp (opcode, "cmovpe") == 0)
+ opcode = "cmovp";
+ else if (strcasecmp (opcode, "outs") == 0
+ && TREE_CHAIN (args))
+ {
+ e->mod[0] = e->mod[1];
+ }
+ else if (strcasecmp (opcode, "ins") == 0
+ && TREE_CHAIN (args))
+ {
+ e->mod[1] = 0;
+ }
+  /* movsx isn't part of the AT&T syntax; there it is spelled movs.  */
+ else if (strcasecmp (opcode, "movsx") == 0)
+ opcode = "movs";
+ else if (strcasecmp (opcode, "pushfd") == 0)
+ *opcode_p = "pushf";
+ else if (strcasecmp (opcode, "popfd") == 0)
+ *opcode_p = "popf";
+
+  /* movzx isn't part of the AT&T syntax; there it is spelled movz.  */
+ if (strcasecmp (opcode, "movzx") == 0)
+ {
+      /* Silly extension of the day: a zero-extended move that has the
+	 same before and after size is accepted and is just a normal
+	 move.  */
+ if (argnum == 2
+ && (e->mod[0] == e->mod[1]
+ || e->mod[1] == 0))
+ opcode = "mov";
+ else
+ opcode = "movz";
+ }
+
+ if (strncasecmp (opcode, "f", 1) == 0 &&
+ (!(strcasecmp (opcode, "fldcw") == 0)))
+ {
+ if (e->mod[0] == 'w')
+ e->mod[0] = 's';
+ if (e->mod[1] == 'w')
+ e->mod[1] = 's';
+ }
+ else if (strcasecmp (opcode, "mov") == 0)
+ {
+ /* The 32-bit integer instructions can be used on floats. */
+ if (e->mod[0] == 's')
+ e->mod[0] = 'l';
+ if (e->mod[1] == 's')
+ e->mod[1] = 'l';
+ }
+
+ if (e->pseudo)
+ e->mod[0] = e->mod[1] = 0;
+ else if (strcasecmp (opcode, "clflush") == 0
+ || strcasecmp (opcode, "fbld") == 0
+ || strcasecmp (opcode, "fbstp") == 0
+ || strcasecmp (opcode, "fldt") == 0
+ || strcasecmp (opcode, "fnstcw") == 0
+ || strcasecmp (opcode, "fnstsw") == 0
+ || strcasecmp (opcode, "fstcw") == 0
+ || strcasecmp (opcode, "fstsw") == 0
+ || strcasecmp (opcode, "fxrstor") == 0
+ || strcasecmp (opcode, "fxsave") == 0
+ || strcasecmp (opcode, "invlpg") == 0
+ || strcasecmp (opcode, "jmp") == 0
+ || strcasecmp (opcode, "call") == 0
+ || strcasecmp (opcode, "ja") == 0
+ || strcasecmp (opcode, "jae") == 0
+ || strcasecmp (opcode, "jb") == 0
+ || strcasecmp (opcode, "jbe") == 0
+ || strcasecmp (opcode, "jc") == 0
+ || strcasecmp (opcode, "je") == 0
+ || strcasecmp (opcode, "jg") == 0
+ || strcasecmp (opcode, "jge") == 0
+ || strcasecmp (opcode, "jl") == 0
+ || strcasecmp (opcode, "jle") == 0
+ || strcasecmp (opcode, "jna") == 0
+ || strcasecmp (opcode, "jnae") == 0
+ || strcasecmp (opcode, "jnb") == 0
+ || strcasecmp (opcode, "jnc") == 0
+ || strcasecmp (opcode, "jne") == 0
+ || strcasecmp (opcode, "jng") == 0
+ || strcasecmp (opcode, "jnge") == 0
+ || strcasecmp (opcode, "jnl") == 0
+ || strcasecmp (opcode, "jnle") == 0
+ || strcasecmp (opcode, "jno") == 0
+ || strcasecmp (opcode, "jnp") == 0
+ || strcasecmp (opcode, "jns") == 0
+ || strcasecmp (opcode, "jnz") == 0
+ || strcasecmp (opcode, "jo") == 0
+ || strcasecmp (opcode, "jp") == 0
+ || strcasecmp (opcode, "jpe") == 0
+ || strcasecmp (opcode, "jpo") == 0
+ || strcasecmp (opcode, "js") == 0
+ || strcasecmp (opcode, "jz") == 0
+ || strcasecmp (opcode, "ldmxcsr") == 0
+ || strcasecmp (opcode, "lgdt") == 0
+ || strcasecmp (opcode, "lidt") == 0
+ || strcasecmp (opcode, "lldt") == 0
+ || strcasecmp (opcode, "lmsw") == 0
+ || strcasecmp (opcode, "ltr") == 0
+ || strcasecmp (opcode, "movapd") == 0
+ || strcasecmp (opcode, "movaps") == 0
+ || strcasecmp (opcode, "movd") == 0
+ || strcasecmp (opcode, "movhpd") == 0
+ || strcasecmp (opcode, "movhps") == 0
+ || strcasecmp (opcode, "movlpd") == 0
+ || strcasecmp (opcode, "movlps") == 0
+ || strcasecmp (opcode, "movntdq") == 0
+ || strcasecmp (opcode, "movntpd") == 0
+ || strcasecmp (opcode, "movntps") == 0
+ || strcasecmp (opcode, "movntq") == 0
+ || strcasecmp (opcode, "movq") == 0
+ || strcasecmp (opcode, "movsd") == 0
+ || strcasecmp (opcode, "movss") == 0
+ || strcasecmp (opcode, "movupd") == 0
+ || strcasecmp (opcode, "movups") == 0
+ || strcasecmp (opcode, "out") == 0
+ || strcasecmp (opcode, "prefetchnta") == 0
+ || strcasecmp (opcode, "prefetcht0") == 0
+ || strcasecmp (opcode, "prefetcht1") == 0
+ || strcasecmp (opcode, "prefetcht2") == 0
+ || strcasecmp (opcode, "seta") == 0
+ || strcasecmp (opcode, "setae") == 0
+ || strcasecmp (opcode, "setb") == 0
+ || strcasecmp (opcode, "setbe") == 0
+ || strcasecmp (opcode, "setc") == 0
+ || strcasecmp (opcode, "sete") == 0
+ || strcasecmp (opcode, "setg") == 0
+ || strcasecmp (opcode, "setge") == 0
+ || strcasecmp (opcode, "setl") == 0
+ || strcasecmp (opcode, "setle") == 0
+ || strcasecmp (opcode, "setna") == 0
+ || strcasecmp (opcode, "setnae") == 0
+ || strcasecmp (opcode, "setnb") == 0
+ || strcasecmp (opcode, "setnbe") == 0
+ || strcasecmp (opcode, "setnc") == 0
+ || strcasecmp (opcode, "setne") == 0
+ || strcasecmp (opcode, "setng") == 0
+ || strcasecmp (opcode, "setnge") == 0
+ || strcasecmp (opcode, "setnl") == 0
+ || strcasecmp (opcode, "setnle") == 0
+ || strcasecmp (opcode, "setno") == 0
+ || strcasecmp (opcode, "setnp") == 0
+ || strcasecmp (opcode, "setns") == 0
+ || strcasecmp (opcode, "setnz") == 0
+ || strcasecmp (opcode, "seto") == 0
+ || strcasecmp (opcode, "setp") == 0
+ || strcasecmp (opcode, "setpe") == 0
+ || strcasecmp (opcode, "setpo") == 0
+ || strcasecmp (opcode, "sets") == 0
+ || strcasecmp (opcode, "setz") == 0
+ || strcasecmp (opcode, "sldt") == 0
+ || strcasecmp (opcode, "smsw") == 0
+ || strcasecmp (opcode, "stmxcsr") == 0
+ || strcasecmp (opcode, "str") == 0
+ || strcasecmp (opcode, "xlat") == 0)
+ e->mod[0] = 0;
+ else if (strcasecmp (opcode, "lea") == 0
+ || strcasecmp (opcode, "rcl") == 0
+ || strcasecmp (opcode, "rcr") == 0
+ || strcasecmp (opcode, "rol") == 0
+ || strcasecmp (opcode, "ror") == 0
+ || strcasecmp (opcode, "sal") == 0
+ || strcasecmp (opcode, "sar") == 0
+ || strcasecmp (opcode, "shl") == 0
+ || strcasecmp (opcode, "shr") == 0)
+ e->mod[1] = 0;
+
+ if ((argnum == 1 && e->mod[0])
+ || (argnum == 2 && e->mod[0]
+ && (e->mod[0] == e->mod[1]
+ || e->mod[1] == 0)))
+ {
+ sprintf (buf, "%s%c", opcode, e->mod[0]);
+ *opcode_p = buf;
+ }
+ else if (argnum == 2 && e->mod[0] && e->mod[1])
+ {
+ sprintf (buf, "%s%c%c", opcode, e->mod[1], e->mod[0]);
+ *opcode_p = buf;
+ }
+
+ return args;
+}
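+
+/* For example, with AT&T output the Intel form
+       mov eax, dword ptr [ebx]
+   has its operands swapped and gains a size suffix derived from the
+   mod[] letters computed above, emerging as movl (%ebx), %eax.  */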
+
+/* Character used to separate the prefix words.  */
+/* See radr://4141844 for the enhancement to make this uniformly ' '. */
+#define IASM_PREFIX_SEP '/'
+
+void
+iasm_x86_print_prefix (char *buf, tree prefix_list)
+{
+ buf += strlen (buf);
+ while (prefix_list)
+ {
+ tree prefix = TREE_VALUE (prefix_list);
+ size_t len = IDENTIFIER_LENGTH (prefix);
+ memcpy (buf, IDENTIFIER_POINTER (prefix), len);
+ buf += len;
+ buf[0] = IASM_PREFIX_SEP;
+ ++buf;
+ buf[0] = 0;
+ prefix_list = TREE_CHAIN (prefix_list);
+ }
+}
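+
+/* For example, a prefix list containing "lock" is emitted as
+   "lock/" ahead of the opcode, using the separator above.  */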
+
+/* Warn when a variable's address is used to form a memory address
+   that will consume an extra register during reload.  */
+
+static void
+iasm_warn_extra_reg (tree arg)
+{
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL
+ || TREE_CODE (TREE_OPERAND (arg, 0)) == PARM_DECL))
+ warning (0, "addressing mode too complex, will consume an extra register");
+}
+
+bool
+iasm_print_op (char *buf, tree arg, unsigned argnum, tree *uses,
+ bool must_be_reg, bool must_not_be_reg, void *ep)
+{
+ iasm_md_extra_info *e = ep;
+ switch (TREE_CODE (arg))
+ {
+ case BRACKET_EXPR:
+ {
+ tree op1 = TREE_OPERAND (arg, 0);
+ tree op2 = TREE_OPERAND (arg, 1);
+ tree op0 = NULL_TREE, op3 = NULL_TREE;
+ tree scale = NULL_TREE;
+
+ if (op2 == NULL_TREE
+ && TREE_TYPE (op1)
+ && POINTER_TYPE_P (TREE_TYPE (op1)))
+ {
+ /* Let the normal operand printer output this without trying to
+ decompose it into parts so that things like (%esp + 20) + 4
+ can be output as 24(%esp) by the optimizer instead of 4(%0)
+ and burning an "R" with (%esp + 20). */
+ iasm_force_constraint ("m", e);
+ iasm_get_register_var (op1, "", buf, argnum, must_be_reg, e);
+ iasm_force_constraint (0, e);
+ break;
+ }
+
+ if (op2
+ && TREE_CODE (op2) == BRACKET_EXPR)
+ {
+ op3 = TREE_OPERAND (op2, 1);
+ op2 = TREE_OPERAND (op2, 0);
+ if (TREE_CODE (op2) == BRACKET_EXPR)
+ {
+ op0 = TREE_OPERAND (op2, 1);
+ op2 = TREE_OPERAND (op2, 0);
+ }
+ }
+ if (op0)
+ return false;
+
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ strcat (buf, "[");
+
+ if (op3 == NULL_TREE
+ && op2 && TREE_CODE (op2) == PLUS_EXPR)
+ {
+ op3 = TREE_OPERAND (op2, 0);
+ op2 = TREE_OPERAND (op2, 1);
+ }
+ if (op2 && TREE_CODE (op2) == MULT_EXPR)
+ {
+ tree t;
+ t = op3;
+ op3 = op2;
+ op2 = t;
+ }
+
+ /* Crack out the scaling, if any. */
+ if (ASSEMBLER_DIALECT == ASM_ATT
+ && op3
+ && TREE_CODE (op3) == MULT_EXPR)
+ {
+ if (TREE_CODE (TREE_OPERAND (op3, 1)) == INTEGER_CST)
+ {
+ scale = TREE_OPERAND (op3, 1);
+ op3 = TREE_OPERAND (op3, 0);
+ }
+ else if (TREE_CODE (TREE_OPERAND (op3, 0)) == INTEGER_CST)
+ {
+ scale = TREE_OPERAND (op3, 0);
+ op3 = TREE_OPERAND (op3, 1);
+ }
+ }
+
+ /* Complicated expression as JMP or CALL target. */
+ if (e->modifier && strcmp(e->modifier, "A") == 0)
+ {
+ strcat (buf, "*");
+ e->modifier = 0;
+ }
+ e->as_immediate = true;
+ iasm_print_operand (buf, op1, argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ e->as_immediate = false;
+
+ /* Just an immediate. */
+ if (op2 == NULL_TREE && op3 == NULL_TREE)
+ break;
+
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ strcat (buf, "]");
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ strcat (buf, "[");
+ else
+ strcat (buf, "(");
+
+ if (op2)
+ {
+	    /* We know by context that this has to be an R.  */
+ iasm_force_constraint ("R", e);
+ iasm_warn_extra_reg (op2);
+ iasm_print_operand (buf, op2, argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ iasm_force_constraint (0, e);
+ }
+ if (op3)
+ {
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ strcat (buf, "][");
+ else
+ strcat (buf, ",");
+
+	    /* We know by context that this has to be an l.  */
+ iasm_force_constraint ("l", e);
+ iasm_warn_extra_reg (op3);
+ iasm_print_operand (buf, op3, argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ iasm_force_constraint (0, e);
+ if (scale)
+ {
+ strcat (buf, ",");
+ e->as_immediate = true;
+ iasm_print_operand (buf, scale, argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ e->as_immediate = false;
+ }
+ }
+ if (ASSEMBLER_DIALECT == ASM_INTEL)
+ strcat (buf, "]");
+ else
+ strcat (buf, ")");
+ }
+ break;
+
+ case ADDR_EXPR:
+ if ((TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF
+ || TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL)
+ && ! e->as_immediate)
+ {
+ iasm_get_register_var (arg, "", buf, argnum, must_be_reg, e);
+ break;
+ }
+ if (! e->as_immediate)
+ e->as_offset = true;
+ iasm_print_operand (buf, TREE_OPERAND (arg, 0), argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ e->as_offset = false;
+ break;
+
+ case MULT_EXPR:
+ iasm_print_operand (buf, TREE_OPERAND (arg, 0), argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ strcat (buf, "*");
+ iasm_print_operand (buf, TREE_OPERAND (arg, 1), argnum, uses,
+ must_be_reg, must_not_be_reg, e);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+/* APPLE LOCAL end CW asm blocks */
+
+/* Return the mangling of TYPE if it is an extended fundamental type. */
+
+static const char *
+/* APPLE LOCAL mangle_type 7105099 */
+ix86_mangle_type (tree type)
+{
+ /* APPLE LOCAL begin mangle_type 7105099 */
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
+ && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
+ return NULL;
+
+ /* APPLE LOCAL end mangle_type 7105099 */
+ switch (TYPE_MODE (type))
+ {
+ case TFmode:
+ /* __float128 is "g". */
+ return "g";
+ case XFmode:
+ /* "long double" or __float80 is "e". */
+ return "e";
+ default:
+ return NULL;
+ }
+}
+
+/* For 32-bit code we can save PIC register setup by using
+ __stack_chk_fail_local hidden function instead of calling
+ __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
+ register, so it is better to call __stack_chk_fail directly. */
+
+static tree
+ix86_stack_protect_fail (void)
+{
+ return TARGET_64BIT
+ ? default_external_stack_protect_fail ()
+ : default_hidden_stack_protect_fail ();
+}
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations.
+
+ ??? All x86 object file formats are capable of representing this.
+ After all, the relocation needed is the same as for the call insn.
+ Whether or not a particular assembler allows us to enter such, I
+ guess we'll have to see. */
+int
+asm_preferred_eh_data_format (int code, int global)
+{
+ if (flag_pic)
+ {
+ int type = DW_EH_PE_sdata8;
+ if (!TARGET_64BIT
+ || ix86_cmodel == CM_SMALL_PIC
+ || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
+ type = DW_EH_PE_sdata4;
+ return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
+ }
+ if (ix86_cmodel == CM_SMALL
+ || (ix86_cmodel == CM_MEDIUM && code))
+ return DW_EH_PE_udata4;
+ return DW_EH_PE_absptr;
+}
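+
+/* For example, 32-bit PIC yields DW_EH_PE_pcrel | DW_EH_PE_sdata4,
+   with DW_EH_PE_indirect added for global symbols; small-model
+   non-PIC code gets DW_EH_PE_udata4, and the default is absptr.  */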
+
+#include "gt-i386.h"
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386.h b/gcc-4.2.1-5666.3/gcc/config/i386/i386.h
new file mode 100644
index 000000000..df7703b36
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386.h
@@ -0,0 +1,3230 @@
+/* Definitions of target machine for GCC for IA-32.
+ Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* The purpose of this file is to define the characteristics of the i386,
+ independent of assembler syntax or operating system.
+
+ Three other files build on this one to describe a specific assembler syntax:
+ bsd386.h, att386.h, and sun386.h.
+
+ The actual tm.h file for a particular system should include
+ this file, and then the file for the appropriate assembler syntax.
+
+ Many macros that specify assembler syntax are omitted entirely from
+ this file because they really belong in the files for particular
+ assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
+ ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
+ that start with ASM_ or end in ASM_OP. */
+
+/* Define the specific costs for a given cpu */
+
+struct processor_costs {
+ const int add; /* cost of an add instruction */
+ const int lea; /* cost of a lea instruction */
+ const int shift_var; /* variable shift costs */
+ const int shift_const; /* constant shift costs */
+ const int mult_init[5]; /* cost of starting a multiply
+				   in QImode, HImode, SImode, DImode, TImode */
+ const int mult_bit; /* cost of multiply per each bit set */
+ const int divide[5]; /* cost of a divide/mod
+				   in QImode, HImode, SImode, DImode, TImode */
+ int movsx; /* The cost of movsx operation. */
+ int movzx; /* The cost of movzx operation. */
+ const int large_insn; /* insns larger than this cost more */
+ const int move_ratio; /* The threshold of number of scalar
+ memory-to-memory move insns. */
+ const int movzbl_load; /* cost of loading using movzbl */
+ const int int_load[3]; /* cost of loading integer registers
+ in QImode, HImode and SImode relative
+ to reg-reg move (2). */
+ const int int_store[3]; /* cost of storing integer register
+ in QImode, HImode and SImode */
+ const int fp_move; /* cost of reg,reg fld/fst */
+ const int fp_load[3]; /* cost of loading FP register
+ in SFmode, DFmode and XFmode */
+ const int fp_store[3]; /* cost of storing FP register
+ in SFmode, DFmode and XFmode */
+ const int mmx_move; /* cost of moving MMX register. */
+ const int mmx_load[2]; /* cost of loading MMX register
+ in SImode and DImode */
+ const int mmx_store[2]; /* cost of storing MMX register
+ in SImode and DImode */
+ const int sse_move; /* cost of moving SSE register. */
+ const int sse_load[3]; /* cost of loading SSE register
+				   in SImode, DImode and TImode */
+ const int sse_store[3]; /* cost of storing SSE register
+				   in SImode, DImode and TImode */
+ const int mmxsse_to_integer; /* cost of moving mmxsse register to
+ integer and vice versa. */
+ const int prefetch_block; /* bytes moved to cache for prefetch. */
+ const int simultaneous_prefetches; /* number of parallel prefetch
+ operations. */
+ const int branch_cost; /* Default value for BRANCH_COST. */
+ const int fadd; /* cost of FADD and FSUB instructions. */
+ const int fmul; /* cost of FMUL instruction. */
+ const int fdiv; /* cost of FDIV instruction. */
+ const int fabs; /* cost of FABS instruction. */
+ const int fchs; /* cost of FCHS instruction. */
+ const int fsqrt; /* cost of FSQRT instruction. */
+};
+
+extern const struct processor_costs *ix86_cost;
+
+/* Macros used in the machine description to test the flags. */
+
+/* configure can arrange to make this 2, to force a 486. */
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_DEFAULT_generic
+#endif
+
+#ifndef TARGET_FPMATH_DEFAULT
+#define TARGET_FPMATH_DEFAULT \
+ (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
+#endif
+
+#define TARGET_FLOAT_RETURNS_IN_80387 TARGET_FLOAT_RETURNS
+/* APPLE LOCAL begin AT&T-style stub 4164563 */
+#define MACHOPIC_NL_SYMBOL_PTR_SECTION ".section __IMPORT,__pointers,non_lazy_symbol_pointers"
+/* APPLE LOCAL end AT&T-style stub 4164563 */
+
+/* 64bit Sledgehammer mode. For libgcc2 we make sure this is a
+ compile-time constant. */
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __x86_64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#else
+#ifndef TARGET_BI_ARCH
+#undef TARGET_64BIT
+#if TARGET_64BIT_DEFAULT
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+#endif
+
+#define HAS_LONG_COND_BRANCH 1
+#define HAS_LONG_UNCOND_BRANCH 1
+
+#define TARGET_386 (ix86_tune == PROCESSOR_I386)
+#define TARGET_486 (ix86_tune == PROCESSOR_I486)
+#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
+#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
+#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
+#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
+#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
+#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
+#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
+#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
+/* APPLE LOCAL mainline */
+#define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2)
+#define TARGET_GENERIC32 (ix86_tune == PROCESSOR_GENERIC32)
+#define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
+#define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
+
+#define TUNEMASK (1 << ix86_tune)
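+/* Each x86_* constant below is a bitmask indexed by processor: a
+   feature applies to the current -mtune target when its TUNEMASK
+   bit is set, as in (x86_use_leave & TUNEMASK).  */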
+extern const int x86_use_leave, x86_push_memory, x86_zero_extend_with_and;
+extern const int x86_use_bit_test, x86_cmove, x86_deep_branch;
+extern const int x86_branch_hints, x86_unroll_strlen;
+extern const int x86_double_with_add, x86_partial_reg_stall, x86_movx;
+extern const int x86_use_himode_fiop, x86_use_simode_fiop;
+extern const int x86_use_mov0, x86_use_cltd, x86_read_modify_write;
+extern const int x86_read_modify, x86_split_long_moves;
+extern const int x86_promote_QImode, x86_single_stringop, x86_fast_prefix;
+extern const int x86_himode_math, x86_qimode_math, x86_promote_qi_regs;
+extern const int x86_promote_hi_regs, x86_integer_DFmode_moves;
+extern const int x86_add_esp_4, x86_add_esp_8, x86_sub_esp_4, x86_sub_esp_8;
+extern const int x86_partial_reg_dependency, x86_memory_mismatch_stall;
+extern const int x86_accumulate_outgoing_args, x86_prologue_using_move;
+extern const int x86_epilogue_using_move, x86_decompose_lea;
+extern const int x86_arch_always_fancy_math_387, x86_shift1;
+extern const int x86_sse_partial_reg_dependency, x86_sse_split_regs;
+extern const int x86_sse_typeless_stores, x86_sse_load0_by_pxor;
+extern const int x86_use_ffreep;
+extern const int x86_inter_unit_moves, x86_schedule;
+extern const int x86_use_bt;
+/* APPLE LOCAL override options */
+extern int x86_cmpxchg, x86_cmpxchg8b, x86_cmpxchg16b, x86_xadd;
+extern const int x86_use_incdec;
+extern const int x86_pad_returns;
+/* APPLE LOCAL mainline bswap/local override options */
+extern int x86_bswap;
+extern const int x86_partial_flag_reg_stall;
+extern int x86_prefetch_sse;
+
+#define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
+#define TARGET_PUSH_MEMORY (x86_push_memory & TUNEMASK)
+#define TARGET_ZERO_EXTEND_WITH_AND (x86_zero_extend_with_and & TUNEMASK)
+#define TARGET_USE_BIT_TEST (x86_use_bit_test & TUNEMASK)
+#define TARGET_UNROLL_STRLEN (x86_unroll_strlen & TUNEMASK)
+/* For sane SSE instruction set generation we need the fcomi instruction.
+   It is safe to enable all CMOVE instructions.  */
+#define TARGET_CMOVE ((x86_cmove & (1 << ix86_arch)) || TARGET_SSE)
+#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387)
+#define TARGET_DEEP_BRANCH_PREDICTION (x86_deep_branch & TUNEMASK)
+#define TARGET_BRANCH_PREDICTION_HINTS (x86_branch_hints & TUNEMASK)
+#define TARGET_DOUBLE_WITH_ADD (x86_double_with_add & TUNEMASK)
+#define TARGET_USE_SAHF ((x86_use_sahf & TUNEMASK) && !TARGET_64BIT)
+#define TARGET_MOVX (x86_movx & TUNEMASK)
+#define TARGET_PARTIAL_REG_STALL (x86_partial_reg_stall & TUNEMASK)
+#define TARGET_PARTIAL_FLAG_REG_STALL (x86_partial_flag_reg_stall & TUNEMASK)
+#define TARGET_USE_HIMODE_FIOP (x86_use_himode_fiop & TUNEMASK)
+#define TARGET_USE_SIMODE_FIOP (x86_use_simode_fiop & TUNEMASK)
+#define TARGET_USE_MOV0 (x86_use_mov0 & TUNEMASK)
+#define TARGET_USE_CLTD (x86_use_cltd & TUNEMASK)
+#define TARGET_SPLIT_LONG_MOVES (x86_split_long_moves & TUNEMASK)
+#define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & TUNEMASK)
+#define TARGET_READ_MODIFY (x86_read_modify & TUNEMASK)
+#define TARGET_PROMOTE_QImode (x86_promote_QImode & TUNEMASK)
+#define TARGET_FAST_PREFIX (x86_fast_prefix & TUNEMASK)
+#define TARGET_SINGLE_STRINGOP (x86_single_stringop & TUNEMASK)
+#define TARGET_QIMODE_MATH (x86_qimode_math & TUNEMASK)
+#define TARGET_HIMODE_MATH (x86_himode_math & TUNEMASK)
+#define TARGET_PROMOTE_QI_REGS (x86_promote_qi_regs & TUNEMASK)
+#define TARGET_PROMOTE_HI_REGS (x86_promote_hi_regs & TUNEMASK)
+#define TARGET_ADD_ESP_4 (x86_add_esp_4 & TUNEMASK)
+#define TARGET_ADD_ESP_8 (x86_add_esp_8 & TUNEMASK)
+#define TARGET_SUB_ESP_4 (x86_sub_esp_4 & TUNEMASK)
+#define TARGET_SUB_ESP_8 (x86_sub_esp_8 & TUNEMASK)
+#define TARGET_INTEGER_DFMODE_MOVES (x86_integer_DFmode_moves & TUNEMASK)
+#define TARGET_PARTIAL_REG_DEPENDENCY (x86_partial_reg_dependency & TUNEMASK)
+#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
+ (x86_sse_partial_reg_dependency & TUNEMASK)
+#define TARGET_SSE_SPLIT_REGS (x86_sse_split_regs & TUNEMASK)
+#define TARGET_SSE_TYPELESS_STORES (x86_sse_typeless_stores & TUNEMASK)
+#define TARGET_SSE_LOAD0_BY_PXOR (x86_sse_load0_by_pxor & TUNEMASK)
+#define TARGET_MEMORY_MISMATCH_STALL (x86_memory_mismatch_stall & TUNEMASK)
+#define TARGET_PROLOGUE_USING_MOVE (x86_prologue_using_move & TUNEMASK)
+#define TARGET_EPILOGUE_USING_MOVE (x86_epilogue_using_move & TUNEMASK)
+#define TARGET_PREFETCH_SSE (x86_prefetch_sse)
+#define TARGET_SHIFT1 (x86_shift1 & TUNEMASK)
+#define TARGET_USE_FFREEP (x86_use_ffreep & TUNEMASK)
+#define TARGET_REP_MOVL_OPTIMAL (x86_rep_movl_optimal & TUNEMASK)
+#define TARGET_INTER_UNIT_MOVES (x86_inter_unit_moves & TUNEMASK)
+#define TARGET_FOUR_JUMP_LIMIT (x86_four_jump_limit & TUNEMASK)
+#define TARGET_SCHEDULE (x86_schedule & TUNEMASK)
+#define TARGET_USE_BT (x86_use_bt & TUNEMASK)
+#define TARGET_USE_INCDEC (x86_use_incdec & TUNEMASK)
+#define TARGET_PAD_RETURNS (x86_pad_returns & TUNEMASK)
+
+#define ASSEMBLER_DIALECT (ix86_asm_dialect)
+
+#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
+#define TARGET_MIX_SSE_I387 ((ix86_fpmath & FPMATH_SSE) \
+ && (ix86_fpmath & FPMATH_387))
+/* APPLE LOCAL mainline */
+#define TARGET_SSSE3 ((target_flags & MASK_SSSE3) != 0)
+
+#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU)
+#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2)
+#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS)
+#define TARGET_SUN_TLS (ix86_tls_dialect == TLS_DIALECT_SUN)
+
+#define TARGET_CMPXCHG (x86_cmpxchg & (1 << ix86_arch))
+#define TARGET_CMPXCHG8B (x86_cmpxchg8b & (1 << ix86_arch))
+#define TARGET_CMPXCHG16B (x86_cmpxchg16b & (1 << ix86_arch))
+#define TARGET_XADD (x86_xadd & (1 << ix86_arch))
+/* APPLE LOCAL mainline bswap */
+#define TARGET_BSWAP (x86_bswap & (1 << ix86_arch))
+
+#ifndef TARGET_64BIT_DEFAULT
+#define TARGET_64BIT_DEFAULT 0
+#endif
+#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
+#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
+#endif
+
+/* Once GDB has been enhanced to deal with functions without frame
+ pointers, we can change this to allow for elimination of
+ the frame pointer in leaf functions. */
+#define TARGET_DEFAULT 0
+/* APPLE LOCAL begin mainline */
+/* Extra bits to force. */
+#define TARGET_SUBTARGET32_DEFAULT 0
+
+#define TARGET_SUBTARGET64_DEFAULT 0
+/* APPLE LOCAL end mainline */
+
+/* This is not really a target flag, but is done this way so that
+ it's analogous to similar code for Mach-O on PowerPC. darwin.h
+ redefines this to 1. */
+#define TARGET_MACHO 0
+/* APPLE LOCAL begin mach-o cleanup */
+#define MACHOPIC_INDIRECT 0
+#define MACHOPIC_PURE 0
+/* APPLE LOCAL end mach-o cleanup */
+
+/* Subtargets may reset this to 1 in order to enable 96-bit long double
+ with the rounding mode forced to 53 bits. */
+#define TARGET_96_ROUND_53_LONG_DOUBLE 0
+
+/* Sometimes certain combinations of command options do not make
+ sense on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
+
+#define OVERRIDE_OPTIONS override_options ()
+
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
+ optimization_options ((LEVEL), (SIZE))
+
+/* -march=native handling only makes sense with compiler running on
+ an x86 or x86_64 chip. If changing this condition, also change
+ the condition in driver-i386.c. */
+#if defined(__i386__) || defined(__x86_64__)
+/* In driver-i386.c. */
+extern const char *host_detect_local_cpu (int argc, const char **argv);
+#define EXTRA_SPEC_FUNCTIONS \
+ { "local_cpu_detect", host_detect_local_cpu },
+#define HAVE_LOCAL_CPU_DETECT
+#endif
+
+/* Support for configure-time defaults of some command line options.
+ The order here is important so that -march doesn't squash the
+ tune or cpu values. */
+#define OPTION_DEFAULT_SPECS \
+ {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
+ {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
+ {"arch", "%{!march=*:-march=%(VALUE)}"}
+
+/* Specs for the compiler proper */
+
+#ifndef CC1_CPU_SPEC
+#define CC1_CPU_SPEC_1 "\
+%{!mtune*: \
+%{m386:mtune=i386 \
+%n`-m386' is deprecated. Use `-march=i386' or `-mtune=i386' instead.\n} \
+%{m486:-mtune=i486 \
+%n`-m486' is deprecated. Use `-march=i486' or `-mtune=i486' instead.\n} \
+%{mpentium:-mtune=pentium \
+%n`-mpentium' is deprecated. Use `-march=pentium' or `-mtune=pentium' instead.\n} \
+%{mpentiumpro:-mtune=pentiumpro \
+%n`-mpentiumpro' is deprecated. Use `-march=pentiumpro' or `-mtune=pentiumpro' instead.\n} \
+%{mcpu=*:-mtune=%* \
+%n`-mcpu=' is deprecated. Use `-mtune=' or '-march=' instead.\n}} \
+%<mcpu=* \
+%{mintel-syntax:-masm=intel \
+%n`-mintel-syntax' is deprecated. Use `-masm=intel' instead.\n} \
+%{mno-intel-syntax:-masm=att \
+%n`-mno-intel-syntax' is deprecated. Use `-masm=att' instead.\n}"
+
+#ifndef HAVE_LOCAL_CPU_DETECT
+#define CC1_CPU_SPEC CC1_CPU_SPEC_1
+#else
+#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
+"%{march=native:%<march=native %:local_cpu_detect(arch) \
+ %{!mtune=*:%<mtune=native %:local_cpu_detect(tune)}} \
+%{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
+#endif
+#endif
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ size_t arch_len = strlen (ix86_arch_string); \
+ size_t tune_len = strlen (ix86_tune_string); \
+ int last_arch_char = ix86_arch_string[arch_len - 1]; \
+ int last_tune_char = ix86_tune_string[tune_len - 1]; \
+ \
+ if (TARGET_64BIT) \
+ { \
+ builtin_assert ("cpu=x86_64"); \
+ builtin_assert ("machine=x86_64"); \
+ builtin_define ("__amd64"); \
+ builtin_define ("__amd64__"); \
+ builtin_define ("__x86_64"); \
+ builtin_define ("__x86_64__"); \
+ } \
+ else \
+ { \
+ builtin_assert ("cpu=i386"); \
+ builtin_assert ("machine=i386"); \
+ builtin_define_std ("i386"); \
+ } \
+ \
+ /* Built-ins based on -mtune= (or -march= if no \
+ -mtune= given). */ \
+ if (TARGET_386) \
+ builtin_define ("__tune_i386__"); \
+ else if (TARGET_486) \
+ builtin_define ("__tune_i486__"); \
+ else if (TARGET_PENTIUM) \
+ { \
+ builtin_define ("__tune_i586__"); \
+ builtin_define ("__tune_pentium__"); \
+ if (last_tune_char == 'x') \
+ builtin_define ("__tune_pentium_mmx__"); \
+ } \
+ else if (TARGET_PENTIUMPRO) \
+ { \
+ builtin_define ("__tune_i686__"); \
+ builtin_define ("__tune_pentiumpro__"); \
+ switch (last_tune_char) \
+ { \
+ case '3': \
+ builtin_define ("__tune_pentium3__"); \
+ /* FALLTHRU */ \
+ case '2': \
+ builtin_define ("__tune_pentium2__"); \
+ break; \
+ } \
+ } \
+ else if (TARGET_K6) \
+ { \
+ builtin_define ("__tune_k6__"); \
+ if (last_tune_char == '2') \
+ builtin_define ("__tune_k6_2__"); \
+ else if (last_tune_char == '3') \
+ builtin_define ("__tune_k6_3__"); \
+ } \
+ else if (TARGET_ATHLON) \
+ { \
+ builtin_define ("__tune_athlon__"); \
+ /* Only plain "athlon" lacks SSE. */ \
+ if (last_tune_char != 'n') \
+ builtin_define ("__tune_athlon_sse__"); \
+ } \
+ else if (TARGET_K8) \
+ builtin_define ("__tune_k8__"); \
+ else if (TARGET_PENTIUM4) \
+ builtin_define ("__tune_pentium4__"); \
+ else if (TARGET_NOCONA) \
+ builtin_define ("__tune_nocona__"); \
+ /* APPLE LOCAL begin mainline */ \
+ else if (TARGET_CORE2) \
+ builtin_define ("__tune_core2__"); \
+ /* APPLE LOCAL end mainline */ \
+ \
+ if (TARGET_MMX) \
+ builtin_define ("__MMX__"); \
+ if (TARGET_3DNOW) \
+ builtin_define ("__3dNOW__"); \
+ if (TARGET_3DNOW_A) \
+ builtin_define ("__3dNOW_A__"); \
+ if (TARGET_SSE) \
+ builtin_define ("__SSE__"); \
+ if (TARGET_SSE2) \
+ builtin_define ("__SSE2__"); \
+ if (TARGET_SSE3) \
+ builtin_define ("__SSE3__"); \
+ /* APPLE LOCAL begin mainline */ \
+ if (TARGET_SSSE3) \
+ builtin_define ("__SSSE3__"); \
+ /* APPLE LOCAL end mainline */ \
+ /* APPLE LOCAL begin 5612787 mainline sse4 */ \
+ if (TARGET_SSE4_1) \
+ builtin_define ("__SSE4_1__"); \
+ if (TARGET_SSE4_2) \
+ builtin_define ("__SSE4_2__"); \
+ if (TARGET_SSE4A) \
+ builtin_define ("__SSE4A__"); \
+ /* APPLE LOCAL end 5612787 mainline sse4 */ \
+ if (TARGET_SSE_MATH && TARGET_SSE) \
+ builtin_define ("__SSE_MATH__"); \
+ if (TARGET_SSE_MATH && TARGET_SSE2) \
+ builtin_define ("__SSE2_MATH__"); \
+ \
+ /* Built-ins based on -march=. */ \
+ if (ix86_arch == PROCESSOR_I486) \
+ { \
+ builtin_define ("__i486"); \
+ builtin_define ("__i486__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_PENTIUM) \
+ { \
+ builtin_define ("__i586"); \
+ builtin_define ("__i586__"); \
+ builtin_define ("__pentium"); \
+ builtin_define ("__pentium__"); \
+ if (last_arch_char == 'x') \
+ builtin_define ("__pentium_mmx__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_PENTIUMPRO) \
+ { \
+ builtin_define ("__i686"); \
+ builtin_define ("__i686__"); \
+ builtin_define ("__pentiumpro"); \
+ builtin_define ("__pentiumpro__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_K6) \
+ { \
+ \
+ builtin_define ("__k6"); \
+ builtin_define ("__k6__"); \
+ if (last_arch_char == '2') \
+ builtin_define ("__k6_2__"); \
+ else if (last_arch_char == '3') \
+ builtin_define ("__k6_3__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_ATHLON) \
+ { \
+ builtin_define ("__athlon"); \
+ builtin_define ("__athlon__"); \
+ /* Only plain "athlon" lacks SSE. */ \
+ if (last_arch_char != 'n') \
+ builtin_define ("__athlon_sse__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_K8) \
+ { \
+ builtin_define ("__k8"); \
+ builtin_define ("__k8__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_PENTIUM4) \
+ { \
+ builtin_define ("__pentium4"); \
+ builtin_define ("__pentium4__"); \
+ } \
+ else if (ix86_arch == PROCESSOR_NOCONA) \
+ { \
+ builtin_define ("__nocona"); \
+ builtin_define ("__nocona__"); \
+ } \
+ /* APPLE LOCAL begin mainline */ \
+ else if (ix86_arch == PROCESSOR_CORE2) \
+ { \
+ builtin_define ("__core2"); \
+ builtin_define ("__core2__"); \
+ } \
+ /* APPLE LOCAL end mainline */ \
+ } \
+ while (0)
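+
+/* For example, -march=core2 with no -mtune defines __core2,
+   __core2__ and __tune_core2__ via the macro above, alongside the
+   ISA macros (__MMX__, __SSE__, ...) implied by the enabled flags.  */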
+
+#define TARGET_CPU_DEFAULT_i386 0
+#define TARGET_CPU_DEFAULT_i486 1
+#define TARGET_CPU_DEFAULT_pentium 2
+#define TARGET_CPU_DEFAULT_pentium_mmx 3
+#define TARGET_CPU_DEFAULT_pentiumpro 4
+#define TARGET_CPU_DEFAULT_pentium2 5
+#define TARGET_CPU_DEFAULT_pentium3 6
+#define TARGET_CPU_DEFAULT_pentium4 7
+#define TARGET_CPU_DEFAULT_k6 8
+#define TARGET_CPU_DEFAULT_k6_2 9
+#define TARGET_CPU_DEFAULT_k6_3 10
+#define TARGET_CPU_DEFAULT_athlon 11
+#define TARGET_CPU_DEFAULT_athlon_sse 12
+#define TARGET_CPU_DEFAULT_k8 13
+#define TARGET_CPU_DEFAULT_pentium_m 14
+#define TARGET_CPU_DEFAULT_prescott 15
+#define TARGET_CPU_DEFAULT_nocona 16
+#define TARGET_CPU_DEFAULT_generic 17
+/* APPLE LOCAL mainline */
+#define TARGET_CPU_DEFAULT_core2 18
+/* APPLE LOCAL begin mainline */
+#define TARGET_CPU_DEFAULT_NAMES {"i386", "i486", "pentium", "pentium-mmx",\
+ "pentiumpro", "pentium2", "pentium3", \
+ "pentium4", "k6", "k6-2", "k6-3",\
+ "athlon", "athlon-4", "k8", \
+ "pentium-m", "prescott", "nocona", \
+ "generic", "core2" }
+/* APPLE LOCAL end mainline */
+
+#ifndef CC1_SPEC
+#define CC1_SPEC "%(cc1_cpu) "
+#endif
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GCC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define EXTRA_SPECS \
+ { "cc1_cpu", CC1_CPU_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+/* target machine storage layout */
+
+#define LONG_DOUBLE_TYPE_SIZE 80
+
+/* Set the value of FLT_EVAL_METHOD in float.h. When using only the
+ FPU, assume that the fpcw is set to extended precision; when using
+ only SSE, rounding is correct; when using both SSE and the FPU,
+ the rounding precision is indeterminate, since either may be chosen
+ apparently at random. */
+#define TARGET_FLT_EVAL_METHOD \
+ (TARGET_MIX_SSE_I387 ? -1 : TARGET_SSE_MATH ? 0 : 2)
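+
+/* Thus -mfpmath=387 gives FLT_EVAL_METHOD 2, -mfpmath=sse gives 0,
+   and -mfpmath=sse,387 gives -1 (indeterminate).  */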
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define FLOAT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_LONG_TYPE_SIZE 64
+
+#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT
+#define MAX_BITS_PER_WORD 64
+#else
+#define MAX_BITS_PER_WORD 32
+#endif
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is true on the 80386. */
+
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is not true on the 80386. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+/* Not true for 80386 */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#ifdef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#else
+#define MIN_UNITS_PER_WORD 4
+#endif
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY BITS_PER_WORD
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+/* APPLE LOCAL begin compiler should obey -mpreferred-stack-boundary (radar 3232990) */
+/* prefer * #define STACK_BOUNDARY ((ix86_preferred_stack_boundary > 128) ? 128 : ix86_preferred_stack_boundary) */
+/* We're going to extremes to yield a result of indeterminate
+ signedness here; this macro will be expanded in signed and
+ unsigned contexts, and mixed signedness induces fatal
+ warnings. Radar 3941684. */
+#define STACK_BOUNDARY ((ix86_preferred_stack_boundary >= 128) ? 128 : \
+ (ix86_preferred_stack_boundary == 64) ? 64 : 32)
+/* APPLE LOCAL end compiler should obey -mpreferred-stack-boundary (radar 3232990) */
+
+/* Boundary (in *bits*) on which the stack pointer prefers to be
+ aligned; the compiler cannot rely on having this alignment. */
+#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary
+
+/* APPLE LOCAL begin radar 4216496, 4229407, 4120689, 4095567 */
+#define SAVE_PREFERRED_STACK_BOUNDARY ix86_save_preferred_stack_boundary
+/* APPLE LOCAL end radar 4216496, 4229407, 4120689, 4095567 */
+
+/* As of July 2001, many runtimes do not align the stack properly when
+ entering main. This causes expand_main_function to forcibly align
+ the stack, which results in aligned frames for functions called from
+ main, though it does nothing for the alignment of main itself. */
+#define FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN \
+ (ix86_preferred_stack_boundary > STACK_BOUNDARY && !TARGET_64BIT)
+
+/* Minimum allocation boundary for the code of a function. */
+#define FUNCTION_BOUNDARY 8
+
+/* C++ stores the virtual bit in the lowest bit of function pointers. */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn
+
+/* Alignment of field after `int : 0' in a structure. */
+
+#define EMPTY_FIELD_BOUNDARY BITS_PER_WORD
+
+/* Minimum size in bits of the largest boundary to which any
+ and all fundamental data types supported by the hardware
+ might need to be aligned. No data type wants to be aligned
+ rounder than this.
+
+ Pentium+ prefers DFmode values to be aligned to 64 bit boundary
+ and Pentium Pro XFmode values at 128 bit boundaries. */
+
+#define BIGGEST_ALIGNMENT 128
+
+/* Decide whether a variable of mode MODE should be 128 bit aligned. */
+#define ALIGN_MODE_128(MODE) \
+ ((MODE) == XFmode || SSE_REG_MODE_P (MODE))
+
+/* The published ABIs say that doubles should be aligned on word
+ boundaries, so lower the alignment for structure fields unless
+ -malign-double is set. */
+
+/* ??? Blah -- this macro is used directly by libobjc. Since it
+ supports no vector modes, cut out the complexity and fall back
+ on BIGGEST_FIELD_ALIGNMENT. */
+#ifdef IN_TARGET_LIBS
+#ifdef __x86_64__
+#define BIGGEST_FIELD_ALIGNMENT 128
+#else
+#define BIGGEST_FIELD_ALIGNMENT 32
+#endif
+#else
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ x86_field_alignment (FIELD, COMPUTED)
+#endif
+
+/* If defined, a C expression to compute the alignment given to a
+ constant that is being placed in memory. EXP is the constant
+ and ALIGN is the alignment that the object would ordinarily have.
+ The value of this macro is used instead of that alignment to align
+ the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that `strcpy' calls that copy
+ constants can be done inline. */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) ix86_constant_alignment ((EXP), (ALIGN))
+
+/* If defined, a C expression to compute the alignment for a static
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+
+#define DATA_ALIGNMENT(TYPE, ALIGN) ix86_data_alignment ((TYPE), (ALIGN))
+
+/* If defined, a C expression to compute the alignment for a local
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. */
+
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) ix86_local_alignment ((TYPE), (ALIGN))
+
+/* If defined, a C expression that gives the alignment boundary, in
+ bits, of an argument with the specified mode and type. If it is
+ not defined, `PARM_BOUNDARY' is used for all arguments. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ ix86_function_arg_boundary ((MODE), (TYPE))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* If bit field type is int, don't let it cross an int,
+ and give entire struct the alignment of an int. */
+/* Required on the 386 since it doesn't have bit-field insns. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Standard register usage. */
+
+/* This processor has special stack-like registers. See reg-stack.c
+ for details. */
+
+#define STACK_REGS
+#define IS_STACK_MODE(MODE) \
+ (((MODE) == SFmode && (!TARGET_SSE || !TARGET_SSE_MATH)) \
+ || ((MODE) == DFmode && (!TARGET_SSE2 || !TARGET_SSE_MATH)) \
+ || (MODE) == XFmode)
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ In the 80386 we give the 8 general purpose registers the numbers 0-7.
+ We number the floating point registers 8-15.
+ Note that registers 0-7 can be accessed as a short or int,
+ while only 0-3 may be used with byte `mov' instructions.
+
+ Reg 16 does not correspond to any hardware register, but instead
+ appears in the RTL as an argument pointer prior to reload, and is
+ eliminated during reloading in favor of either the stack or frame
+ pointer. */
+
+#define FIRST_PSEUDO_REGISTER 53
+
+/* Number of hardware registers that go into the DWARF-2 unwind info.
+ If not defined, equals FIRST_PSEUDO_REGISTER. */
+
+#define DWARF_FRAME_REGISTERS 17
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the 80386, the stack pointer is such, as is the arg pointer.
+
+ The value is zero if the register is not fixed on either 32 or
+   64 bit targets, one if the register is fixed on both 32 and 64
+   bit targets, two if it is only fixed on 32-bit targets and three
+   if it's only fixed on 64-bit targets.
+ Proper values are computed in the CONDITIONAL_REGISTER_USAGE.
+ */
+#define FIXED_REGISTERS \
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
+{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \
+/*arg,flags,fpsr,dir,frame*/ \
+ 1, 1, 1, 1, 1, \
+/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
+ 2, 2, 2, 2, 2, 2, 2, 2, \
+/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
+ 2, 2, 2, 2, 2, 2, 2, 2}
+
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+
+ The value is zero if the register is not call used on either 32 or
+   64 bit targets, one if the register is call used on both 32 and 64
+   bit targets, two if it is only call used on 32-bit targets and three
+   if it's only call used on 64-bit targets.
+ Proper values are computed in the CONDITIONAL_REGISTER_USAGE.
+*/
+#define CALL_USED_REGISTERS \
+/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
+{ 1, 1, 1, 0, 3, 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+/*arg,flags,fpsr,dir,frame*/ \
+ 1, 1, 1, 1, 1, \
+/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+/*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
+ 1, 1, 1, 1, 2, 2, 2, 2, \
+/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
+ 1, 1, 1, 1, 1, 1, 1, 1} \
+
+/* Order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS. List frame pointer
+ late and fixed registers last. Note that, in general, we prefer
+ registers listed in CALL_USED_REGISTERS, keeping the others
+ available for storage of persistent values.
+
+   The ORDER_REGS_FOR_LOCAL_ALLOC macro actually overwrites the order,
+   so this is just an empty initializer for the array.  */
+
+#define REG_ALLOC_ORDER \
+{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
+ 48, 49, 50, 51, 52 }
+
+/* ORDER_REGS_FOR_LOCAL_ALLOC is a macro which permits reg_alloc_order
+ to be rearranged based on a particular function. When using sse math,
+   we want to allocate SSE before x87 registers and vice versa.  */
+
+#define ORDER_REGS_FOR_LOCAL_ALLOC x86_order_regs_for_local_alloc ()
+
+
+/* Macro to conditionally modify fixed_regs/call_used_regs. */
+#define CONDITIONAL_REGISTER_USAGE \
+do { \
+ int i; \
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
+ { \
+ if (fixed_regs[i] > 1) \
+ fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2)); \
+ if (call_used_regs[i] > 1) \
+ call_used_regs[i] = (call_used_regs[i] \
+ == (TARGET_64BIT ? 3 : 2)); \
+ } \
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ } \
+ if (! TARGET_MMX) \
+ { \
+ int i; \
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i)) \
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
+ } \
+ if (! TARGET_SSE) \
+ { \
+ int i; \
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
+ if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i)) \
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
+ } \
+ if (! TARGET_80387 && ! TARGET_FLOAT_RETURNS_IN_80387) \
+ { \
+ int i; \
+ HARD_REG_SET x; \
+ COPY_HARD_REG_SET (x, reg_class_contents[(int)FLOAT_REGS]); \
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \
+ if (TEST_HARD_REG_BIT (x, i)) \
+ fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = ""; \
+ } \
+ if (! TARGET_64BIT) \
+ { \
+ int i; \
+ for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++) \
+ reg_names[i] = ""; \
+ for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++) \
+ reg_names[i] = ""; \
+ } \
+ } while (0)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+   Actually there are no two-word move instructions for consecutive
+   registers.  And only registers 0-3 may have mov byte instructions
+   applied to them.  */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (FP_REGNO_P (REGNO) || SSE_REGNO_P (REGNO) || MMX_REGNO_P (REGNO) \
+ ? (COMPLEX_MODE_P (MODE) ? 2 : 1) \
+ : ((MODE) == XFmode \
+ ? (TARGET_64BIT ? 2 : 3) \
+ : (MODE) == XCmode \
+ ? (TARGET_64BIT ? 4 : 6) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
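+
+/* For illustration, the generic arm of HARD_REGNO_NREGS is the usual
+   round-up division.  An editorial sketch, with the word size passed in
+   explicitly rather than taken from UNITS_PER_WORD; the function name is
+   hypothetical:  */
+#if 0
+static unsigned int
+regs_needed (unsigned int mode_size, unsigned int units_per_word)
+{
+  /* E.g. a 12-byte XFmode value with 4-byte words needs 3 registers,
+     matching the TARGET_64BIT ? 2 : 3 case above.  */
+  return (mode_size + units_per_word - 1) / units_per_word;
+}
+#endif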
+
+#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \
+ ((TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT) \
+ ? (FP_REGNO_P (REGNO) || SSE_REGNO_P (REGNO) || MMX_REGNO_P (REGNO) \
+ ? 0 \
+ : ((MODE) == XFmode || (MODE) == XCmode)) \
+ : 0)
+
+#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8)
+
+#define VALID_SSE2_REG_MODE(MODE) \
+ ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
+ || (MODE) == V2DImode || (MODE) == DFmode)
+
+#define VALID_SSE_REG_MODE(MODE) \
+ ((MODE) == TImode || (MODE) == V4SFmode || (MODE) == V4SImode \
+ || (MODE) == SFmode || (MODE) == TFmode)
+
+#define VALID_MMX_REG_MODE_3DNOW(MODE) \
+ ((MODE) == V2SFmode || (MODE) == SFmode)
+
+#define VALID_MMX_REG_MODE(MODE) \
+ ((MODE) == DImode || (MODE) == V8QImode || (MODE) == V4HImode \
+/* APPLE LOCAL 4656532 use V1DImode for _m64 */ \
+ || (MODE) == V2SImode || (MODE) == SImode || (MODE) == V1DImode)
+
+/* ??? No autovectorization into MMX or 3DNOW until we can reliably
+ place emms and femms instructions. */
+#define UNITS_PER_SIMD_WORD (TARGET_SSE ? 16 : UNITS_PER_WORD)
+
+#define VALID_FP_MODE_P(MODE) \
+ ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
+   || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode)
+
+#define VALID_INT_MODE_P(MODE) \
+ ((MODE) == QImode || (MODE) == HImode || (MODE) == SImode \
+ || (MODE) == DImode \
+ || (MODE) == CQImode || (MODE) == CHImode || (MODE) == CSImode \
+ || (MODE) == CDImode \
+ || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode \
+ || (MODE) == TFmode || (MODE) == TCmode)))
+
+/* Return true for modes passed in SSE registers. */
+#define SSE_REG_MODE_P(MODE) \
+ ((MODE) == TImode || (MODE) == V16QImode || (MODE) == TFmode \
+ || (MODE) == V8HImode || (MODE) == V2DFmode || (MODE) == V2DImode \
+ || (MODE) == V4SFmode || (MODE) == V4SImode)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ix86_hard_regno_mode_ok ((REGNO), (MODE))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) ix86_modes_tieable_p (MODE1, MODE2)
+
+/* It is possible to write patterns to move flags, but until someone
+   does it, avoid CCmode copies.  */
+#define AVOID_CCMODE_COPIES
+
+/* Specify the modes required to caller save a given hard regno.
+ We do this on i386 to prevent flags from being saved at all.
+
+ Kill any attempts to combine saving of modes. */
+
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+ (CC_REGNO_P (REGNO) ? VOIDmode \
+ : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
+ : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false)\
+ : (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \
+ : (MODE) == QImode && (REGNO) >= 4 && !TARGET_64BIT ? SImode \
+ : (MODE))
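+
+/* For example, under the expression above an HImode value is
+   caller-saved in SImode when partial register stalls are not a
+   concern, and a QImode value in register 4 or above is widened to
+   SImode on 32-bit targets, where only registers 0-3 have byte
+   forms.  */
+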
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* On the 386 the pc register is %eip, and is not usable as a general
+   register; the ordinary mov instructions won't work on it.  */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 7
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 6
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 20
+
+/* First floating point reg */
+#define FIRST_FLOAT_REG 8
+
+/* First & last stack-like regs */
+#define FIRST_STACK_REG FIRST_FLOAT_REG
+#define LAST_STACK_REG (FIRST_FLOAT_REG + 7)
+
+#define FIRST_SSE_REG (FRAME_POINTER_REGNUM + 1)
+#define LAST_SSE_REG (FIRST_SSE_REG + 7)
+
+#define FIRST_MMX_REG (LAST_SSE_REG + 1)
+#define LAST_MMX_REG (FIRST_MMX_REG + 7)
+
+#define FIRST_REX_INT_REG (LAST_MMX_REG + 1)
+#define LAST_REX_INT_REG (FIRST_REX_INT_REG + 7)
+
+#define FIRST_REX_SSE_REG (LAST_REX_INT_REG + 1)
+#define LAST_REX_SSE_REG (FIRST_REX_SSE_REG + 7)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED ix86_frame_pointer_required ()
+
+/* Override this in other tm.h files to cope with various OS lossage
+ requiring a frame pointer. */
+#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
+#define SUBTARGET_FRAME_POINTER_REQUIRED 0
+#endif
+
+/* Make sure we can access arbitrary call frames. */
+#define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses ()
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 16
+
+/* Register in which static-chain is passed to a function.
+   We use ECX as the static chain register for the 32-bit ABI.  In the
+   64-bit ABI, ECX is an argument register, so we use R10 instead.  */
+#define STATIC_CHAIN_REGNUM (TARGET_64BIT ? FIRST_REX_INT_REG + 10 - 8 : 2)
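+/* (FIRST_REX_INT_REG + 10 - 8 is the hard register number of R10,
+   since the REX registers r8..r15 start at FIRST_REX_INT_REG, and
+   hard register 2 is ECX in the register table above.)  */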
+
+/* Register to hold the addressing base for position independent
+   code access to data items.  We don't use the PIC pointer in 64-bit
+   mode.  Define the regnum to a dummy value to prevent gcc from
+   pessimizing code dealing with EBX.
+
+ To avoid clobbering a call-saved register unnecessarily, we renumber
+ the pic register when possible. The change is visible after the
+ prologue has been emitted. */
+
+#define REAL_PIC_OFFSET_TABLE_REGNUM 3
+
+#define PIC_OFFSET_TABLE_REGNUM \
+ /* APPLE LOCAL begin 5695218 */ \
+ ((TARGET_64BIT && ix86_cmodel == CM_SMALL_PIC) \
+ || !flag_pic ? INVALID_REGNUM \
+ : reload_completed && pic_offset_table_rtx ? REGNO (pic_offset_table_rtx) \
+ : REAL_PIC_OFFSET_TABLE_REGNUM) \
+ /* APPLE LOCAL end 5695218 */
+
+#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_"
+
+/* A C expression which can inhibit the returning of certain function
+ values in registers, based on the type of value. A nonzero value
+ says to return the function value in memory, just as large
+ structures are always returned. Here TYPE will be a C expression
+ of type `tree', representing the data type of the value.
+
+ Note that values of mode `BLKmode' must be explicitly handled by
+ this macro. Also, the option `-fpcc-struct-return' takes effect
+ regardless of this macro. On most systems, it is possible to
+ leave the macro undefined; this causes a default definition to be
+ used, whose value is the constant 1 for `BLKmode' values, and 0
+ otherwise.
+
+ Do not use this macro to indicate that structures and unions
+ should always be returned in memory. You should instead use
+ `DEFAULT_PCC_STRUCT_RETURN' to indicate this. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+ ix86_return_in_memory (TYPE)
+
+/* APPLE LOCAL begin radar 4781080 */
+#define OBJC_FPRETURN_MSGCALL(TYPE,WHICH) \
+ ix86_objc_fpreturn_msgcall (TYPE, WHICH)
+/* APPLE LOCAL end radar 4781080 */
+
+/* This is overridden by <cygwin.h>. */
+#define MS_AGGREGATE_RETURN 0
+
+/* This is overridden by <netware.h>. */
+#define KEEP_AGGREGATE_RETURN_POINTER 0
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union.
+
+ It might seem that class BREG is unnecessary, since no useful 386
+ opcode needs reg %ebx. But some systems pass args to the OS in ebx,
+ and the "b" register constraint is useful in asms for syscalls.
+
+ The flags and fpsr registers are in no class. */
+
+enum reg_class
+{
+ NO_REGS,
+ AREG, DREG, CREG, BREG, SIREG, DIREG,
+ AD_REGS, /* %eax/%edx for DImode */
+ Q_REGS, /* %eax %ebx %ecx %edx */
+ NON_Q_REGS, /* %esi %edi %ebp %esp */
+ INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */
+ LEGACY_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
+ GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp %r8 - %r15*/
+ FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */
+ FLOAT_REGS,
+ /* APPLE LOCAL 5612787 mainline sse4 */
+ SSE_FIRST_REG,
+ SSE_REGS,
+ MMX_REGS,
+ FP_TOP_SSE_REGS,
+ FP_SECOND_SSE_REGS,
+ FLOAT_SSE_REGS,
+ FLOAT_INT_REGS,
+ INT_SSE_REGS,
+ FLOAT_INT_SSE_REGS,
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+#define INTEGER_CLASS_P(CLASS) \
+ reg_class_subset_p ((CLASS), GENERAL_REGS)
+#define FLOAT_CLASS_P(CLASS) \
+ reg_class_subset_p ((CLASS), FLOAT_REGS)
+#define SSE_CLASS_P(CLASS) \
+ ((CLASS) == SSE_REGS)
+#define MMX_CLASS_P(CLASS) \
+ ((CLASS) == MMX_REGS)
+#define MAYBE_INTEGER_CLASS_P(CLASS) \
+ reg_classes_intersect_p ((CLASS), GENERAL_REGS)
+#define MAYBE_FLOAT_CLASS_P(CLASS) \
+ reg_classes_intersect_p ((CLASS), FLOAT_REGS)
+#define MAYBE_SSE_CLASS_P(CLASS) \
+ reg_classes_intersect_p (SSE_REGS, (CLASS))
+#define MAYBE_MMX_CLASS_P(CLASS) \
+ reg_classes_intersect_p (MMX_REGS, (CLASS))
+
+#define Q_CLASS_P(CLASS) \
+ reg_class_subset_p ((CLASS), Q_REGS)
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ "NO_REGS", \
+ "AREG", "DREG", "CREG", "BREG", \
+ "SIREG", "DIREG", \
+ "AD_REGS", \
+ "Q_REGS", "NON_Q_REGS", \
+ "INDEX_REGS", \
+ "LEGACY_REGS", \
+ "GENERAL_REGS", \
+ "FP_TOP_REG", "FP_SECOND_REG", \
+ "FLOAT_REGS", \
+ /* APPLE LOCAL 5612787 mainline sse4 */ \
+ "SSE_FIRST_REG", \
+ "SSE_REGS", \
+ "MMX_REGS", \
+ "FP_TOP_SSE_REGS", \
+ "FP_SECOND_SSE_REGS", \
+ "FLOAT_SSE_REGS", \
+ "FLOAT_INT_REGS", \
+ "INT_SSE_REGS", \
+ "FLOAT_INT_SSE_REGS", \
+ "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ { 0x00, 0x0 }, \
+ { 0x01, 0x0 }, { 0x02, 0x0 }, /* AREG, DREG */ \
+ { 0x04, 0x0 }, { 0x08, 0x0 }, /* CREG, BREG */ \
+ { 0x10, 0x0 }, { 0x20, 0x0 }, /* SIREG, DIREG */ \
+ { 0x03, 0x0 }, /* AD_REGS */ \
+ { 0x0f, 0x0 }, /* Q_REGS */ \
+ { 0x1100f0, 0x1fe0 }, /* NON_Q_REGS */ \
+ { 0x7f, 0x1fe0 }, /* INDEX_REGS */ \
+ { 0x1100ff, 0x0 }, /* LEGACY_REGS */ \
+ { 0x1100ff, 0x1fe0 }, /* GENERAL_REGS */ \
+ { 0x100, 0x0 }, { 0x0200, 0x0 },/* FP_TOP_REG, FP_SECOND_REG */\
+ { 0xff00, 0x0 }, /* FLOAT_REGS */ \
+/* APPLE LOCAL 5612787 mainline sse4 */ \
+ { 0x200000, 0x0 }, /* SSE_FIRST_REG */ \
+{ 0x1fe00000,0x1fe000 }, /* SSE_REGS */ \
+{ 0xe0000000, 0x1f }, /* MMX_REGS */ \
+{ 0x1fe00100,0x1fe000 }, /* FP_TOP_SSE_REG */ \
+{ 0x1fe00200,0x1fe000 }, /* FP_SECOND_SSE_REG */ \
+{ 0x1fe0ff00,0x1fe000 }, /* FLOAT_SSE_REGS */ \
+ { 0x1ffff, 0x1fe0 }, /* FLOAT_INT_REGS */ \
+{ 0x1fe100ff,0x1fffe0 }, /* INT_SSE_REGS */ \
+{ 0x1fe1ffff,0x1fffe0 }, /* FLOAT_INT_SSE_REGS */ \
+{ 0xffffffff,0x1fffff } \
+}
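+
+/* For illustration, each initializer above is a two-word bit set over
+   hard registers 0..52.  An editorial sketch of membership testing,
+   assuming the 32-bit mask words shown above; the function name is
+   hypothetical:  */
+#if 0
+static int
+class_contains (const unsigned int set[2], unsigned int regno)
+{
+  /* E.g. AD_REGS is { 0x03, 0x0 }: bits 0 (%eax) and 1 (%edx).  */
+  return (set[regno / 32] >> (regno % 32)) & 1;
+}
+#endif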
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])
+
+/* When defined, the compiler allows registers explicitly used in the
+ rtl to be used as spill registers but prevents the compiler from
+ extending the lifetime of these registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define QI_REG_P(X) \
+ (REG_P (X) && REGNO (X) < 4)
+
+#define GENERAL_REGNO_P(N) \
+ ((N) < 8 || REX_INT_REGNO_P (N))
+
+#define GENERAL_REG_P(X) \
+ (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
+
+#define ANY_QI_REG_P(X) (TARGET_64BIT ? GENERAL_REG_P(X) : QI_REG_P (X))
+
+#define NON_QI_REG_P(X) \
+ (REG_P (X) && REGNO (X) >= 4 && REGNO (X) < FIRST_PSEUDO_REGISTER)
+
+#define REX_INT_REGNO_P(N) ((N) >= FIRST_REX_INT_REG && (N) <= LAST_REX_INT_REG)
+#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))
+
+#define FP_REG_P(X) (REG_P (X) && FP_REGNO_P (REGNO (X)))
+#define FP_REGNO_P(N) ((N) >= FIRST_STACK_REG && (N) <= LAST_STACK_REG)
+#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
+#define ANY_FP_REGNO_P(N) (FP_REGNO_P (N) || SSE_REGNO_P (N))
+
+#define SSE_REGNO_P(N) \
+ (((N) >= FIRST_SSE_REG && (N) <= LAST_SSE_REG) \
+ || ((N) >= FIRST_REX_SSE_REG && (N) <= LAST_REX_SSE_REG))
+
+#define REX_SSE_REGNO_P(N) \
+ ((N) >= FIRST_REX_SSE_REG && (N) <= LAST_REX_SSE_REG)
+
+#define SSE_REGNO(N) \
+ ((N) < 8 ? FIRST_SSE_REG + (N) : FIRST_REX_SSE_REG + (N) - 8)
+#define SSE_REG_P(N) (REG_P (N) && SSE_REGNO_P (REGNO (N)))
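+
+/* For illustration, SSE_REGNO folds the split hard register ranges
+   back into logical xmm numbers: with the numbering above, xmm0..xmm7
+   map to FIRST_SSE_REG + N (21..28) and xmm8..xmm15 to
+   FIRST_REX_SSE_REG + N - 8 (45..52).  */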
+
+#define SSE_FLOAT_MODE_P(MODE) \
+ ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode))
+
+#define MMX_REGNO_P(N) ((N) >= FIRST_MMX_REG && (N) <= LAST_MMX_REG)
+#define MMX_REG_P(XOP) (REG_P (XOP) && MMX_REGNO_P (REGNO (XOP)))
+
+#define STACK_REG_P(XOP) \
+ (REG_P (XOP) && \
+ REGNO (XOP) >= FIRST_STACK_REG && \
+ REGNO (XOP) <= LAST_STACK_REG)
+
+#define NON_STACK_REG_P(XOP) (REG_P (XOP) && ! STACK_REG_P (XOP))
+
+#define STACK_TOP_P(XOP) (REG_P (XOP) && REGNO (XOP) == FIRST_STACK_REG)
+
+#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
+#define CC_REGNO_P(X) ((X) == FLAGS_REG || (X) == FPSR_REG)
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS INDEX_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Place additional restrictions on the register class to use when it
+ is necessary to be able to hold a value of mode MODE in a reload
+ register for which class CLASS would ordinarily be used. */
+
+#define LIMIT_RELOAD_CLASS(MODE, CLASS) \
+ ((MODE) == QImode && !TARGET_64BIT \
+ && ((CLASS) == ALL_REGS || (CLASS) == GENERAL_REGS \
+ || (CLASS) == LEGACY_REGS || (CLASS) == INDEX_REGS) \
+ ? Q_REGS : (CLASS))
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+ On the 80386 series, we prevent floating constants from being
+ reloaded into floating registers (since no move-insn can do that)
+ and we ensure that QImodes aren't reloaded into the esi or edi reg. */
+
+/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
+ QImode must go into class Q_REGS.
+ Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
+ movdf to do mem-to-mem moves through integer regs. */
+
+#define PREFERRED_RELOAD_CLASS(X, CLASS) \
+ ix86_preferred_reload_class ((X), (CLASS))
+
+/* Discourage putting floating-point values in SSE registers unless
+ SSE math is being used, and likewise for the 387 registers. */
+
+#define PREFERRED_OUTPUT_RELOAD_CLASS(X, CLASS) \
+ ix86_preferred_output_reload_class ((X), (CLASS))
+
+/* If we are copying between general and FP registers, we need a memory
+ location. The same is true for SSE and MMX registers. */
+#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
+ ix86_secondary_memory_needed ((CLASS1), (CLASS2), (MODE), 1)
+
+/* QImode spills from non-QI registers need a scratch. This does not
+ happen often -- the only example so far requires an uninitialized
+ pseudo. */
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, OUT) \
+ (((CLASS) == GENERAL_REGS || (CLASS) == LEGACY_REGS \
+ || (CLASS) == INDEX_REGS) && !TARGET_64BIT && (MODE) == QImode \
+ ? Q_REGS : NO_REGS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On the 80386, this is the size of MODE in words,
+ except in the FP regs, where a single reg is always enough. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (!MAYBE_INTEGER_CLASS_P (CLASS) \
+ ? (COMPLEX_MODE_P (MODE) ? 2 : 1) \
+ : (((((MODE) == XFmode ? 12 : GET_MODE_SIZE (MODE))) \
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* A C expression whose value is nonzero if pseudos that have been
+ assigned to registers of class CLASS would likely be spilled
+ because registers of CLASS are needed for spill registers.
+
+ The default value of this macro returns 1 if CLASS has exactly one
+ register and zero otherwise. On most machines, this default
+ should be used. Only define this macro to some other expression
+   if pseudos allocated by `local-alloc.c' end up in memory because
+ their hard registers were needed for spill registers. If this
+ macro returns nonzero for those classes, those pseudos will only
+ be allocated by `global.c', which knows how to reallocate the
+ pseudo to another register. If there would not be another
+ register available for reallocation, you should not change the
+ definition of this macro since the only effect of such a
+ definition would be to slow down register allocation. */
+
+#define CLASS_LIKELY_SPILLED_P(CLASS) \
+ (((CLASS) == AREG) \
+ || ((CLASS) == DREG) \
+ || ((CLASS) == CREG) \
+ || ((CLASS) == BREG) \
+ || ((CLASS) == AD_REGS) \
+ || ((CLASS) == SIREG) \
+ || ((CLASS) == DIREG) \
+ || ((CLASS) == FP_TOP_REG) \
+ || ((CLASS) == FP_SECOND_REG))
+
+/* Return a class of registers that cannot change FROM mode to TO mode. */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ ix86_cannot_change_mode_class (FROM, TO, CLASS)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+   On the 386, we have a pushw instruction that decrements by exactly 2
+   no matter what the position was; there is no pushb.
+   But as the CIE data alignment factor on this arch is -4, we need to
+   make sure all stack pointer adjustments are multiples of 4.
+
+   For the 64-bit ABI we round up to 8 bytes.
+ */
+
+#define PUSH_ROUNDING(BYTES) \
+ (TARGET_64BIT \
+ ? (((BYTES) + 7) & (-8)) \
+ : (((BYTES) + 3) & (-4)))
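+
+/* For illustration, the rounding above is plain round-up-to-multiple
+   arithmetic.  An editorial sketch in standalone C; the function name
+   is hypothetical:  */
+#if 0
+static unsigned int
+push_rounding (unsigned int bytes, int target_64bit)
+{
+  /* Round up to the 8-byte (64-bit) or 4-byte (32-bit) slot size;
+     e.g. a 1-byte push still advances the stack pointer by 4 or 8.  */
+  return target_64bit ? (bytes + 7) & -8 : (bytes + 3) & -4;
+}
+#endif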
+
+/* If defined, the maximum amount of space required for outgoing arguments will
+ be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed onto the
+ stack for each call; instead, the function prologue should increase the stack
+ frame size by this amount. */
+
+#define ACCUMULATE_OUTGOING_ARGS TARGET_ACCUMULATE_OUTGOING_ARGS
+
+/* If defined, a C expression whose value is nonzero when we want to use PUSH
+ instructions to pass outgoing arguments. */
+
+#define PUSH_ARGS (TARGET_PUSH_ARGS && !ACCUMULATE_OUTGOING_ARGS)
+
+/* We want the stack and args to grow in opposite directions, even if
+   PUSH_ARGS is 0.  */
+#define PUSH_ARGS_REVERSED 1
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Define this macro if functions should assume that stack space has been
+ allocated for arguments even when their values are passed in registers.
+
+ The value of this macro is the size, in bytes, of the area reserved for
+ arguments passed in registers for the function represented by FNDECL.
+
+ This space can be allocated by the caller, or be a part of the
+ machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
+ which. */
+#define REG_PARM_STACK_SPACE(FNDECL) 0
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the 80386, the RTD insn may be used to pop them if the number
+ of args is fixed, but if the number is variable then the caller
+ must pop them all. RTD can't be used for library calls now
+ because the library is compiled with the Unix compiler.
+ Use of RTD is a selectable option, since it is incompatible with
+ standard Unix calling sequences. If the option is not selected,
+ the caller must always pop the args.
+
+ The attribute stdcall is equivalent to RTD on a per module basis. */
+
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) \
+ ix86_return_pops_args ((FUNDECL), (FUNTYPE), (SIZE))
+
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ix86_function_value_regno_p (N)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ ix86_libcall_value (MODE)
+
+/* Define the size of the result block used for communication between
+ untyped_call and untyped_return. The block contains a DImode value
+ followed by the block used by fnsave and frstor. */
+
+#define APPLY_RESULT_SIZE (8+108)
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+
+typedef struct ix86_args {
+ int words; /* # words passed so far */
+ int nregs; /* # registers available for passing */
+ int regno; /* next available register number */
+ int fastcall; /* fastcall calling convention is used */
+ int sse_words; /* # sse words passed so far */
+ int sse_nregs; /* # sse registers available for passing */
+ int warn_sse; /* True when we want to warn about SSE ABI. */
+ int warn_mmx; /* True when we want to warn about MMX ABI. */
+ int sse_regno; /* next available sse register number */
+ int mmx_words; /* # mmx words passed so far */
+ int mmx_nregs; /* # mmx registers available for passing */
+ int mmx_regno; /* next available mmx register number */
+  int maybe_vaarg;		/* true for calls to possibly variadic functions.  */
+ int float_in_sse; /* 1 if in 32-bit mode SFmode (2 for DFmode) should
+ be passed in SSE registers. Otherwise 0. */
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED))
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&(CUM), (MODE), (TYPE), (NAMED))
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(VALIST, NEXTARG) \
+ ix86_va_start (VALIST, NEXTARG)
+
+#define TARGET_ASM_FILE_END ix86_file_end
+#define NEED_INDICATE_EXEC_STACK 0
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) x86_function_profiler (FILE, LABELNO)
+
+#define MCOUNT_NAME "_mcount"
+
+#define PROFILE_COUNT_REGISTER "edx"
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+/* Note that on the 386 it might be more efficient not to define this,
+   since we have to restore the stack pointer ourselves from the frame
+   pointer in order to use pop.  */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the 386, the trampoline contains two instructions:
+ mov #STATIC,ecx
+ jmp FUNCTION
+ The trampoline is generated entirely at runtime. The operand of JMP
+ is the address of FUNCTION relative to the instruction following the
+ JMP (which is 5 bytes long). */
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE (TARGET_64BIT ? 23 : 10)
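+
+/* For illustration, the 10-byte 32-bit layout described above (B9 imm32
+   for the mov, E9 rel32 for the jmp) could be materialized as follows.
+   This is an editorial sketch, not the actual x86_initialize_trampoline
+   implementation; it assumes 32-bit pointers and a hypothetical
+   function name:  */
+#if 0
+#include <string.h>
+static void
+init_trampoline_32 (unsigned char *tramp, unsigned long fnaddr,
+                    unsigned long cxt)
+{
+  unsigned long rel = fnaddr - ((unsigned long) tramp + 10);
+  tramp[0] = 0xb9;              /* mov $cxt, %ecx */
+  memcpy (tramp + 1, &cxt, 4);
+  tramp[5] = 0xe9;              /* jmp rel32 */
+  memcpy (tramp + 6, &rel, 4);  /* relative to the insn after the jmp */
+}
+#endif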
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ x86_initialize_trampoline ((TRAMP), (FNADDR), (CXT))
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ There are two registers that can always be eliminated on the i386.
+ The frame pointer and the arg pointer can be replaced by either the
+   hard frame pointer or the stack pointer, depending upon the
+ circumstances. The hard frame pointer is not used before reload and
+ so it is not eligible for elimination. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is
+ allowed. Frame pointer elimination is automatically handled.
+
+ All other eliminations are valid. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < STACK_POINTER_REGNUM \
+   || ((REGNO) >= FIRST_REX_INT_REG					\
+ && (REGNO) <= LAST_REX_INT_REG) \
+ || ((unsigned) reg_renumber[(REGNO)] >= FIRST_REX_INT_REG \
+ && (unsigned) reg_renumber[(REGNO)] <= LAST_REX_INT_REG) \
+ || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) <= STACK_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+   || ((REGNO) >= FIRST_REX_INT_REG					\
+ && (REGNO) <= LAST_REX_INT_REG) \
+ || ((unsigned) reg_renumber[(REGNO)] >= FIRST_REX_INT_REG \
+ && (unsigned) reg_renumber[(REGNO)] <= LAST_REX_INT_REG) \
+ || (unsigned) reg_renumber[(REGNO)] <= STACK_POINTER_REGNUM)
+
+#define REGNO_OK_FOR_SIREG_P(REGNO) \
+ ((REGNO) == 4 || reg_renumber[(REGNO)] == 4)
+#define REGNO_OK_FOR_DIREG_P(REGNO) \
+ ((REGNO) == 5 || reg_renumber[(REGNO)] == 5)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+
+/* Non-strict versions; pseudos are OK.  */
+#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \
+ (REGNO (X) < STACK_POINTER_REGNUM \
+ || (REGNO (X) >= FIRST_REX_INT_REG \
+ && REGNO (X) <= LAST_REX_INT_REG) \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_OK_FOR_BASE_NONSTRICT_P(X) \
+ (REGNO (X) <= STACK_POINTER_REGNUM \
+ || REGNO (X) == ARG_POINTER_REGNUM \
+ || REGNO (X) == FRAME_POINTER_REGNUM \
+ || (REGNO (X) >= FIRST_REX_INT_REG \
+ && REGNO (X) <= LAST_REX_INT_REG) \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+/* Strict versions, hard registers only.  */
+#define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#ifndef REG_OK_STRICT
+#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X)
+#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X)
+
+#else
+#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X)
+#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X)
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is usually machine-independent.
+
+ See legitimize_pic_address in i386.c for details as to what
+ constitutes a legitimate address when -fpic is used. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) constant_address_p (X)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) legitimate_constant_p (X)
+
+#ifdef REG_OK_STRICT
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+do { \
+ if (legitimate_address_p ((MODE), (X), 1)) \
+ goto ADDR; \
+} while (0)
+
+#else
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+do { \
+ if (legitimate_address_p ((MODE), (X), 0)) \
+ goto ADDR; \
+} while (0)
+
+#endif
+
+/* If defined, a C expression to determine the base term of address X.
+ This macro is used in only one place: `find_base_term' in alias.c.
+
+ It is always safe for this macro to not be defined. It exists so
+ that alias analysis can understand machine-dependent addresses.
+
+ The typical use of this macro is to handle addresses containing
+ a label_ref or symbol_ref within an UNSPEC. */
+
+#define FIND_BASE_TERM(X) ix86_find_base_term (X)
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the 80386, we handle X+REG by loading X into a register R and
+ using R+REG. R will go in a general reg and indexing will be used.
+ However, if REG is a broken-out memory address or multiplication,
+ nothing needs to be done because REG can certainly go in a general reg.
+
+ When -fpic is used, special handling is needed for symbolic references.
+ See comments by legitimize_pic_address in i386.c for details. */
+
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+do { \
+ (X) = legitimize_address ((X), (OLDX), (MODE)); \
+ if (memory_address_p ((MODE), (X))) \
+ goto WIN; \
+} while (0)
+
+#define REWRITE_ADDRESS(X) rewrite_address (X)
+
+/* Nonzero if the constant value X is a legitimate general operand
+ when generating PIC code. It is given that flag_pic is on and
+ that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)
+
+#define SYMBOLIC_CONST(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+   On the 80386, only postdecrement and postincrement address modes
+   depend thus (the amount of decrement or increment being the length
+   of the operand).  */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
+do { \
+ if (GET_CODE (ADDR) == POST_INC \
+ || GET_CODE (ADDR) == POST_DEC) \
+ goto LABEL; \
+} while (0)
+
+/* Max number of args passed in registers. If this is more than 3, we will
+   have problems with ebx (register #4), since it is a call-saved register and
+ is also used as the pic register in ELF. So for now, don't allow more than
+ 3 registers to be passed in registers. */
+
+#define REGPARM_MAX (TARGET_64BIT ? 6 : 3)
+
+/* APPLE LOCAL regparmandstackparm */
+#define SSE_REGPARM_MAX (TARGET_64BIT ? 8 : (TARGET_MACHO ? 4 : (TARGET_SSE ? 3 : 0)))
+
+#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0))
+
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE (!TARGET_64BIT || flag_pic ? SImode : DImode)
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Number of bytes moved into a data cache for a single prefetch operation. */
+#define PREFETCH_BLOCK ix86_cost->prefetch_block
+
+/* Number of prefetch operations that can be done in parallel. */
+#define SIMULTANEOUS_PREFETCHES ix86_cost->simultaneous_prefetches
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 16
+
+/* MOVE_MAX_PIECES is the number of bytes at a time which we can
+ move efficiently, as opposed to MOVE_MAX which is the maximum
+ number of bytes we can move with a single instruction. */
+#define MOVE_MAX_PIECES (TARGET_64BIT ? 8 : 4)
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction pairs, we will do a movmem or libcall instead.
+ Increasing the value will always make code faster, but eventually
+ incurs high cost in increased code size.
+
+ If you don't define this, a reasonable default is used. */
+
+#define MOVE_RATIO (optimize_size ? 3 : ix86_cost->move_ratio)
+
+/* If a clear memory operation would take CLEAR_RATIO or more simple
+ move-instruction sequences, we will do a clrmem or libcall instead. */
+
+#define CLEAR_RATIO (optimize_size ? 2 \
+ : ix86_cost->move_ratio > 6 ? 6 : ix86_cost->move_ratio)
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* On i386, shifts do truncate the count. But bit opcodes don't. */
+
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* A macro to update M and UNSIGNEDP when an object whose type is
+ TYPE and which has the specified mode and signedness is to be
+ stored in a register. This macro is only called when TYPE is a
+ scalar type.
+
+ On i386 it is sometimes useful to promote HImode and QImode
+ quantities to SImode. The choice depends on target type. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+do { \
+ if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \
+ || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \
+ (MODE) = SImode; \
+} while (0)
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_64BIT ? DImode : SImode)
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* A C expression for the cost of moving data from a register in class FROM to
+ one in class TO. The classes are expressed using the enumeration values
+ such as `GENERAL_REGS'. A value of 2 is the default; other values are
+ interpreted relative to that.
+
+ It is not required that the cost always equal 2 when FROM is the same as TO;
+ on some machines it is expensive to move between registers if they are not
+ general registers. */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+ ix86_register_move_cost ((MODE), (CLASS1), (CLASS2))
+
+/* A C expression for the cost of moving data of mode M between a
+ register and memory. A value of 2 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ If moving between registers and memory is more expensive than
+ between two registers, you should define this macro to express the
+ relative cost. */
+
+#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
+ ix86_memory_move_cost ((MODE), (CLASS), (IN))
+
+/* A C expression for the cost of a branch instruction. A value of 1
+ is the default; other values are interpreted relative to that. */
+
+#define BRANCH_COST ix86_branch_cost
+
+/* Define this macro as a C expression which is nonzero if accessing
+ less than a word of memory (i.e. a `char' or a `short') is no
+   faster than accessing a word of memory, i.e., if such accesses
+   require more than one instruction or if there is no difference in
+ cost between byte and (aligned) word loads.
+
+ When this macro is not defined, the compiler will access a field by
+ finding the smallest containing object; when it is defined, a
+   fullword load will be used if alignment permits.  Unless byte
+   accesses are faster than word accesses, using word accesses is
+   preferable, since it may eliminate subsequent memory accesses if
+   subsequent accesses occur to other fields in the same word of the
+ structure, but to different bytes. */
+
+/* APPLE LOCAL 6131435 */
+#define SLOW_BYTE_ACCESS (!flag_apple_kext && !flag_mkernel && !TARGET_64BIT)
+
+/* Nonzero if access to memory by shorts is slow and undesirable. */
+#define SLOW_SHORT_ACCESS 0
+
+/* Define this macro to be the value 1 if unaligned accesses have a
+ cost many times greater than aligned accesses, for example if they
+ are emulated in a trap handler.
+
+ When this macro is nonzero, the compiler will act as if
+ `STRICT_ALIGNMENT' were nonzero when generating code for block
+ moves. This can cause significantly more instructions to be
+ produced. Therefore, do not set this macro nonzero if unaligned
+ accesses only add a cycle or two to the time for a memory access.
+
+ If the value of this macro is always zero, it need not be defined. */
+
+/* #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0 */
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register.
+
+ Desirable on the 386 because a CALL with a constant address is
+ faster than one with a register address. */
+
+#define NO_FUNCTION_CSE
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+
+ For floating-point equality comparisons, CCFPEQmode should be used.
+ VOIDmode should be used in all other cases.
+
+ For integer comparisons against zero, reduce to CCNOmode or CCZmode if
+ possible, to allow for more combinations. */
+
+#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y))
+
+/* Return nonzero if MODE implies a floating point inequality can be
+ reversed. */
+
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+/* A C expression whose value is reversed condition code of the CODE for
+ comparison done in CC_MODE mode. */
+#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE))
+
+
+/* Control the assembler format that we output, to the extent
+ this does not vary between assemblers. */
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+/* In order to refer to the first 8 regs as 32 bit regs, prefix an "e".
+ For non floating point regs, the following are the HImode names.
+
+ For float regs, the stack top is sometimes referred to as "%st(0)"
+ instead of just "%st". PRINT_OPERAND handles this with the "y" code. */
+
+#define HI_REGISTER_NAMES \
+{"ax","dx","cx","bx","si","di","bp","sp", \
+ "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \
+ "argp", "flags", "fpsr", "dirflag", "frame", \
+ "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" , \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"}
+
+#define REGISTER_NAMES HI_REGISTER_NAMES
+
+/* Table of additional register names to use in user input. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ { "eax", 0 }, { "edx", 1 }, { "ecx", 2 }, { "ebx", 3 }, \
+ { "esi", 4 }, { "edi", 5 }, { "ebp", 6 }, { "esp", 7 }, \
+ { "rax", 0 }, { "rdx", 1 }, { "rcx", 2 }, { "rbx", 3 }, \
+ { "rsi", 4 }, { "rdi", 5 }, { "rbp", 6 }, { "rsp", 7 }, \
+ { "al", 0 }, { "dl", 1 }, { "cl", 2 }, { "bl", 3 }, \
+ { "ah", 0 }, { "dh", 1 }, { "ch", 2 }, { "bh", 3 } }
+
+/* Note we are omitting these since currently I don't know how
+   to get gcc to use them: they want the same register numbers as
+   al and ax, but under different names.  */
+
+#define QI_REGISTER_NAMES \
+{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl",}
+
+/* These parallel the array above, and can be used to access bits 8:15
+ of regs 0 through 3. */
+
+#define QI_HIGH_REGISTER_NAMES \
+{"ah", "dh", "ch", "bh", }
+
+/* How to renumber registers for dbx and gdb. */
+
+#define DBX_REGISTER_NUMBER(N) \
+ (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])
+
+extern int const dbx_register_map[FIRST_PSEUDO_REGISTER];
+extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER];
+extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];
+
+/* Before the prologue, RA is at 0(%esp). */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM))
+
+/* After the prologue, RA is at -4(AP) in the current frame. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -UNITS_PER_WORD)) \
+ : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))
+
+/* PC is dbx register 8; let's use that column for RA. */
+#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)
+
+/* Before the prologue, the top of the frame is at 4(%esp). */
+#define INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 2)
+
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations.
+
+ ??? All x86 object file formats are capable of representing this.
+ After all, the relocation needed is the same as for the call insn.
+ Whether or not a particular assembler allows us to enter such, I
+ guess we'll have to see. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ asm_preferred_eh_data_format ((CODE), (GLOBAL))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
+do { \
+ if (TARGET_64BIT) \
+ asm_fprintf ((FILE), "\tpush{q}\t%%r%s\n", \
+ reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \
+ else \
+ asm_fprintf ((FILE), "\tpush{l}\t%%e%s\n", reg_names[(REGNO)]); \
+} while (0)
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
+do { \
+ if (TARGET_64BIT) \
+ asm_fprintf ((FILE), "\tpop{q}\t%%r%s\n", \
+ reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \
+ else \
+ asm_fprintf ((FILE), "\tpop{l}\t%%e%s\n", reg_names[(REGNO)]); \
+} while (0)
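+
+/* For illustration: in 64-bit mode the "%%r%s" format above relies on
+   the expression reg_names[REGNO] + (REX_INT_REGNO_P (REGNO) != 0) to
+   skip the leading 'r' of the REX names "r8".."r15", so the output is
+   "%r8" rather than "%rr8", while a legacy name like "ax" becomes
+   "%rax".  An editorial sketch of the same trick in plain C:  */
+#if 0
+#include <stdio.h>
+static void
+demo (void)
+{
+  const char *ax = "ax", *r8 = "r8";
+  printf ("push{q}\t%%r%s\n", ax + 0);  /* prints: push{q} %rax */
+  printf ("push{q}\t%%r%s\n", r8 + 1);  /* prints: push{q} %r8 */
+}
+#endif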
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ ix86_output_addr_vec_elt ((FILE), (VALUE))
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))
+
+/* Under some conditions we need jump tables in the text section,
+ because the assembler cannot handle label differences between
+ sections. This is the case for x86_64 on Mach-O for example. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION \
+ (flag_pic && ((TARGET_MACHO && TARGET_64BIT) \
+ || (!TARGET_64BIT && !HAVE_AS_GOTOFF_IN_DATA)))
+
+/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
+ and switch back. For x86 we do this only to save a few bytes that
+ would otherwise be unused in the text section. */
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\t" \
+ "call " USER_LABEL_PREFIX #FUNC "\n" \
+ TEXT_SECTION_ASM_OP);
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ Effect of various CODE letters is described in i386.c near
+ print_operand function. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '*' || (CODE) == '+' || (CODE) == '&')
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ print_operand ((FILE), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address ((FILE), (ADDR))
+
+#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
+do { \
+ if (! output_addr_const_extra (FILE, (X))) \
+ goto FAIL; \
+} while (0)
+
+/* A letter which is not needed by the normal asm syntax, and which
+   we can use for operand syntax in the extended asm.  */
+
+#define ASM_OPERAND_LETTER '#'
+#define RET return ""
+#define AT_SP(MODE) (gen_rtx_MEM ((MODE), stack_pointer_rtx))
+
+/* Which processor to schedule for. The cpu attribute defines a list that
+ mirrors this list, so changes to i386.md must be made at the same time. */
+
+enum processor_type
+{
+ PROCESSOR_I386, /* 80386 */
+ PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24] */
+ PROCESSOR_PENTIUM,
+ PROCESSOR_PENTIUMPRO,
+ PROCESSOR_K6,
+ PROCESSOR_ATHLON,
+ PROCESSOR_PENTIUM4,
+ PROCESSOR_K8,
+ PROCESSOR_NOCONA,
+ /* APPLE LOCAL mainline */
+ PROCESSOR_CORE2,
+ PROCESSOR_GENERIC32,
+ PROCESSOR_GENERIC64,
+ PROCESSOR_max
+};
+
+extern enum processor_type ix86_tune;
+extern enum processor_type ix86_arch;
+
+enum fpmath_unit
+{
+ FPMATH_387 = 1,
+ FPMATH_SSE = 2
+};
+
+extern enum fpmath_unit ix86_fpmath;
+
+enum tls_dialect
+{
+ TLS_DIALECT_GNU,
+ TLS_DIALECT_GNU2,
+ TLS_DIALECT_SUN
+};
+
+extern enum tls_dialect ix86_tls_dialect;
+
+enum cmodel {
+ CM_32, /* The traditional 32-bit ABI. */
+ CM_SMALL, /* Assumes all code and data fits in the low 31 bits. */
+ CM_KERNEL, /* Assumes all code and data fits in the high 31 bits. */
+ CM_MEDIUM, /* Assumes code fits in the low 31 bits; data unlimited. */
+ CM_LARGE, /* No assumptions. */
+ CM_SMALL_PIC, /* Assumes code+data+got/plt fits in a 31 bit region. */
+ CM_MEDIUM_PIC /* Assumes code+got/plt fits in a 31 bit region. */
+};
+
+extern enum cmodel ix86_cmodel;
+
+/* Size of the RED_ZONE area. */
+#define RED_ZONE_SIZE 128
+/* Reserved area of the red zone for temporaries. */
+#define RED_ZONE_RESERVE 8
+
+enum asm_dialect {
+ ASM_ATT,
+ ASM_INTEL
+};
+
+extern enum asm_dialect ix86_asm_dialect;
+/* APPLE LOCAL begin regparmandstackparm */
+extern void ix86_darwin_handle_regparmandstackparm (tree fndecl);
+extern void ix86_darwin_redirect_calls(void);
+/* APPLE LOCAL end regparmandstackparm */
+
+extern unsigned int ix86_preferred_stack_boundary;
+/* APPLE LOCAL begin radar 4216496, 4229407, 4120689, 4095567 */
+extern unsigned int ix86_save_preferred_stack_boundary;
+/* APPLE LOCAL end radar 4216496, 4229407, 4120689, 4095567 */
+extern int ix86_branch_cost, ix86_section_threshold;
+
+/* Smallest class containing REGNO. */
+extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
+
+extern rtx ix86_compare_op0; /* operand 0 for comparisons */
+extern rtx ix86_compare_op1; /* operand 1 for comparisons */
+extern rtx ix86_compare_emitted;
+
+/* To properly truncate FP values into integers, we need to set the i387
+   control word.  We can't emit proper mode switching code before reload,
+   as spills generated by reload may truncate values incorrectly, but we
+   can still avoid redundant computation of the new control word by the
+   mode switching pass.  The fldcw instructions are still emitted
+   redundantly, but this is probably not going to be a noticeable problem,
+   as most CPUs have a fast path for the sequence.
+
+   The machinery is to emit simple truncation instructions and split them
+   before reload into instructions having USEs of two memory locations
+   that are filled in by this code with the old and new control words.
+
+   A post-reload pass may later be used to eliminate the redundant fldcw
+   if needed.  */
+
+enum ix86_entity
+{
+ I387_TRUNC = 0,
+ I387_FLOOR,
+ I387_CEIL,
+ I387_MASK_PM,
+ MAX_386_ENTITIES
+};
+
+enum ix86_stack_slot
+{
+ SLOT_VIRTUAL = 0,
+ SLOT_TEMP,
+ SLOT_CW_STORED,
+ SLOT_CW_TRUNC,
+ SLOT_CW_FLOOR,
+ SLOT_CW_CEIL,
+ SLOT_CW_MASK_PM,
+ MAX_386_STACK_LOCALS
+};
+
+/* Define this macro if the port needs extra instructions inserted
+ for mode switching in an optimizing compilation. */
+
+#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
+ ix86_optimize_mode_switching[(ENTITY)]
+
+/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
+ initializer for an array of integers. Each initializer element N
+ refers to an entity that needs mode switching, and specifies the
+ number of different modes that might need to be set for this
+ entity. The position of the initializer in the initializer -
+ starting counting at zero - determines the integer that is used to
+ refer to the mode-switched entity in question. */
+
+#define NUM_MODES_FOR_MODE_SWITCHING \
+ { I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }
+
+/* ENTITY is an integer specifying a mode-switched entity. If
+ `OPTIMIZE_MODE_SWITCHING' is defined, you must define this macro to
+ return an integer value not larger than the corresponding element
+ in `NUM_MODES_FOR_MODE_SWITCHING', to denote the mode that ENTITY
+ must be switched into prior to the execution of INSN. */
+
+#define MODE_NEEDED(ENTITY, I) ix86_mode_needed ((ENTITY), (I))
+
+/* This macro specifies the order in which modes for ENTITY are
+ processed. 0 is the highest priority. */
+
+#define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)
+
+/* Generate one or more insns to set ENTITY to MODE. HARD_REG_LIVE
+ is the set of hard registers live at the point where the insn(s)
+ are to be inserted. */
+
+#define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
+ ((MODE) != I387_CW_ANY && (MODE) != I387_CW_UNINITIALIZED \
+ ? emit_i387_cw_initialization (MODE), 0 \
+ : 0)
+
+
+/* Avoid renaming of stack registers, as doing so in combination with
+   scheduling just increases the number of registers live at a time and
+   in turn the number of fxch instructions needed.
+
+   ??? Maybe Pentium chips benefit from renaming; someone can try....  */
+
+#define HARD_REGNO_RENAME_OK(SRC, TARGET) \
+ ((SRC) < FIRST_STACK_REG || (SRC) > LAST_STACK_REG)
+
+
+#define DLL_IMPORT_EXPORT_PREFIX '#'
+
+#define FASTCALL_PREFIX '@'
+
+struct machine_function GTY(())
+{
+ struct stack_local_entry *stack_locals;
+ const char *some_ld_name;
+ rtx force_align_arg_pointer;
+ int save_varrargs_registers;
+ int accesses_prev_frame;
+ int optimize_mode_switching[MAX_386_ENTITIES];
+ /* Set by ix86_compute_frame_layout and used by prologue/epilogue expander to
+ determine the style used. */
+ int use_fast_prologue_epilogue;
+ /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE has been computed
+ for. */
+ int use_fast_prologue_epilogue_nregs;
+ /* If true, the current function needs the default PIC register, not
+ an alternate register (on x86) and must not use the red zone (on
+ x86_64), even if it's a leaf function. We don't want the
+ function to be regarded as non-leaf because TLS calls need not
+ affect register allocation. This flag is set when a TLS call
+ instruction is expanded within a function, and never reset, even
+ if all such instructions are optimized away. Use the
+ ix86_current_function_calls_tls_descriptor macro for a better
+ approximation. */
+ int tls_descriptor_call_expanded_p;
+};
+
+#define ix86_stack_locals (cfun->machine->stack_locals)
+#define ix86_save_varrargs_registers (cfun->machine->save_varrargs_registers)
+#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
+#define ix86_tls_descriptor_calls_expanded_in_cfun \
+ (cfun->machine->tls_descriptor_call_expanded_p)
+/* Since tls_descriptor_call_expanded is not cleared even if all TLS
+   calls are optimized away, we try to detect here the cases in which
+   they were optimized away.  Since such instructions contain
+   (use (reg REG_SP)), we can verify whether any such instruction is
+   still live by testing that REG_SP is live.  */
+#define ix86_current_function_calls_tls_descriptor \
+ (ix86_tls_descriptor_calls_expanded_in_cfun && regs_ever_live[SP_REG])
+
+/* Control behavior of x86_file_start. */
+#define X86_FILE_START_VERSION_DIRECTIVE false
+#define X86_FILE_START_FLTUSED false
+
+/* APPLE LOCAL begin CW asm blocks */
+#undef TARGET_IASM_EXTRA_INFO
+#define TARGET_IASM_EXTRA_INFO \
+ char mod[3]; \
+ bool as_immediate; \
+ bool as_offset; \
+ bool pseudo;
+
+#define TARGET_IASM_REORDER_ARG(OPCODE, NEWARGNUM, NUM_ARGS, ARGNUM) \
+  do { \
+    /* If we are outputting AT&T style assembly language, the argument \
+       numbering is reversed.  */ \
+    if (iasm_x86_needs_swapping (OPCODE)) \
+      NEWARGNUM = NUM_ARGS - ARGNUM + 1; \
+  } while (0)
+
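+/* For instance, the CW asm statement "add eax, ebx" names its
+   destination first (Intel order); emitted as AT&T assembly it must
+   read "addl %ebx, %eax", so the reordering above maps argument 1 to
+   argument 2 and vice versa.  */
+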
+#define IASM_SYNTH_CONSTRAINTS(R, ARGNUM, NUM_ARGS, DB) \
+  do { \
+    /* On x86, operand 2 or 3 can be left out and the assembler will deal with it. \
+ \
+       Take for example an opcode: \
+ \
+	 opcode r m i \
+ \
+       We allow: \
+ \
+	 opcode r mi \
+ \
+       when we have only 2 operands.  */ \
+    if (R \
+	&& ARGNUM == 2 \
+	&& NUM_ARGS == 2 \
+	&& R < &DB[sizeof (DB) / sizeof (DB[0]) - 1] \
+	&& strcmp (R[1].opcode, R->opcode) == 0 \
+	&& R[1].argnum == 3) \
+      { \
+	tree t; \
+	size_t len = strlen (R->constraint) + strlen (R[1].constraint) + 1; \
+	char *p = alloca (len); \
+ \
+	sprintf (p, "%s%s", R->constraint, R[1].constraint); \
+	t = build_string (len, p); \
+	return TREE_STRING_POINTER (t); \
+      } \
+  } while (0)
+
+#define TARGET_IASM_PRINT_OP(BUF, ARG, ARGNUM, USES, MUST_BE_REG, MUST_NOT_BE_REG, E) \
+ iasm_print_op (BUF, ARG, ARGNUM, USES, MUST_BE_REG, MUST_NOT_BE_REG, E)
+
+extern tree iasm_x86_canonicalize_operands (const char **, tree, void *);
+/* On x86, we can rewrite opcodes, change argument ordering, and so on.  */
+#define IASM_CANONICALIZE_OPERANDS(OPCODE, NEW_OPCODE, IARGS, E) \
+ do { \
+ NEW_OPCODE = OPCODE; \
+ IARGS = iasm_x86_canonicalize_operands (&NEW_OPCODE, IARGS, E); \
+ } while (0)
+
+#define IASM_SEE_OPCODE(YYCHAR, T) \
+ /* If we see an int, arrange to see it as an identifier (opcode), \
+ not as a type. */ \
+ ((YYCHAR == TYPESPEC \
+ && C_RID_CODE (T) == RID_INT) \
+ ? IDENTIFIER : YYCHAR)
+
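+/* For example, in the block-asm statement "int 3" the token "int" is
+   the INT instruction (here a breakpoint), not the C type specifier,
+   so the macro above re-lexes it as an identifier.  */
+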
+/* Return true iff the ID is a prefix for an instruction. */
+
+#define IASM_IS_PREFIX(ID) \
+ do { \
+ const char *myname = IDENTIFIER_POINTER (ID); \
+ if (strcasecmp (myname, "lock") == 0 \
+ || strcasecmp (myname, "rep") == 0 \
+ || strcasecmp (myname, "repe") == 0 \
+ || strcasecmp (myname, "repz") == 0 \
+ || strcasecmp (myname, "repne") == 0 \
+ || strcasecmp (myname, "repnz") == 0) \
+ return true; \
+ } while (0)
+
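+/* For example, in "lock inc dword ptr [mem]" the "lock" token is an
+   instruction prefix and attaches to the following inc rather than
+   being parsed as an opcode of its own.  */
+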
+#define IASM_PRINT_PREFIX(BUF, PREFIX_LIST) iasm_x86_print_prefix(BUF, PREFIX_LIST)
+
+#define IASM_IMMED_PREFIX(E, BUF) \
+ do { \
+ if (!E->pseudo && ! E->as_immediate) \
+ sprintf (BUF + strlen (BUF), "$"); \
+ } while (0)
+
+#define IASM_OFFSET_PREFIX(E, BUF) \
+ do { \
+ if (E->as_offset) \
+ sprintf (BUF + strlen (BUF), "$"); \
+ } while (0)
+
+/* We can't yet expose ST(x) to reg-stack.c, don't try. */
+#define IASM_HIDE_REG(R) FP_REGNO_P (R)
+
+#define IASM_SEE_IMMEDIATE(E) \
+ E->as_immediate = true
+
+#define IASM_SEE_NO_IMMEDIATE(E) \
+ E->as_immediate = false
+
+/* Table of instructions that need extra constraints. Keep this table sorted. */
+#undef TARGET_IASM_OP_CONSTRAINT
+#define TARGET_IASM_OP_CONSTRAINT \
+ { "adc", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64 },\
+ { "adc", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64 },\
+ { "add", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64 },\
+ { "add", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "addpd", 1, "+x"}, \
+ { "addpd", 2, "xm"}, \
+ { "addps", 1, "+x"}, \
+ { "addps", 2, "xm"}, \
+ { "addsd", 1, "+x"}, \
+ { "addsd", 2, "xm"}, \
+ { "addss", 1, "+x"}, \
+ { "addss", 2, "xm"}, \
+ { "addsubpd", 1, "+x"}, \
+ { "addsubpd", 2, "xm"}, \
+ { "addsubps", 1, "+x"}, \
+ { "addsubps", 2, "xm"}, \
+ { "and", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "and", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "andnpd", 1, "+x"}, \
+ { "andnpd", 2, "xm"}, \
+ { "andnps", 1, "+x"}, \
+ { "andnps", 2, "xm"}, \
+ { "andpd", 1, "+x"}, \
+ { "andpd", 2, "xm"}, \
+ { "andps", 1, "+x"}, \
+ { "andps", 2, "xm"}, \
+ { NX "arpl", 1, "+" rm16}, \
+ { NX "arpl", 2, r16}, \
+ { "bound", 1, U("r")}, \
+ { "bound", 2, U("m")}, \
+ { "bsf", 1, "=r"}, \
+ { "bsf", 2, "rm"}, \
+ { "bsr", 1, "=r"}, \
+ { "bsr", 2, "rm"}, \
+ { "bt", 1, "rm"}, \
+ { "bt", 2, "ri"}, \
+ { "btc", 1, "rm"}, \
+ { "btc", 2, "ri"}, \
+ { "btr", 1, "rm"}, \
+ { "btr", 2, "ri"}, \
+ { "bts", 1, "rm"}, \
+ { "bts", 2, "ri"}, \
+ { NX "call", 1, "rsm"}, \
+ { "clflush", 1, "=m"}, \
+ { "cmova", 1, r16 "," r32 C R64},\
+ { "cmova", 2, rm16 "," rm32 C RM64},\
+ { "cmovae", 2, "rm"}, \
+ { "cmovb", 2, "rm"}, \
+ { "cmovbe", 2, "rm"}, \
+ { "cmovc", 2, "rm"}, \
+ { "cmove", 2, "rm"}, \
+ { "cmovg", 2, "rm"}, \
+ { "cmovge", 2, "rm"}, \
+ { "cmovl", 2, "rm"}, \
+ { "cmovle", 2, "rm"}, \
+ { "cmovna", 2, "rm"}, \
+ { "cmovnae", 2, "rm"}, \
+ { "cmovnb", 2, "rm"}, \
+ { "cmovnbe", 2, "rm"}, \
+ { "cmovnc", 2, "rm"}, \
+ { "cmovne", 2, "rm"}, \
+ { "cmovng", 2, "rm"}, \
+ { "cmovnge", 2, "rm"}, \
+ { "cmovnl", 2, "rm"}, \
+ { "cmovnle", 2, "rm"}, \
+ { "cmovno", 2, "rm"}, \
+ { "cmovnp", 2, "rm"}, \
+ { "cmovns", 2, "rm"}, \
+ { "cmovnz", 2, "rm"}, \
+ { "cmovo", 2, "rm"}, \
+ { "cmovp", 2, "rm"}, \
+ { "cmovpe", 2, "rm"}, \
+ { "cmovpo", 2, "rm"}, \
+ { "cmovs", 2, "rm"}, \
+ { "cmovz", 2, "rm"}, \
+ { "cmp", 1, rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "cmp", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "cmpeqpd", 1, "=x"}, \
+ { "cmpeqpd", 2, "xm"}, \
+ { "cmpeqps", 1, "=x"}, \
+ { "cmpeqps", 2, "xm"}, \
+ { "cmpeqsd", 1, "=x"}, \
+ { "cmpeqsd", 2, "xm"}, \
+ { "cmpeqss", 1, "=x"}, \
+ { "cmpeqss", 2, "xm"}, \
+ { "cmplepd", 1, "=x"}, \
+ { "cmplepd", 2, "xm"}, \
+ { "cmpleps", 1, "=x"}, \
+ { "cmpleps", 2, "xm"}, \
+ { "cmplesd", 1, "=x"}, \
+ { "cmplesd", 2, "xm"}, \
+ { "cmpless", 1, "=x"}, \
+ { "cmpless", 2, "xm"}, \
+ { "cmpltpd", 1, "=x"}, \
+ { "cmpltpd", 2, "xm"}, \
+ { "cmpltps", 1, "=x"}, \
+ { "cmpltps", 2, "xm"}, \
+ { "cmpltsd", 1, "=x"}, \
+ { "cmpltsd", 2, "xm"}, \
+ { "cmpltss", 1, "=x"}, \
+ { "cmpltss", 2, "xm"}, \
+ { "cmpneqpd", 1, "=x"}, \
+ { "cmpneqpd", 2, "xm"}, \
+ { "cmpneqps", 1, "=x"}, \
+ { "cmpneqps", 2, "xm"}, \
+ { "cmpneqsd", 1, "=x"}, \
+ { "cmpneqsd", 2, "xm"}, \
+ { "cmpneqss", 1, "=x"}, \
+ { "cmpneqss", 2, "xm"}, \
+ { "cmpnlepd", 1, "=x"}, \
+ { "cmpnlepd", 2, "xm"}, \
+ { "cmpnleps", 1, "=x"}, \
+ { "cmpnleps", 2, "xm"}, \
+ { "cmpnlesd", 1, "=x"}, \
+ { "cmpnlesd", 2, "xm"}, \
+ { "cmpnless", 1, "=x"}, \
+ { "cmpnless", 2, "xm"}, \
+ { "cmpnltpd", 1, "=x"}, \
+ { "cmpnltpd", 2, "xm"}, \
+ { "cmpnltps", 1, "=x"}, \
+ { "cmpnltps", 2, "xm"}, \
+ { "cmpnltsd", 1, "=x"}, \
+ { "cmpnltsd", 2, "xm"}, \
+ { "cmpnltss", 1, "=x"}, \
+ { "cmpnltss", 2, "xm"}, \
+ { "cmpordpd", 1, "=x"}, \
+ { "cmpordpd", 2, "xm"}, \
+ { "cmpordps", 1, "=x"}, \
+ { "cmpordps", 2, "xm"}, \
+ { "cmpordsd", 1, "=x"}, \
+ { "cmpordsd", 2, "xm"}, \
+ { "cmpordss", 1, "=x"}, \
+ { "cmpordss", 2, "xm"}, \
+ { "cmppd", 1, "=x"}, \
+ { "cmppd", 2, "xm"}, \
+ { "cmppd", 3, "i"}, \
+ { "cmpps", 1, "=x"}, \
+ { "cmpps", 2, "xm"}, \
+ { "cmpps", 3, "i"}, \
+ { "cmpsd", 1, "=x"}, \
+ { "cmpsd", 2, "xm"}, \
+ { "cmpsd", 3, "i"}, \
+ { "cmpss", 1, "=x"}, \
+ { "cmpss", 2, "xm"}, \
+ { "cmpss", 3, "i"}, \
+ { "cmpunordpd", 1, "=x"}, \
+ { "cmpunordpd", 2, "xm"}, \
+ { "cmpunordps", 1, "=x"}, \
+ { "cmpunordps", 2, "xm"}, \
+ { "cmpunordsd", 1, "=x"}, \
+ { "cmpunordsd", 2, "xm"}, \
+ { "cmpunordss", 1, "=x"}, \
+ { "cmpunordss", 2, "xm"}, \
+ { "cmpxchg", 1, "+mr"}, \
+ { "cmpxchg", 2, "r"}, \
+ { "comisd", 1, "x"}, \
+ { "comisd", 2, "xm"}, \
+ { "comiss", 1, "x"}, \
+ { "comiss", 2, "xm"}, \
+ { "cvtdq2pd", 1, "=x"}, \
+ { "cvtdq2pd", 2, "xm"}, \
+ { "cvtdq2ps", 1, "=x"}, \
+ { "cvtdq2ps", 2, "xm"}, \
+ { "cvtpd2dq", 1, "=x"}, \
+ { "cvtpd2dq", 2, "xm"}, \
+ { "cvtpd2pi", 1, "=y"}, \
+ { "cvtpd2pi", 2, "xm"}, \
+ { "cvtpd2ps", 1, "=x"}, \
+ { "cvtpd2ps", 2, "xm"}, \
+ { "cvtpi2pd", 1, "=x"}, \
+ { "cvtpi2pd", 2, "ym"}, \
+ { "cvtpi2ps", 1, "=x"}, \
+ { "cvtpi2ps", 2, "ym"}, \
+ { "cvtps2dq", 1, "=x"}, \
+ { "cvtps2dq", 2, "xm"}, \
+ { "cvtps2pd", 1, "=x"}, \
+ { "cvtps2pd", 2, "xm"}, \
+ { "cvtps2pi", 1, "=y"}, \
+ { "cvtps2pi", 2, "xm"}, \
+ { "cvtsd2si", 1, "=" r32R64}, \
+ { "cvtsd2si", 2, "xm"}, \
+ { "cvtsd2ss", 1, "=x"}, \
+ { "cvtsd2ss", 2, "xm"}, \
+ { "cvtsi2sd", 1, "=x"}, \
+ { "cvtsi2sd", 2, rm32RM64}, \
+ { "cvtsi2ss", 1, "=x"}, \
+ { "cvtsi2ss", 2, rm32RM64}, \
+ { "cvtss2sd", 1, "=x"}, \
+ { "cvtss2sd", 2, "xm"}, \
+ { "cvtss2si", 1, "=r"}, \
+ { "cvtss2si", 2, "xm"}, \
+ { "cvttpd2dq", 1, "=x"}, \
+ { "cvttpd2dq", 2, "xm"}, \
+ { "cvttpd2pi", 1, "=y"}, \
+ { "cvttpd2pi", 2, "xm"}, \
+ { "cvttps2dq", 1, "=x"}, \
+ { "cvttps2dq", 2, "xm"}, \
+ { "cvttps2pi", 1, "=y"}, \
+ { "cvttps2pi", 2, "xm"}, \
+ { "cvttsd2si", 1, "=r"}, \
+ { "cvttsd2si", 2, "xm"}, \
+ { "cvttss2si", 1, "=r"}, \
+ { "cvttss2si", 2, "xm"}, \
+ { "dec", 1, "+" rm8rm16rm32RM64},\
+ { "div", 1, rm8rm16rm32}, \
+ { "divpd", 1, "+x"}, \
+ { "divpd", 2, "xm"}, \
+ { "divps", 1, "+x"}, \
+ { "divps", 2, "xm"}, \
+ { "divsd", 1, "+x"}, \
+ { "divsd", 2, "xm"}, \
+ { "divss", 1, "+x"}, \
+ { "divss", 2, "xm"}, \
+ { "enter", 1, "i"}, \
+ { "enter", 2, "i"}, \
+ { "fadd", 1, "+t,f,@"}, \
+ { "fadd", 2, "f,t," m32fpm64fp},\
+ { "faddp", 1, "+f"}, \
+ { "faddp", 2, "t"}, \
+ { "fbld", 1, "m"}, \
+ { "fbstp", 1, "m"}, \
+ { "fcmovb", 1, "=t"}, \
+ { "fcmovb", 2, "f"}, \
+ { "fcmovbe", 1, "=t"}, \
+ { "fcmovbe", 2, "f"}, \
+ { "fcmove", 1, "=t"}, \
+ { "fcmove", 2, "f"}, \
+ { "fcmovnb", 1, "=t"}, \
+ { "fcmovnb", 2, "f"}, \
+ { "fcmovnbe", 1, "=t"}, \
+ { "fcmovnbe", 2, "f"}, \
+ { "fcmovne", 1, "=t"}, \
+ { "fcmovne", 2, "f"}, \
+ { "fcmovnu", 1, "=t"}, \
+ { "fcmovnu", 2, "f"}, \
+ { "fcmovu", 1, "=t"}, \
+ { "fcmovu", 2, "f"}, \
+ { "fcom", 1, "f" m32fpm64fp}, \
+ { "fcomi", 1, "t"}, \
+ { "fcomi", 2, "f"}, \
+ { "fcomip", 1, "t"}, \
+ { "fcomip", 2, "f"}, \
+ { "fcomp", 1, "f" m32fpm64fp},\
+ { "fdiv", 1, "+t,f,@"}, \
+ { "fdiv", 2, "f,t," m32fpm64fp},\
+ { "fdivp", 1, "+f"}, \
+ { "fdivp", 2, "t"}, \
+ { "fdivr", 1, "+t,@"}, \
+ { "fdivr", 2, "f," m32fpm64fp},\
+ { "fdivrp", 1, "+f"}, \
+ { "fdivrp", 2, "t"}, \
+ { "ffree", 1, "f"}, \
+ { "fiadd", 1, m16m32}, \
+ { "ficom", 1, m16m32}, \
+ { "ficomp", 1, m16m32}, \
+ { "fidiv", 1, m16m32}, \
+ { "fidivr", 1, m16m32}, \
+ { "fild", 1, m16m32m64}, \
+ { "fimul", 1, m16m32}, \
+ { "fist", 1, "=" m16m32}, \
+ { "fistp", 1, "=" m16m32m64}, \
+ { "fisttp", 1, "=" m16m32m64},\
+ { "fisub", 1, m16m32}, \
+ { "fisubr", 1, m16m32}, \
+ { "fld", 1, "f" m32fpm64fpm80fp},\
+ { "fldcw", 1, m16}, \
+ { "fldenv", 1, "m"}, \
+ { "fldt", 1, "m"}, \
+ { "fmul", 1, "=f,t,@"}, \
+ { "fmul", 2, "t,f," m32fpm64fp},\
+ { "fmulp", 1, "=f"}, \
+ { "fmulp", 2, "t"}, \
+ { "fnsave", 1, "=m"}, \
+ { "fnstcw", 1, "m"}, \
+ { "fnstenv", 1, "m"}, \
+ { "fnstsw", 1, "ma"}, \
+ { "frstor", 1, "m"}, \
+ { "fsave", 1, "=m"}, \
+ { "fst", 1, "=f" m32fpm64fp}, \
+ { "fstcw", 1, "=m"}, \
+ { "fstenv", 1, "=m"}, \
+ { "fstp", 1, "=f" m32fpm64fpm80fp},\
+ { "fstsw", 1, "=ma"}, \
+ { "fsub", 1, "=f,t,@"}, \
+ { "fsub", 2, "t,f," m32fpm64fp},\
+ { "fsubr", 1, "=f,t," m32fpm64fp},\
+ { "fsubr", 2, "t,f,@"}, \
+ { "fucom", 1, "f"}, \
+ { "fucomi", 1, "t"}, \
+ { "fucomi", 2, "f"}, \
+ { "fucomip", 1, "t"}, \
+ { "fucomip", 2, "f"}, \
+ { "fucomp", 1, "f"}, \
+ { "fxch", 1, "+f" }, \
+ { "fxrstor", 1, "m"}, \
+ { "fxsave", 1, "=m"}, \
+ { "haddpd", 1, "+x"}, \
+ { "haddpd", 2, "xm"}, \
+ { "haddps", 1, "+x"}, \
+ { "haddps", 2, "xm"}, \
+ { "hsubpd", 1, "+x"}, \
+ { "hsubpd", 2, "xm"}, \
+ { "hsubps", 1, "+x"}, \
+ { "hsubps", 2, "xm"}, \
+ { "idiv", 1, rm8rm16rm32RM64},\
+ { "imul", 1, "+r"}, \
+ { "imul", 2, "rm"}, \
+ { "imul", 3, "i"}, \
+ { "in", 1, "=a"}, \
+ { "in", 2, "i"}, \
+ { "inc", 1, "+" rm8rm16rm32RM64},\
+ { NX "ins", 1, "=" m8m16m32}, \
+ { NX "ins", 2, "d"}, \
+ { "int", 1, "i"}, \
+ { "invlpg", 1, "m"}, \
+ { "ja", 1, "s"}, \
+ { "jae", 1, "s"}, \
+ { "jb", 1, "s"}, \
+ { "jbe", 1, "s"}, \
+ { "jc", 1, "s"}, \
+ { NX "jcxz", 1, rel8}, \
+ { "je", 1, "s"}, \
+ { "jecxz", 1, rel8}, \
+ { "jg", 1, "s"}, \
+ { "jge", 1, "s"}, \
+ { "jl", 1, "s"}, \
+ { "jle", 1, "s"}, \
+ { NX "jmp", 1, "s" rm32}, \
+ { "jna", 1, "s"}, \
+ { "jnae", 1, "s"}, \
+ { "jnb", 1, "s"}, \
+ { "jnc", 1, "s"}, \
+ { "jne", 1, "s"}, \
+ { "jng", 1, "s"}, \
+ { "jnge", 1, "s"}, \
+ { "jnl", 1, "s"}, \
+ { "jnle", 1, "s"}, \
+ { "jno", 1, "s"}, \
+ { "jnp", 1, "s"}, \
+ { "jns", 1, "s"}, \
+ { "jnz", 1, "s"}, \
+ { "jo", 1, "s"}, \
+ { "jp", 1, "s"}, \
+ { "jpe", 1, "s"}, \
+ { "jpo", 1, "s"}, \
+ { "js", 1, "s"}, \
+ { "jz", 1, "s"}, \
+ { "lar", 1, "=r"}, \
+ { "lar", 2, "rm"}, \
+ { "lddqu", 1, "=x"}, \
+ { "lddqu", 2, "m"}, \
+ { "ldmxcsr", 1, "m"}, \
+ { NX "lds", 1, "=" r16 "," r32 C R64},\
+ { NX "lds", 2, m16 "," m32 C M64},\
+ { "lea", 1, "=r"}, \
+ { "lea", 2, "m"}, \
+ { NX "les", 1, "=" r16 "," r32 C R64},\
+ { NX "les", 2, m16 "," m32 C M64},\
+ { "lfs", 1, "=" r16 "," r32 C R64},\
+ { "lfs", 2, m16 "," m32 C M64},\
+ { "lgdt", 1, "m"}, \
+ { "lgs", 1, "=" r16 "," r32 C R64},\
+ { "lgs", 2, m16 "," m32 C M64},\
+ { "lidt", 1, "m"}, \
+ { "lldt", 1, rm16}, \
+ { "lmsw", 1, "m"}, \
+ { NX "lods", 1, m8m16m32M64}, \
+ { "loop", 1, rel8}, \
+ { "loope", 1, rel8}, \
+ { "loopne", 1, rel8}, \
+ { "loopnz", 1, rel8}, \
+ { "loopz", 1, rel8}, \
+ { "lsl", 1, "=" r16 "," r32}, \
+ { "lsl", 2, rm16 "," rm32}, \
+ { "lss", 1, "=" r16 "," r32 C R64},\
+ { "lss", 2, m16 "," m32 C M64},\
+ { "ltr", 1, rm16}, \
+ { "maskmovdqu", 1, "x"}, \
+ { "maskmovdqu", 2, "x"}, \
+ { "maskmovq", 1, "y"}, \
+ { "maskmovq", 2, "y"}, \
+ { "maxpd", 1, "+x"}, \
+ { "maxpd", 2, "xm"}, \
+ { "maxps", 1, "+x"}, \
+ { "maxps", 2, "xm"}, \
+ { "maxsd", 1, "+x"}, \
+ { "maxsd", 2, "xm"}, \
+ { "maxss", 1, "+x"}, \
+ { "maxss", 2, "xm"}, \
+ { "minpd", 1, "+x"}, \
+ { "minpd", 2, "xm"}, \
+ { "minps", 1, "+x"}, \
+ { "minps", 2, "xm"}, \
+ { "minsd", 1, "+x"}, \
+ { "minsd", 2, "xm"}, \
+ { "minss", 1, "+x"}, \
+ { "minss", 2, "xm"}, \
+ { "mov", 1, "=" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64}, \
+ { "mov", 2, ri8 "," ri16 "," ri32 C RI64 "," rmi8 "," rmi16 "," rmi32 C RMI64}, \
+ { "movapd", 1, "=x,xm"}, \
+ { "movapd", 2, "xm,x"}, \
+ { "movaps", 1, "=x,xm"}, \
+ { "movaps", 2, "xm,x"}, \
+ { "movd", 1, "=rm,x,y,rm"}, \
+ { "movd", 2, "x,rm,rm,y"}, \
+ { "movddup", 1, "=x"}, \
+ { "movddup", 2, "xm"}, \
+ { "movdq2q", 1, "=y"}, \
+ { "movdq2q", 2, "x"}, \
+ { "movdqa", 1, "=x"}, \
+ { "movdqa", 2, "xm"}, \
+ { "movdqu", 1, "=x"}, \
+ { "movdqu", 2, "xm"}, \
+ { "movhlps", 1, "=x"}, \
+ { "movhlps", 2, "x"}, \
+ { "movhpd", 1, "=x,m"}, \
+ { "movhpd", 2, "m,x"}, \
+ { "movhps", 1, "=x,m"}, \
+ { "movhps", 2, "m,x"}, \
+ { "movlhps", 1, "=x"}, \
+ { "movlhps", 2, "x"}, \
+ { "movlpd", 1, "=x,m"}, \
+ { "movlpd", 2, "m,x"}, \
+ { "movlps", 1, "=x,m"}, \
+ { "movlps", 2, "m,x"}, \
+ { "movmskpd", 1, "=r"}, \
+ { "movmskpd", 2, "x"}, \
+ { "movmskps", 1, "=r"}, \
+ { "movmskps", 2, "x"}, \
+ { "movntdq", 1, "=m"}, \
+ { "movntdq", 2, "x"}, \
+ { "movnti", 1, "=m"}, \
+ { "movnti", 2, "r"}, \
+ { "movntpd", 1, "=m"}, \
+ { "movntpd", 2, "x"}, \
+ { "movntps", 1, "=m"}, \
+ { "movntps", 2, "x"}, \
+ { "movntq", 1, "=m"}, \
+ { "movntq", 2, "y"}, \
+ { "movq", 1, "=x,m,y,m"}, \
+ { "movq", 2, "xm,x,ym,y"}, \
+ { "movq2dq", 1, "=x"}, \
+ { "movq2dq", 2, "y"}, \
+ { "movs", 1, "=" m8 "," m16 "," m32 C M64},\
+ { "movs", 2, m8 "," m16 "," m32 C M64},\
+ { "movsd", 1, "=xm,x"}, \
+ { "movsd", 2, "x,xm"}, \
+ { "movshdup", 1, "=x"}, \
+ { "movshdup", 2, "xm"}, \
+ { "movsldup", 1, "=x"}, \
+ { "movsldup", 2, "xm"}, \
+ { "movss", 1, "=xm,x"}, \
+ { "movss", 2, "x,xm"}, \
+ { "movsx", 1, "=" r16 "," r32},\
+ { "movsx", 2, rm8 "," rm8rm16},\
+ { "movupd", 1, "=x,xm"}, \
+ { "movupd", 2, "xm,x"}, \
+ { "movups", 1, "=x,xm"}, \
+ { "movups", 2, "xm,x"}, \
+ { "movzx", 1, "=" r16 "," r32},\
+ { "movzx", 2, rm8 "," rm8rm16},\
+ { "mul", 1, rm8rm16rm32}, \
+ { "mulpd", 1, "=x"}, \
+ { "mulpd", 2, "xm"}, \
+ { "mulps", 1, "=x"}, \
+ { "mulps", 2, "xm"}, \
+ { "mulsd", 1, "=x"}, \
+ { "mulsd", 2, "xm"}, \
+ { "mulss", 1, "=x"}, \
+ { "mulss", 2, "xm"}, \
+ { "neg", 1, "+" rm8rm16rm32}, \
+ { "not", 1, "+" rm8rm16rm32}, \
+ { "or", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "or", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "orpd", 1, "+x"}, \
+ { "orpd", 2, "xm"}, \
+ { "orps", 1, "+x"}, \
+ { "orps", 2, "xm"}, \
+ { "out", 1, "id"}, \
+ { "out", 2, a8 a16 a32}, \
+ { NX "outs", 1, "d"}, \
+ { NX "outs", 2, m8m16m32}, \
+ { "packssdw", 1, "+x,y"}, \
+ { "packssdw", 2, "xm,ym"}, \
+ { "packsswb", 1, "+x,y"}, \
+ { "packsswb", 2, "xm,ym"}, \
+ { "packuswb", 1, "+x,y"}, \
+ { "packuswb", 2, "xm,ym"}, \
+ { "paddb", 1, "+x,y"}, \
+ { "paddb", 2, "xm,ym"}, \
+ { "paddd", 1, "+x,y"}, \
+ { "paddd", 2, "xm,ym"}, \
+ { "paddq", 1, "+x,y"}, \
+ { "paddq", 2, "xm,ym"}, \
+ { "paddsb", 1, "+x,y"}, \
+ { "paddsb", 2, "xm,ym"}, \
+ { "paddsw", 1, "+x,y"}, \
+ { "paddsw", 2, "xm,ym"}, \
+ { "paddusb", 1, "+x,y"}, \
+ { "paddusb", 2, "xm,ym"}, \
+ { "paddusw", 1, "+x,y"}, \
+ { "paddusw", 2, "xm,ym"}, \
+ { "paddw", 1, "+x,y"}, \
+ { "paddw", 2, "xm,ym"}, \
+ { "pand", 1, "+x,y"}, \
+ { "pand", 2, "xm,ym"}, \
+ { "pandn", 1, "+x,y"}, \
+ { "pandn", 2, "xm,ym"}, \
+ { "pavgb", 1, "+x,y"}, \
+ { "pavgb", 2, "xm,ym"}, \
+ { "pavgw", 1, "+x,y"}, \
+ { "pavgw", 2, "xm,ym"}, \
+ { "pcmpeqb", 1, "+x,y"}, \
+ { "pcmpeqb", 2, "xm,ym"}, \
+ { "pcmpeqd", 1, "+x,y"}, \
+ { "pcmpeqd", 2, "xm,ym"}, \
+ { "pcmpeqw", 1, "+x,y"}, \
+ { "pcmpeqw", 2, "xm,ym"}, \
+ { "pcmpgtb", 1, "+x,y"}, \
+ { "pcmpgtb", 2, "xm,ym"}, \
+ { "pcmpgtd", 1, "+x,y"}, \
+ { "pcmpgtd", 2, "xm,ym"}, \
+ { "pcmpgtw", 1, "+x,y"}, \
+ { "pcmpgtw", 2, "xm,ym"}, \
+ { "pextrw", 1, "=" r32R64}, \
+ { "pextrw", 2, "xy"}, \
+ { "pextrw", 3, "i"}, \
+ { "pinsrw", 1, "=xy"}, \
+ { "pinsrw", 2, r32R64 "m"}, \
+ { "pinsrw", 3, "i"}, \
+ { "pmaddwd", 1, "+x,y"}, \
+ { "pmaddwd", 2, "xm,ym"}, \
+ { "pmaxsw", 1, "+x,y"}, \
+ { "pmaxsw", 2, "xm,ym"}, \
+ { "pmaxub", 1, "+x,y"}, \
+ { "pmaxub", 2, "xm,ym"}, \
+ { "pminsw", 1, "+x,y"}, \
+ { "pminsw", 2, "xm,ym"}, \
+ { "pminub", 1, "+x,y"}, \
+ { "pminub", 2, "xm,ym"}, \
+ { "pmovmskb", 1, "+" r32R64}, \
+ { "pmovmskb", 2, "xy"}, \
+ { "pmulhuw", 1, "+x,y"}, \
+ { "pmulhuw", 2, "xm,ym"}, \
+ { "pmulhw", 1, "+x,y"}, \
+ { "pmulhw", 2, "xm,ym"}, \
+ { "pmullw", 1, "+x,y"}, \
+ { "pmullw", 2, "xm,ym"}, \
+ { "pmuludq", 1, "+x,y"}, \
+ { "pmuludq", 2, "xm,ym"}, \
+ { "pop", 1, rm16 T(rm32) RM64},\
+ { "por", 1, "+x,y"}, \
+ { "por", 2, "xm,ym"}, \
+ { "prefetchnta", 1, "m"}, \
+ { "prefetcht0", 1, "m"}, \
+ { "prefetcht1", 1, "m"}, \
+ { "prefetcht2", 1, "m"}, \
+ { "psadbw", 1, "+x,y"}, \
+ { "psadbw", 2, "xm,ym"}, \
+ { "pshufd", 1, "=x"}, \
+ { "pshufd", 2, "xm"}, \
+ { "pshufd", 3, "i"}, \
+ { "pshufhw", 1, "=x"}, \
+ { "pshufhw", 2, "xm"}, \
+ { "pshufhw", 3, "i"}, \
+ { "pshuflw", 1, "=x"}, \
+ { "pshuflw", 2, "xm"}, \
+ { "pshuflw", 3, "i"}, \
+ { "pshufw", 1, "=y"}, \
+ { "pshufw", 2, "ym"}, \
+ { "pshufw", 3, "i"}, \
+ { "pslld", 1, "+x,y"}, \
+ { "pslld", 2, "xmi,ymi"}, \
+ { "pslldq", 1, "+x"}, \
+ { "pslldq", 2, "i"}, \
+ { "psllq", 1, "+x,y"}, \
+ { "psllq", 2, "xmi,ymi"}, \
+ { "psllw", 1, "+x,y"}, \
+ { "psllw", 2, "xmi,ymi"}, \
+ { "psrad", 1, "+x,y"}, \
+ { "psrad", 2, "xmi,ymi"}, \
+ { "psraw", 1, "+x,y"}, \
+ { "psraw", 2, "xmi,ymi"}, \
+ { "psrld", 1, "+x,y"}, \
+ { "psrld", 2, "xmi,ymi"}, \
+ { "psrldq", 1, "+x"}, \
+ { "psrldq", 2, "i"}, \
+ { "psrlq", 1, "+x,y"}, \
+ { "psrlq", 2, "xmi,ymi"}, \
+ { "psrlw", 1, "+x,y"}, \
+ { "psrlw", 2, "xmi,ymi"}, \
+ { "psubb", 1, "+x,y"}, \
+ { "psubb", 2, "xm,ym"}, \
+ { "psubd", 1, "+x,y"}, \
+ { "psubd", 2, "xm,ym"}, \
+ { "psubq", 1, "+x,y"}, \
+ { "psubq", 2, "xm,ym"}, \
+ { "psubsb", 1, "+x,y"}, \
+ { "psubsb", 2, "xm,ym"}, \
+ { "psubsw", 1, "+x,y"}, \
+ { "psubsw", 2, "xm,ym"}, \
+ { "psubusb", 1, "+x,y"}, \
+ { "psubusb", 2, "xm,ym"}, \
+ { "psubusw", 1, "+x,y"}, \
+ { "psubusw", 2, "xm,ym"}, \
+ { "psubw", 1, "+x,y"}, \
+ { "psubw", 2, "xm,ym"}, \
+ { "punpckhbw", 1, "+x,y"}, \
+ { "punpckhbw", 2, "xm,ym"}, \
+ { "punpckhdq", 1, "+x,y"}, \
+ { "punpckhdq", 2, "xm,ym"}, \
+ { "punpckhqdq", 1, "+x"}, \
+ { "punpckhqdq", 2, "xm"}, \
+ { "punpckhwd", 1, "+x,y"}, \
+ { "punpckhwd", 2, "xm,ym"}, \
+ { "punpcklbw", 1, "+x,y"}, \
+ { "punpcklbw", 2, "xm,ym"}, \
+ { "punpckldq", 1, "+x,y"}, \
+ { "punpckldq", 2, "xm,ym"}, \
+ { "punpcklqdq", 1, "+x"}, \
+ { "punpcklqdq", 2, "xm"}, \
+ { "punpcklwd", 1, "+x,y"}, \
+ { "punpcklwd", 2, "xm,ym"}, \
+ { "push", 1, rm16 T(rm32) RM64 "i"},\
+ { "pxor", 1, "+x,y"}, \
+ { "pxor", 2, "xm,ym"}, \
+ { "rcl", 1, "+" rm8rm16rm32}, \
+ { "rcl", 2, "ic"}, \
+ { "rcpps", 1, "+x"}, \
+ { "rcpps", 2, "xm"}, \
+ { "rcpss", 1, "+x"}, \
+ { "rcpss", 2, "xm"}, \
+ { "rcr", 1, "+" rm8rm16rm32}, \
+ { "rcr", 2, "ic"}, \
+ { "ret", 1, "i"}, \
+ { "rol", 1, "+" rm8rm16rm32}, \
+ { "rol", 2, "ic"}, \
+ { "ror", 1, "+" rm8rm16rm32}, \
+ { "ror", 2, "ic"}, \
+ { "rsqrtps", 1, "=x"}, \
+ { "rsqrtps", 2, "xm"}, \
+ { "rsqrtss", 1, "=x"}, \
+ { "rsqrtss", 2, "xm"}, \
+ { "sal", 1, "+" rm8rm16rm32}, \
+ { "sal", 2, "ic"}, \
+ { "sar", 1, "+" rm8rm16rm32}, \
+ { "sar", 2, "ic"}, \
+ { "sbb", 1, "+" rm8 "," rm16 "," rm32 "," r8 "," r16 "," r32},\
+ { "sbb", 2, ri8 "," ri16 "," ri32 "," m8 "," m16 "," m32},\
+ { "scas", 1, m8m16m32M64}, \
+ { "seta", 1, "=qm"}, \
+ { "setae", 1, "=qm"}, \
+ { "setb", 1, "=qm"}, \
+ { "setbe", 1, "=qm"}, \
+ { "setc", 1, "=qm"}, \
+ { "sete", 1, "=qm"}, \
+ { "setg", 1, "=qm"}, \
+ { "setge", 1, "=qm"}, \
+ { "setl", 1, "=qm"}, \
+ { "setle", 1, "=qm"}, \
+ { "setna", 1, "=qm"}, \
+ { "setnae", 1, "=qm"}, \
+ { "setnb", 1, "=qm"}, \
+ { "setnbe", 1, "=qm"}, \
+ { "setnc", 1, "=qm"}, \
+ { "setne", 1, "=qm"}, \
+ { "setng", 1, "=qm"}, \
+ { "setnge", 1, "=qm"}, \
+ { "setnl", 1, "=qm"}, \
+ { "setnle", 1, "=qm"}, \
+ { "setno", 1, "=qm"}, \
+ { "setnp", 1, "=qm"}, \
+ { "setns", 1, "=qm"}, \
+ { "setnz", 1, "=qm"}, \
+ { "seto", 1, "=qm"}, \
+ { "setp", 1, "=qm"}, \
+ { "setpe", 1, "=qm"}, \
+ { "setpo", 1, "=qm"}, \
+ { "sets", 1, "=qm"}, \
+ { "setz", 1, "=qm"}, \
+ { NY "sgdt", 1, "=m"}, \
+ { "shl", 1, "+" rm8rm16rm32}, \
+ { "shl", 2, "ic"}, \
+ { "shld", 1, "+" rm16 "," rm32 C RM64},\
+ { "shld", 2, r16 "," r32 C R64},\
+ { "shld", 3, "ic,ic" X(",ic")},\
+ { "shr", 1, "+" rm8rm16rm32}, \
+ { "shr", 2, "ic"}, \
+ { "shrd", 1, "+" rm16 "," rm32 C RM64},\
+ { "shrd", 2, r16 "," r32 C R64},\
+ { "shrd", 3, "ic,ic" X(",ic")}, \
+ { "shufpd", 1, "+x"}, \
+ { "shufpd", 2, "xm"}, \
+ { "shufpd", 3, "i"}, \
+ { "shufps", 1, "+x"}, \
+ { "shufps", 2, "xm"}, \
+ { "shufps", 3, "i"}, \
+ { NY "sidt", 1, "=m"}, \
+ { "sldt", 1, "=q" S("2") "m"},\
+ { "smsw", 1, "=q" S("2") "m"},\
+ { "sqrtpd", 1, "=x"}, \
+ { "sqrtpd", 2, "xm"}, \
+ { "sqrtps", 1, "=x"}, \
+ { "sqrtps", 2, "xm"}, \
+ { "sqrtsd", 1, "=x"}, \
+ { "sqrtsd", 2, "xm"}, \
+ { "sqrtss", 1, "=x"}, \
+ { "sqrtss", 2, "xm"}, \
+ { "stmxcsr", 1, "m"}, \
+ { "stos", 1, "=m"}, \
+ { "str", 1, "=q" S("2") "m"},\
+ { "sub", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "sub", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "subpd", 1, "+x"}, \
+ { "subpd", 2, "xm"}, \
+ { "subps", 1, "+x"}, \
+ { "subps", 2, "xm"}, \
+ { "subsd", 1, "+x"}, \
+ { "subsd", 2, "xm"}, \
+ { "subss", 1, "+x"}, \
+ { "subss", 2, "xm"}, \
+ { "test", 1, "+r," rm8rm16rm32},\
+ { "test", 2, "r,i"}, \
+ { "ucomisd", 1, "+x"}, \
+ { "ucomisd", 2, "xm"}, \
+ { "ucomiss", 1, "+x"}, \
+ { "ucomiss", 2, "xm"}, \
+ { "unpckhpd", 1, "+x"}, \
+ { "unpckhpd", 2, "xm"}, \
+ { "unpckhps", 1, "+x"}, \
+ { "unpckhps", 2, "xm"}, \
+ { "unpcklpd", 1, "+x"}, \
+ { "unpcklpd", 2, "xm"}, \
+ { "unpcklps", 1, "+x"}, \
+ { "unpcklps", 2, "xm"}, \
+ { "verr", 1, rm16}, \
+ { "verw", 1, rm16}, \
+ { "xadd", 1, "+" rm8 "," rm16 "," rm32},\
+ { "xadd", 2, r8 "," r16 "," r32},\
+ { "xchg", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "xchg", 2, "+" r8 "," r16 "," r32 C R64 "," m8 "," m16 "," m32 C M64},\
+ { "xlat", 1, "m"}, \
+ { "xor", 1, "+" rm8 "," rm16 "," rm32 C RM64 "," r8 "," r16 "," r32 C R64},\
+ { "xor", 2, ri8 "," ri16 "," ri32 C RI64 "," m8 "," m16 "," m32 C M64},\
+ { "xorpd", 1, "+x"}, \
+ { "xorpd", 2, "xm"}, \
+ { "xorps", 1, "+x"}, \
+ { "xorps", 2, "xm"},
+
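+/* rdtsc returns the time-stamp counter in EDX:EAX, so an asm block
+   that uses it clobbers both registers even though neither appears as
+   an explicit operand.  */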
+#define TARGET_IASM_EXTRA_CLOBBERS \
+ { "rdtsc", { "edx", "eax"} }
+
+#define IASM_FUNCTION_MODIFIER "P"
+
+#define IASM_REGISTER_NAME(STR, BUF) i386_iasm_register_name (STR, BUF)
+
+/* APPLE LOCAL end CW asm blocks */
+
+/* Flag to mark data that is in the large address area. */
+#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0)
+#define SYMBOL_REF_FAR_ADDR_P(X) \
+ ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)
+/*
+Local variables:
+version-control: t
+End:
+*/
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386.md b/gcc-4.2.1-5666.3/gcc/config/i386/i386.md
new file mode 100644
index 000000000..e825a0474
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386.md
@@ -0,0 +1,21399 @@
+;; GCC machine description for IA-32 and x86-64.
+;; Copyright (C) 1988, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+;; 2001, 2002, 2003, 2004, 2005, 2006
+;; Free Software Foundation, Inc.
+;; Mostly by William Schelter.
+;; x86_64 support added by Jan Hubicka
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA. */
+;;
+;; The original PO technology requires these to be ordered by speed,
+;; so that the assigner will pick the fastest.
+;;
+;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
+;;
+;; Macro REG_CLASS_FROM_LETTER in file i386.h defines the register
+;; constraint letters.
+;;
+;; The special asm out single letter directives following a '%' are:
+;; 'z' mov%z1 would be movl, movw, or movb depending on the mode of
+;; operands[1].
+;; 'L' Print the opcode suffix for a 32-bit integer opcode.
+;; 'W' Print the opcode suffix for a 16-bit integer opcode.
+;; 'B' Print the opcode suffix for an 8-bit integer opcode.
+;; 'Q' Print the opcode suffix for a 64-bit float opcode.
+;; 'S' Print the opcode suffix for a 32-bit float opcode.
+;; 'T' Print the opcode suffix for an 80-bit extended real XFmode float opcode.
+;; 'J' Print the appropriate jump operand.
+;;
+;; 'b' Print the QImode name of the register for the indicated operand.
+;; %b0 would print %al if operands[0] is reg 0.
+;; 'w' Likewise, print the HImode name of the register.
+;; 'k' Likewise, print the SImode name of the register.
+;; 'h' Print the QImode name for a "high" register, either ah, bh, ch or dh.
+;; 'y' Print "st(0)" instead of "st" as a register.
+
+;; UNSPEC usage:
+
+(define_constants
+ [; Relocation specifiers
+ (UNSPEC_GOT 0)
+ (UNSPEC_GOTOFF 1)
+ (UNSPEC_GOTPCREL 2)
+ (UNSPEC_GOTTPOFF 3)
+ (UNSPEC_TPOFF 4)
+ (UNSPEC_NTPOFF 5)
+ (UNSPEC_DTPOFF 6)
+ (UNSPEC_GOTNTPOFF 7)
+ (UNSPEC_INDNTPOFF 8)
+
+ ; Prologue support
+ (UNSPEC_STACK_ALLOC 11)
+ (UNSPEC_SET_GOT 12)
+ (UNSPEC_SSE_PROLOGUE_SAVE 13)
+ (UNSPEC_REG_SAVE 14)
+ (UNSPEC_DEF_CFA 15)
+
+ ; TLS support
+ (UNSPEC_TP 16)
+ (UNSPEC_TLS_GD 17)
+ (UNSPEC_TLS_LD_BASE 18)
+ (UNSPEC_TLSDESC 19)
+
+ ; Other random patterns
+ (UNSPEC_SCAS 20)
+ (UNSPEC_FNSTSW 21)
+ (UNSPEC_SAHF 22)
+ (UNSPEC_FSTCW 23)
+ (UNSPEC_ADD_CARRY 24)
+ (UNSPEC_FLDCW 25)
+ (UNSPEC_REP 26)
+ (UNSPEC_EH_RETURN 27)
+ (UNSPEC_LD_MPIC 28) ; load_macho_picbase
+
+ ; For SSE/MMX support:
+ (UNSPEC_FIX_NOTRUNC 30)
+ (UNSPEC_MASKMOV 31)
+ (UNSPEC_MOVMSK 32)
+ (UNSPEC_MOVNT 33)
+ (UNSPEC_MOVU 34)
+ (UNSPEC_RCP 35)
+ (UNSPEC_RSQRT 36)
+ (UNSPEC_SFENCE 37)
+ (UNSPEC_NOP 38) ; prevents combiner cleverness
+ (UNSPEC_PFRCP 39)
+ (UNSPEC_PFRCPIT1 40)
+ (UNSPEC_PFRCPIT2 41)
+ (UNSPEC_PFRSQRT 42)
+ (UNSPEC_PFRSQIT1 43)
+ (UNSPEC_MFENCE 44)
+ (UNSPEC_LFENCE 45)
+ (UNSPEC_PSADBW 46)
+ (UNSPEC_LDQQU 47)
+ ; APPLE LOCAL begin 4121692
+ (UNSPEC_LDQ 201)
+ (UNSPEC_MOVQ 202)
+ (UNSPEC_STOQ 203)
+ ; APPLE LOCAL end 4121692
+
+ ; Generic math support
+ (UNSPEC_COPYSIGN 50)
+ (UNSPEC_IEEE_MIN 51) ; not commutative
+ (UNSPEC_IEEE_MAX 52) ; not commutative
+
+ ; x87 Floating point
+ (UNSPEC_SIN 60)
+ (UNSPEC_COS 61)
+ (UNSPEC_FPATAN 62)
+ (UNSPEC_FYL2X 63)
+ (UNSPEC_FYL2XP1 64)
+ (UNSPEC_FRNDINT 65)
+ (UNSPEC_FIST 66)
+ (UNSPEC_F2XM1 67)
+
+ ; x87 Rounding
+ (UNSPEC_FRNDINT_FLOOR 70)
+ (UNSPEC_FRNDINT_CEIL 71)
+ (UNSPEC_FRNDINT_TRUNC 72)
+ (UNSPEC_FRNDINT_MASK_PM 73)
+ (UNSPEC_FIST_FLOOR 74)
+ (UNSPEC_FIST_CEIL 75)
+ ; APPLE LOCAL 3399553
+ (UNSPEC_FLT_ROUNDS 76)
+
+ ; x87 Double output FP
+ (UNSPEC_SINCOS_COS 80)
+ (UNSPEC_SINCOS_SIN 81)
+ (UNSPEC_TAN_ONE 82)
+ (UNSPEC_TAN_TAN 83)
+ (UNSPEC_XTRACT_FRACT 84)
+ (UNSPEC_XTRACT_EXP 85)
+ (UNSPEC_FSCALE_FRACT 86)
+ (UNSPEC_FSCALE_EXP 87)
+ (UNSPEC_FPREM_F 88)
+ (UNSPEC_FPREM_U 89)
+ (UNSPEC_FPREM1_F 90)
+ (UNSPEC_FPREM1_U 91)
+
+ ; SSP patterns
+ (UNSPEC_SP_SET 100)
+ (UNSPEC_SP_TEST 101)
+ (UNSPEC_SP_TLS_SET 102)
+ (UNSPEC_SP_TLS_TEST 103)
+ ; APPLE LOCAL begin mainline
+ ; SSSE3
+ (UNSPEC_PSHUFB 220)
+ (UNSPEC_PSIGN 221)
+ (UNSPEC_PALIGNR 222)
+ ; APPLE LOCAL end mainline
+ ; APPLE LOCAL begin 5612787 mainline sse4
+ ; For SSE4A support
+ (UNSPEC_EXTRQI 130)
+ (UNSPEC_EXTRQ 131)
+ (UNSPEC_INSERTQI 132)
+ (UNSPEC_INSERTQ 133)
+
+ ; For SSE4.1 support
+ (UNSPEC_BLENDV 134)
+ (UNSPEC_INSERTPS 135)
+ (UNSPEC_DP 136)
+ (UNSPEC_MOVNTDQA 137)
+ (UNSPEC_MPSADBW 138)
+ (UNSPEC_PHMINPOSUW 139)
+ (UNSPEC_PTEST 140)
+ (UNSPEC_ROUND 141)
+
+ ; For SSE4.2 support
+ (UNSPEC_CRC32 143)
+ (UNSPEC_PCMPESTR 144)
+ (UNSPEC_PCMPISTR 145)
+ ; APPLE LOCAL end 5612787 mainline sse4
+ ])
+
+(define_constants
+ [(UNSPECV_BLOCKAGE 0)
+ (UNSPECV_STACK_PROBE 1)
+ (UNSPECV_EMMS 2)
+ (UNSPECV_LDMXCSR 3)
+ (UNSPECV_STMXCSR 4)
+ (UNSPECV_FEMMS 5)
+ (UNSPECV_CLFLUSH 6)
+ (UNSPECV_ALIGN 7)
+ (UNSPECV_MONITOR 8)
+ (UNSPECV_MWAIT 9)
+ (UNSPECV_CMPXCHG_1 10)
+ (UNSPECV_CMPXCHG_2 11)
+ (UNSPECV_XCHG 12)
+ (UNSPECV_LOCK 13)
+ ])
+
+;; Registers by name.
+(define_constants
+ [(BP_REG 6)
+ (SP_REG 7)
+ (FLAGS_REG 17)
+ (FPSR_REG 18)
+ (DIRFLAG_REG 19)
+ ])
+
+;; Insns whose names begin with "x86_" are emitted by gen_FOO calls
+;; from i386.c.
+
+;; In C guard expressions, put expressions which may be compile-time
+;; constants first. This allows for better optimization. For
+;; example, write "TARGET_64BIT && reload_completed", not
+;; "reload_completed && TARGET_64BIT".
+
+
+;; Processor type. This attribute must exactly match the processor_type
+;; enumeration in i386.h.
+; APPLE LOCAL mainline 2006-04-19 4434601
+(define_attr "cpu" "i386,i486,pentium,pentiumpro,k6,athlon,pentium4,k8,nocona,core2,generic32,generic64"
+ (const (symbol_ref "ix86_tune")))
+
+;; A basic instruction type.  Refinements based on the arguments are
+;; provided in other attributes.
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_attr "type"
+ "other,multi,
+ alu,alu1,negnot,imov,imovx,lea,
+ incdec,ishift,ishift1,rotate,rotate1,imul,idiv,
+ icmp,test,ibr,setcc,icmov,
+ push,pop,call,callv,leave,
+ str,cld,
+ fmov,fop,fsgn,fmul,fdiv,fpspc,fcmov,fcmp,fxch,fistp,fisttp,frndint,
+ sselog,sselog1,sseiadd,sseishft,sseimul,
+ sse,ssemov,sseadd,ssemul,ssecmp,ssecomi,ssecvt,sseicvt,ssediv,sseins,
+ mmx,mmxmov,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft"
+ (const_string "other"))
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Main data type used by the insn
+(define_attr "mode"
+ "unknown,none,QI,HI,SI,DI,SF,DF,XF,TI,V4SF,V2DF,V2SF,V1DF"
+ (const_string "unknown"))
+
+;; The CPU unit an operation uses.
+(define_attr "unit" "integer,i387,sse,mmx,unknown"
+ (cond [(eq_attr "type" "fmov,fop,fsgn,fmul,fdiv,fpspc,fcmov,fcmp,fxch,fistp,fisttp,frndint")
+ (const_string "i387")
+ (eq_attr "type" "sselog,sselog1,sseiadd,sseishft,sseimul,
+ sse,ssemov,sseadd,ssemul,ssecmp,ssecomi,ssecvt,sseicvt,ssediv")
+ (const_string "sse")
+ (eq_attr "type" "mmx,mmxmov,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft")
+ (const_string "mmx")
+ (eq_attr "type" "other")
+ (const_string "unknown")]
+ (const_string "integer")))
+
+;; The (bounding maximum) length of an instruction immediate.
+(define_attr "length_immediate" ""
+ (cond [(eq_attr "type" "incdec,setcc,icmov,str,cld,lea,other,multi,idiv,leave")
+ (const_int 0)
+ (eq_attr "unit" "i387,sse,mmx")
+ (const_int 0)
+ (eq_attr "type" "alu,alu1,negnot,imovx,ishift,rotate,ishift1,rotate1,
+ imul,icmp,push,pop")
+ (symbol_ref "ix86_attr_length_immediate_default(insn,1)")
+ (eq_attr "type" "imov,test")
+ (symbol_ref "ix86_attr_length_immediate_default(insn,0)")
+ (eq_attr "type" "call")
+ (if_then_else (match_operand 0 "constant_call_address_operand" "")
+ (const_int 4)
+ (const_int 0))
+ (eq_attr "type" "callv")
+ (if_then_else (match_operand 1 "constant_call_address_operand" "")
+ (const_int 4)
+ (const_int 0))
+ ;; We don't know the size before shorten_branches. Expect
+ ;; the instruction to fit for better scheduling.
+ (eq_attr "type" "ibr")
+ (const_int 1)
+ ]
+ (symbol_ref "/* Update immediate_length and other attributes! */
+ gcc_unreachable (),1")))
+
+;; The (bounding maximum) length of an instruction address.
+(define_attr "length_address" ""
+ (cond [(eq_attr "type" "str,cld,other,multi,fxch")
+ (const_int 0)
+ (and (eq_attr "type" "call")
+ (match_operand 0 "constant_call_address_operand" ""))
+ (const_int 0)
+ (and (eq_attr "type" "callv")
+ (match_operand 1 "constant_call_address_operand" ""))
+ (const_int 0)
+ ]
+ (symbol_ref "ix86_attr_length_address_default (insn)")))
+
+;; Set when the operand-size (data16) prefix is used.
+(define_attr "prefix_data16" ""
+ (if_then_else (ior (eq_attr "mode" "HI")
+ (and (eq_attr "unit" "sse") (eq_attr "mode" "V2DF")))
+ (const_int 1)
+ (const_int 0)))
+
+;; Set when string REP prefix is used.
+(define_attr "prefix_rep" ""
+ (if_then_else (and (eq_attr "unit" "sse") (eq_attr "mode" "SF,DF"))
+ (const_int 1)
+ (const_int 0)))
+
+;; Set when 0f opcode prefix is used.
+(define_attr "prefix_0f" ""
+ (if_then_else
+ (ior (eq_attr "type" "imovx,setcc,icmov")
+ (eq_attr "unit" "sse,mmx"))
+ (const_int 1)
+ (const_int 0)))
+
+;; Set when REX opcode prefix is used.
+(define_attr "prefix_rex" ""
+ (cond [(and (eq_attr "mode" "DI")
+ (eq_attr "type" "!push,pop,call,callv,leave,ibr"))
+ (const_int 1)
+ (and (eq_attr "mode" "QI")
+ (ne (symbol_ref "x86_extended_QIreg_mentioned_p (insn)")
+ (const_int 0)))
+ (const_int 1)
+ (ne (symbol_ref "x86_extended_reg_mentioned_p (insn)")
+ (const_int 0))
+ (const_int 1)
+ ]
+ (const_int 0)))
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; There are also additional prefixes in SSSE3.
+(define_attr "prefix_extra" "" (const_int 0))
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Set when modrm byte is used.
+(define_attr "modrm" ""
+ (cond [(eq_attr "type" "str,cld,leave")
+ (const_int 0)
+ (eq_attr "unit" "i387")
+ (const_int 0)
+ (and (eq_attr "type" "incdec")
+ (ior (match_operand:SI 1 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")))
+ (const_int 0)
+ (and (eq_attr "type" "push")
+ (not (match_operand 1 "memory_operand" "")))
+ (const_int 0)
+ (and (eq_attr "type" "pop")
+ (not (match_operand 0 "memory_operand" "")))
+ (const_int 0)
+ (and (eq_attr "type" "imov")
+ (ior (and (match_operand 0 "register_operand" "")
+ (match_operand 1 "immediate_operand" ""))
+ (ior (and (match_operand 0 "ax_reg_operand" "")
+ (match_operand 1 "memory_displacement_only_operand" ""))
+ (and (match_operand 0 "memory_displacement_only_operand" "")
+ (match_operand 1 "ax_reg_operand" "")))))
+ (const_int 0)
+ (and (eq_attr "type" "call")
+ (match_operand 0 "constant_call_address_operand" ""))
+ (const_int 0)
+ (and (eq_attr "type" "callv")
+ (match_operand 1 "constant_call_address_operand" ""))
+ (const_int 0)
+ ]
+ (const_int 1)))
+
+;; The (bounding maximum) length of an instruction in bytes.
+;; ??? fistp and frndint are in fact fldcw/{fistp,frndint}/fldcw sequences.
+;; Later we may want to split them and compute proper length as for
+;; other insns.
+(define_attr "length" ""
+ (cond [(eq_attr "type" "other,multi,fistp,frndint")
+ (const_int 16)
+ (eq_attr "type" "fcmp")
+ (const_int 4)
+ (eq_attr "unit" "i387")
+ (plus (const_int 2)
+ (plus (attr "prefix_data16")
+ (attr "length_address")))]
+ (plus (plus (attr "modrm")
+ (plus (attr "prefix_0f")
+ (plus (attr "prefix_rex")
+ (const_int 1))))
+ (plus (attr "prefix_rep")
+ (plus (attr "prefix_data16")
+ (plus (attr "length_immediate")
+ (attr "length_address")))))))
+
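+;; As a rough worked example (the real numbers come from the
+;; ix86_attr_length_* helpers): "addl $42, 8(%ebp)" is one opcode byte
+;; plus modrm (1) plus a disp8 address (1) plus a sign-extended imm8 (1),
+;; i.e. 4 bytes, with no 0f, REX, rep, or data16 prefixes.
+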
+;; The `memory' attribute is `none' if no memory is referenced, `load' or
+;; `store' if there is a simple memory reference therein, `both' if it
+;; both loads and stores, or `unknown' if the instruction is complex.
+
+(define_attr "memory" "none,load,store,both,unknown"
+ (cond [(eq_attr "type" "other,multi,str")
+ (const_string "unknown")
+ (eq_attr "type" "lea,fcmov,fpspc,cld")
+ (const_string "none")
+ (eq_attr "type" "fistp,leave")
+ (const_string "both")
+ (eq_attr "type" "frndint")
+ (const_string "load")
+ (eq_attr "type" "push")
+ (if_then_else (match_operand 1 "memory_operand" "")
+ (const_string "both")
+ (const_string "store"))
+ (eq_attr "type" "pop")
+ (if_then_else (match_operand 0 "memory_operand" "")
+ (const_string "both")
+ (const_string "load"))
+ (eq_attr "type" "setcc")
+ (if_then_else (match_operand 0 "memory_operand" "")
+ (const_string "store")
+ (const_string "none"))
+ (eq_attr "type" "icmp,test,ssecmp,ssecomi,mmxcmp,fcmp")
+ (if_then_else (ior (match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "load")
+ (const_string "none"))
+ (eq_attr "type" "ibr")
+ (if_then_else (match_operand 0 "memory_operand" "")
+ (const_string "load")
+ (const_string "none"))
+ (eq_attr "type" "call")
+ (if_then_else (match_operand 0 "constant_call_address_operand" "")
+ (const_string "none")
+ (const_string "load"))
+ (eq_attr "type" "callv")
+ (if_then_else (match_operand 1 "constant_call_address_operand" "")
+ (const_string "none")
+ (const_string "load"))
+ (and (eq_attr "type" "alu1,negnot,ishift1,sselog1")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "both")
+ (and (match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "both")
+ (match_operand 0 "memory_operand" "")
+ (const_string "store")
+ (match_operand 1 "memory_operand" "")
+ (const_string "load")
+ (and (eq_attr "type"
+ "!alu1,negnot,ishift1,
+ imov,imovx,icmp,test,
+ fmov,fcmp,fsgn,
+ sse,ssemov,ssecmp,ssecomi,ssecvt,sseicvt,sselog1,
+ mmx,mmxmov,mmxcmp,mmxcvt")
+ (match_operand 2 "memory_operand" ""))
+ (const_string "load")
+ (and (eq_attr "type" "icmov")
+ (match_operand 3 "memory_operand" ""))
+ (const_string "load")
+ ]
+ (const_string "none")))
+
+;; Indicates if an instruction has both an immediate and a displacement.
+
+(define_attr "imm_disp" "false,true,unknown"
+ (cond [(eq_attr "type" "other,multi")
+ (const_string "unknown")
+ (and (eq_attr "type" "icmp,test,imov,alu1,ishift1,rotate1")
+ (and (match_operand 0 "memory_displacement_operand" "")
+ (match_operand 1 "immediate_operand" "")))
+ (const_string "true")
+ (and (eq_attr "type" "alu,ishift,rotate,imul,idiv")
+ (and (match_operand 0 "memory_displacement_operand" "")
+ (match_operand 2 "immediate_operand" "")))
+ (const_string "true")
+ ]
+ (const_string "false")))
+
+;; Indicates if an FP operation has an integer source.
+
+(define_attr "fp_int_src" "false,true"
+ (const_string "false"))
+
+;; Defines rounding mode of an FP operation.
+
+(define_attr "i387_cw" "trunc,floor,ceil,mask_pm,uninitialized,any"
+ (const_string "any"))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "length" "128")
+ (set_attr "type" "multi")])
+
+;; All x87 floating point modes
+(define_mode_macro X87MODEF [SF DF XF])
+
+;; All integer modes handled by x87 fisttp operator.
+(define_mode_macro X87MODEI [HI SI DI])
+
+;; All integer modes handled by integer x87 operators.
+(define_mode_macro X87MODEI12 [HI SI])
+
+;; All SSE floating point modes
+(define_mode_macro SSEMODEF [SF DF])
+
+;; All integer modes handled by SSE cvtts?2si* operators.
+(define_mode_macro SSEMODEI24 [SI DI])
+
+
+;; Scheduling descriptions
+
+(include "pentium.md")
+(include "ppro.md")
+(include "k6.md")
+(include "athlon.md")
+
+
+;; Operand and operator predicates and constraints
+
+(include "predicates.md")
+(include "constraints.md")
+
+
+;; Compare instructions.
+
+;; All compare insns have expanders that save the operands away without
+;; actually generating RTL. The bCOND or sCOND (emitted immediately
+;; after the cmp) will actually emit the cmpM.
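+;; For example, for "if (a < b)" the cmpsi expander below merely records
+;; a and b in ix86_compare_op0/ix86_compare_op1; the blt expander that
+;; follows it then emits both the cmpl and the conditional jump.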
+
+(define_expand "cmpti"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "x86_64_general_operand" "")))]
+ "TARGET_64BIT"
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (TImode, operands[0]);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmpdi"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "x86_64_general_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (DImode, operands[0]);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmpsi"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:SI 0 "cmpsi_operand" "")
+ (match_operand:SI 1 "general_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (SImode, operands[0]);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmphi"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" "")))]
+ ""
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (HImode, operands[0]);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmpqi"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" "")))]
+ "TARGET_QIMODE_MATH"
+{
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[0] = force_reg (QImode, operands[0]);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_insn "cmpdi_ccno_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:DI 0 "nonimmediate_operand" "r,?mr")
+ (match_operand:DI 1 "const0_operand" "n,n")))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
+ "@
+ test{q}\t{%0, %0|%0, %0}
+ cmp{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test,icmp")
+ (set_attr "length_immediate" "0,1")
+ (set_attr "mode" "DI")])
+
+(define_insn "*cmpdi_minus_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (minus:DI (match_operand:DI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:DI 1 "x86_64_general_operand" "re,mr"))
+ (const_int 0)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)"
+ "cmp{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "DI")])
+
+(define_expand "cmpdi_1_rex64"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" "")))]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "cmpdi_1_insn_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:DI 0 "nonimmediate_operand" "mr,r")
+ (match_operand:DI 1 "x86_64_general_operand" "re,mr")))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)"
+ "cmp{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "DI")])
+
+
+(define_insn "*cmpsi_ccno_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "r,?mr")
+ (match_operand:SI 1 "const0_operand" "n,n")))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "@
+ test{l}\t{%0, %0|%0, %0}
+ cmp{l}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test,icmp")
+ (set_attr "length_immediate" "0,1")
+ (set_attr "mode" "SI")])
+
+(define_insn "*cmpsi_minus_1"
+ [(set (reg FLAGS_REG)
+ (compare (minus:SI (match_operand:SI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:SI 1 "general_operand" "ri,mr"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCGOCmode)"
+ "cmp{l}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "SI")])
+
+(define_expand "cmpsi_1"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:SI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:SI 1 "general_operand" "ri,mr")))]
+ ""
+ "")
+
+(define_insn "*cmpsi_1_insn"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:SI 1 "general_operand" "ri,mr")))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCmode)"
+ "cmp{l}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "SI")])
+
+(define_insn "*cmphi_ccno_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "r,?mr")
+ (match_operand:HI 1 "const0_operand" "n,n")))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "@
+ test{w}\t{%0, %0|%0, %0}
+ cmp{w}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test,icmp")
+ (set_attr "length_immediate" "0,1")
+ (set_attr "mode" "HI")])
+
+(define_insn "*cmphi_minus_1"
+ [(set (reg FLAGS_REG)
+ (compare (minus:HI (match_operand:HI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:HI 1 "general_operand" "ri,mr"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCGOCmode)"
+ "cmp{w}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "HI")])
+
+(define_insn "*cmphi_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "rm,r")
+ (match_operand:HI 1 "general_operand" "ri,mr")))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCmode)"
+ "cmp{w}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "HI")])
+
+(define_insn "*cmpqi_ccno_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "q,?mq")
+ (match_operand:QI 1 "const0_operand" "n,n")))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "@
+ test{b}\t{%0, %0|%0, %0}
+ cmp{b}\t{$0, %0|%0, 0}"
+ [(set_attr "type" "test,icmp")
+ (set_attr "length_immediate" "0,1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_1"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "qm,q")
+ (match_operand:QI 1 "general_operand" "qi,mq")))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_minus_1"
+ [(set (reg FLAGS_REG)
+ (compare (minus:QI (match_operand:QI 0 "nonimmediate_operand" "qm,q")
+ (match_operand:QI 1 "general_operand" "qi,mq"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCGOCmode)"
+ "cmp{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_ext_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (match_operand:QI 0 "general_operand" "Qm")
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)))]
+ "!TARGET_64BIT && ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%h1, %0|%0, %h1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_ext_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (match_operand:QI 0 "register_operand" "Q")
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%h1, %0|%0, %h1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_ext_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)
+ (match_operand:QI 1 "const0_operand" "n")))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t%h0, %h0"
+ [(set_attr "type" "test")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_expand "cmpqi_ext_3"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8)) 0)
+ (match_operand:QI 1 "general_operand" "")))]
+ ""
+ "")
+
+(define_insn "cmpqi_ext_3_insn"
+ [(set (reg FLAGS_REG)
+ (compare
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)
+ (match_operand:QI 1 "general_operand" "Qmn")))]
+ "!TARGET_64BIT && ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%1, %h0|%h0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "cmpqi_ext_3_insn_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)
+ (match_operand:QI 1 "nonmemory_operand" "Qn")))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%1, %h0|%h0, %1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+(define_insn "*cmpqi_ext_4"
+ [(set (reg FLAGS_REG)
+ (compare
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)
+ (subreg:QI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)) 0)))]
+ "ix86_match_ccmode (insn, CCmode)"
+ "cmp{b}\t{%h1, %h0|%h0, %h1}"
+ [(set_attr "type" "icmp")
+ (set_attr "mode" "QI")])
+
+;; These implement floating point compares.
+;; %%% See if we can get away with VOIDmode operands on the actual insns,
+;; which would allow mixing and matching FP modes on the compares.  That
+;; is what the old patterns did, but with many more of them.
+
+(define_expand "cmpxf"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:XF 0 "nonmemory_operand" "")
+ (match_operand:XF 1 "nonmemory_operand" "")))]
+ "TARGET_80387"
+{
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmpdf"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:DF 0 "cmp_fp_expander_operand" "")
+ (match_operand:DF 1 "cmp_fp_expander_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+{
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+(define_expand "cmpsf"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:SF 0 "cmp_fp_expander_operand" "")
+ (match_operand:SF 1 "cmp_fp_expander_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+{
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ DONE;
+})
+
+;; FP compares, step 1:
+;; Set the FP condition codes.
+;;
+;; CCFPmode compare with exceptions
+;; CCFPUmode compare with no exceptions
+
+;; We may not use "#" to split and emit these, since the REG_DEAD notes
+;; used to manage the reg stack popping would not be preserved.
+
+(define_insn "*cmpfp_0"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFP
+ (match_operand 1 "register_operand" "f")
+ (match_operand 2 "const0_operand" "X"))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])"
+ "* return output_fp_compare (insn, operands, 0, 0);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set (attr "mode")
+ (cond [(match_operand:SF 1 "" "")
+ (const_string "SF")
+ (match_operand:DF 1 "" "")
+ (const_string "DF")
+ ]
+ (const_string "XF")))])
+
+(define_insn "*cmpfp_sf"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFP
+ (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "nonimmediate_operand" "fm"))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387"
+ "* return output_fp_compare (insn, operands, 0, 0);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set_attr "mode" "SF")])
+
+(define_insn "*cmpfp_df"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFP
+ (match_operand:DF 1 "register_operand" "f")
+ (match_operand:DF 2 "nonimmediate_operand" "fm"))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387"
+ "* return output_fp_compare (insn, operands, 0, 0);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set_attr "mode" "DF")])
+
+(define_insn "*cmpfp_xf"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFP
+ (match_operand:XF 1 "register_operand" "f")
+ (match_operand:XF 2 "register_operand" "f"))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387"
+ "* return output_fp_compare (insn, operands, 0, 0);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set_attr "mode" "XF")])
+
+(define_insn "*cmpfp_u"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFPU
+ (match_operand 1 "register_operand" "f")
+ (match_operand 2 "register_operand" "f"))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])"
+ "* return output_fp_compare (insn, operands, 0, 1);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set (attr "mode")
+ (cond [(match_operand:SF 1 "" "")
+ (const_string "SF")
+ (match_operand:DF 1 "" "")
+ (const_string "DF")
+ ]
+ (const_string "XF")))])
+
+(define_insn "*cmpfp_<mode>"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI
+ [(compare:CCFP
+ (match_operand 1 "register_operand" "f")
+ (match_operator 3 "float_operator"
+ [(match_operand:X87MODEI12 2 "memory_operand" "m")]))]
+ UNSPEC_FNSTSW))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && (GET_MODE (operands [3]) == GET_MODE (operands[1]))"
+ "* return output_fp_compare (insn, operands, 0, 0);"
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387")
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+;; FP compares, step 2
+;; Move the fpsw to ax.
+
+(define_insn "x86_fnstsw_1"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (unspec:HI [(reg:CCFP FPSR_REG)] UNSPEC_FNSTSW))]
+ "TARGET_80387"
+ "fnstsw\t%0"
+ [(set_attr "length" "2")
+ (set_attr "mode" "SI")
+ (set_attr "unit" "i387")])
+
+;; FP compares, step 3
+;; Get ax into flags, general case.
+
+(define_insn "x86_sahf_1"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC [(match_operand:HI 0 "register_operand" "a")] UNSPEC_SAHF))]
+ "!TARGET_64BIT"
+ "sahf"
+ [(set_attr "length" "1")
+ (set_attr "athlon_decode" "vector")
+ (set_attr "mode" "SI")])
+
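+;; As an illustration, the classic three-step sequence for an FP "a < b" is
+;;	fcomp	%st(1)		(step 1: compare, set the FP status word)
+;;	fnstsw	%ax		(step 2: move the status word to ax)
+;;	sahf			(step 3: copy ah into the flags)
+;;	jb	<target>
+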
+;; Pentium Pro can do steps 1 through 3 in one go.
+
+(define_insn "*cmpfp_i_mixed"
+ [(set (reg:CCFP FLAGS_REG)
+ (compare:CCFP (match_operand 0 "register_operand" "f,x")
+ (match_operand 1 "nonimmediate_operand" "f,xm")))]
+ "TARGET_MIX_SSE_I387
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 0);"
+ [(set_attr "type" "fcmp,ssecomi")
+ (set (attr "mode")
+ (if_then_else (match_operand:SF 1 "" "")
+ (const_string "SF")
+ (const_string "DF")))
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*cmpfp_i_sse"
+ [(set (reg:CCFP FLAGS_REG)
+ (compare:CCFP (match_operand 0 "register_operand" "x")
+ (match_operand 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE_MATH
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 0);"
+ [(set_attr "type" "ssecomi")
+ (set (attr "mode")
+ (if_then_else (match_operand:SF 1 "" "")
+ (const_string "SF")
+ (const_string "DF")))
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*cmpfp_i_i387"
+ [(set (reg:CCFP FLAGS_REG)
+ (compare:CCFP (match_operand 0 "register_operand" "f")
+ (match_operand 1 "register_operand" "f")))]
+ "TARGET_80387 && TARGET_CMOVE
+ && (!TARGET_SSE_MATH || !SSE_FLOAT_MODE_P (GET_MODE (operands[0])))
+ && FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 0);"
+ [(set_attr "type" "fcmp")
+ (set (attr "mode")
+ (cond [(match_operand:SF 1 "" "")
+ (const_string "SF")
+ (match_operand:DF 1 "" "")
+ (const_string "DF")
+ ]
+ (const_string "XF")))
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*cmpfp_iu_mixed"
+ [(set (reg:CCFPU FLAGS_REG)
+ (compare:CCFPU (match_operand 0 "register_operand" "f,x")
+ (match_operand 1 "nonimmediate_operand" "f,xm")))]
+ "TARGET_MIX_SSE_I387
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 1);"
+ [(set_attr "type" "fcmp,ssecomi")
+ (set (attr "mode")
+ (if_then_else (match_operand:SF 1 "" "")
+ (const_string "SF")
+ (const_string "DF")))
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*cmpfp_iu_sse"
+ [(set (reg:CCFPU FLAGS_REG)
+ (compare:CCFPU (match_operand 0 "register_operand" "x")
+ (match_operand 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE_MATH
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 1);"
+ [(set_attr "type" "ssecomi")
+ (set (attr "mode")
+ (if_then_else (match_operand:SF 1 "" "")
+ (const_string "SF")
+ (const_string "DF")))
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*cmpfp_iu_387"
+ [(set (reg:CCFPU FLAGS_REG)
+ (compare:CCFPU (match_operand 0 "register_operand" "f")
+ (match_operand 1 "register_operand" "f")))]
+ "TARGET_80387 && TARGET_CMOVE
+ && (!TARGET_SSE_MATH || !SSE_FLOAT_MODE_P (GET_MODE (operands[0])))
+ && FLOAT_MODE_P (GET_MODE (operands[0]))
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])"
+ "* return output_fp_compare (insn, operands, 1, 1);"
+ [(set_attr "type" "fcmp")
+ (set (attr "mode")
+ (cond [(match_operand:SF 1 "" "")
+ (const_string "SF")
+ (match_operand:DF 1 "" "")
+ (const_string "DF")
+ ]
+ (const_string "XF")))
+ (set_attr "athlon_decode" "vector")])
+
+;; Move instructions.
+
+;; General case of fullword move.
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (SImode, operands); DONE;")
+
+;; Push/pop instructions. They are separate since autoinc/dec is not a
+;; general_operand.
+;;
+;; %%% We don't use a post-inc memory reference because x86 is not a
+;; general AUTO_INC_DEC host, which impacts how it is treated in flow.
+;; Changing this would impact compiler performance on other,
+;; non-AUTO_INC_DEC targets that lack our quirks, and it is just as easy
+;; to represent this differently.
+
+(define_insn "*pushsi2"
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (match_operand:SI 1 "general_no_elim_operand" "ri*m"))]
+ "!TARGET_64BIT"
+ "push{l}\t%1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "SI")])
+
+;; For the 64-bit ABI we always round the push up to 8 bytes.
+(define_insn "*pushsi2_rex64"
+ [(set (match_operand:SI 0 "push_operand" "=X")
+ (match_operand:SI 1 "nonmemory_no_elim_operand" "ri"))]
+ "TARGET_64BIT"
+ "push{q}\t%q1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "SI")])
+
+(define_insn "*pushsi2_prologue"
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (match_operand:SI 1 "general_no_elim_operand" "ri*m"))
+ (clobber (mem:BLK (scratch)))]
+ "!TARGET_64BIT"
+ "push{l}\t%1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "SI")])
+
+(define_insn "*popsi1_epilogue"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r*m")
+ (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (mem:BLK (scratch)))]
+ "!TARGET_64BIT"
+ "pop{l}\t%0"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "SI")])
+
+(define_insn "popsi1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r*m")
+ (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG) (const_int 4)))]
+ "!TARGET_64BIT"
+ "pop{l}\t%0"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "SI")])
+
+(define_insn "*movsi_xor"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "const0_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed && (!TARGET_USE_MOV0 || optimize_size)"
+ "xor{l}\t{%0, %0|%0, %0}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*movsi_or"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "immediate_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && operands[1] == constm1_rtx
+ && (TARGET_PENTIUM || optimize_size)"
+{
+ operands[1] = constm1_rtx;
+ return "or{l}\t{%1, %0|%0, %1}";
+}
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")
+ (set_attr "length_immediate" "1")])
+
+(define_insn "*movsi_1"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:SI 0 "nonimmediate_operand"
+ "=r ,m ,*y,*y,?rm,?*y,*x,*x,?r ,m ,?*Yi,*x")
+ (match_operand:SI 1 "general_operand"
+ "rinm,rin,C ,*y,*y ,rm ,C ,*x,*Yi,*x,r ,m "))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "!(MEM_P (operands[0]) && MEM_P (operands[1]))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_SSELOG1:
+ if (get_attr_mode (insn) == MODE_TI)
+ return "pxor\t%0, %0";
+ return "xorps\t%0, %0";
+
+ case TYPE_SSEMOV:
+ switch (get_attr_mode (insn))
+ {
+ case MODE_TI:
+ return "movdqa\t{%1, %0|%0, %1}";
+ case MODE_V4SF:
+ return "movaps\t{%1, %0|%0, %1}";
+ case MODE_SI:
+ return "movd\t{%1, %0|%0, %1}";
+ case MODE_SF:
+ return "movss\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+
+ case TYPE_MMXADD:
+ return "pxor\t%0, %0";
+
+ case TYPE_MMXMOV:
+ if (get_attr_mode (insn) == MODE_DI)
+ return "movq\t{%1, %0|%0, %1}";
+ return "movd\t{%1, %0|%0, %1}";
+
+ case TYPE_LEA:
+ return "lea{l}\t{%1, %0|%0, %1}";
+
+ default:
+ gcc_assert (!flag_pic || LEGITIMATE_PIC_OPERAND_P (operands[1]));
+ return "mov{l}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "mmxadd")
+ (eq_attr "alternative" "3,4,5")
+ (const_string "mmxmov")
+ (eq_attr "alternative" "6")
+ (const_string "sselog1")
+ (eq_attr "alternative" "7,8,9,10,11")
+ (const_string "ssemov")
+ (match_operand:DI 1 "pic_32bit_operand" "")
+ (const_string "lea")
+ ]
+ (const_string "imov")))
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "2,3")
+ (const_string "DI")
+ (eq_attr "alternative" "6,7")
+ (if_then_else
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (const_string "V4SF")
+ (const_string "TI"))
+ (and (eq_attr "alternative" "8,9,10,11")
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (const_string "SF")
+ ]
+ (const_string "SI")))])
+
+;; Stores and loads of ax to an arbitrary constant address.
+;; We fake a second form of the instruction to force reload to load the
+;; address into a register when rax is not available.
+(define_insn "*movabssi_1_rex64"
+ [(set (mem:SI (match_operand:DI 0 "x86_64_movabs_operand" "i,r"))
+ (match_operand:SI 1 "nonmemory_operand" "a,er"))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 0)"
+ "@
+ movabs{l}\t{%1, %P0|%P0, %1}
+ mov{l}\t{%1, %a0|%a0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "memory" "store")
+ (set_attr "mode" "SI")])
+
+(define_insn "*movabssi_2_rex64"
+ [(set (match_operand:SI 0 "register_operand" "=a,r")
+ (mem:SI (match_operand:DI 1 "x86_64_movabs_operand" "i,r")))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 1)"
+ "@
+ movabs{l}\t{%P1, %0|%0, %P1}
+ mov{l}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0")
+ (set_attr "memory" "load")
+ (set_attr "mode" "SI")])
+
+(define_insn "*swapsi"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:SI 1 "register_operand" "+r"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ ""
+ "xchg{l}\t%1, %0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "SI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (HImode, operands); DONE;")
+
+(define_insn "*pushhi2"
+ [(set (match_operand:HI 0 "push_operand" "=X")
+ (match_operand:HI 1 "nonmemory_no_elim_operand" "rn"))]
+ "!TARGET_64BIT"
+ "push{l}\t%k1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "SI")])
+
+;; For the 64-bit ABI we always round the push up to 8 bytes.
+(define_insn "*pushhi2_rex64"
+ [(set (match_operand:HI 0 "push_operand" "=X")
+ (match_operand:HI 1 "nonmemory_no_elim_operand" "ri"))]
+ "TARGET_64BIT"
+ "push{q}\t%q1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:HI 1 "general_operand" "r,rn,rm,rn"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ /* movzwl is faster than movw on p2 due to partial word stalls,
+ though not as fast as an aligned movl. */
+ return "movz{wl|x}\t{%1, %k0|%k0, %1}";
+ default:
+ if (get_attr_mode (insn) == MODE_SI)
+ return "mov{l}\t{%k1, %k0|%k0, %k1}";
+ else
+ return "mov{w}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "optimize_size") (const_int 0))
+ (const_string "imov")
+ (and (eq_attr "alternative" "0")
+ (ior (eq (symbol_ref "TARGET_PARTIAL_REG_STALL")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_HIMODE_MATH")
+ (const_int 0))))
+ (const_string "imov")
+ (and (eq_attr "alternative" "1,2")
+ (match_operand:HI 1 "aligned_operand" ""))
+ (const_string "imov")
+ (and (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0))
+ (eq_attr "alternative" "0,2"))
+ (const_string "imovx")
+ ]
+ (const_string "imov")))
+ (set (attr "mode")
+ (cond [(eq_attr "type" "imovx")
+ (const_string "SI")
+ (and (eq_attr "alternative" "1,2")
+ (match_operand:HI 1 "aligned_operand" ""))
+ (const_string "SI")
+ (and (eq_attr "alternative" "0")
+ (ior (eq (symbol_ref "TARGET_PARTIAL_REG_STALL")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_HIMODE_MATH")
+ (const_int 0))))
+ (const_string "SI")
+ ]
+ (const_string "HI")))])
+
+;; Stores and loads of ax to an arbitrary constant address.
+;; We fake a second form of the instruction to force reload to load the
+;; address into a register when rax is not available.
+(define_insn "*movabshi_1_rex64"
+ [(set (mem:HI (match_operand:DI 0 "x86_64_movabs_operand" "i,r"))
+ (match_operand:HI 1 "nonmemory_operand" "a,er"))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 0)"
+ "@
+ movabs{w}\t{%1, %P0|%P0, %1}
+ mov{w}\t{%1, %a0|%a0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "memory" "store")
+ (set_attr "mode" "HI")])
+
+(define_insn "*movabshi_2_rex64"
+ [(set (match_operand:HI 0 "register_operand" "=a,r")
+ (mem:HI (match_operand:DI 1 "x86_64_movabs_operand" "i,r")))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 1)"
+ "@
+ movabs{w}\t{%P1, %0|%0, %P1}
+ mov{w}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0")
+ (set_attr "memory" "load")
+ (set_attr "mode" "HI")])
+
+(define_insn "*swaphi_1"
+ [(set (match_operand:HI 0 "register_operand" "+r")
+ (match_operand:HI 1 "register_operand" "+r"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "SI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*swaphi_2"
+ [(set (match_operand:HI 0 "register_operand" "+r")
+ (match_operand:HI 1 "register_operand" "+r"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{w}\t%1, %0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "HI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "movstricthi"
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" ""))
+ (match_operand:HI 1 "general_operand" ""))]
+ "! TARGET_PARTIAL_REG_STALL || optimize_size"
+{
+  /* Don't generate memory->memory moves, go through a register.  */
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+})
+
+(define_insn "*movstricthi_1"
+ [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+rm,r"))
+ (match_operand:HI 1 "general_operand" "rn,m"))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "mov{w}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "HI")])
+
+(define_insn "*movstricthi_xor"
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+r"))
+ (match_operand:HI 1 "const0_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ((!TARGET_USE_MOV0 && !TARGET_PARTIAL_REG_STALL) || optimize_size)"
+ "xor{w}\t{%0, %0|%0, %0}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "HI")
+ (set_attr "length_immediate" "0")])
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (QImode, operands); DONE;")
+
+;; emit_push_insn, when it calls move_by_pieces, requires an insn to
+;; "push a byte".  But we actually use pushl, which has the effect
+;; of rounding the amount pushed up to a word.
+
+(define_insn "*pushqi2"
+ [(set (match_operand:QI 0 "push_operand" "=X")
+ (match_operand:QI 1 "nonmemory_no_elim_operand" "rn"))]
+ "!TARGET_64BIT"
+ "push{l}\t%k1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "SI")])
+
+;; For the 64-bit ABI we always round the push up to 8 bytes.
+(define_insn "*pushqi2_rex64"
+ [(set (match_operand:QI 0 "push_operand" "=X")
+ (match_operand:QI 1 "nonmemory_no_elim_operand" "qi"))]
+ "TARGET_64BIT"
+ "push{q}\t%q1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "DI")])
+
+;; The situation is quite tricky when choosing a full-sized (SImode) move
+;; over a QImode move.  For a Q_REG -> Q_REG move we use full size only on
+;; partial-register-dependency machines (such as the AMD Athlon), where a
+;; QImode move introduces an extra dependency, and on partial-register-stall
+;; machines that don't use QImode patterns (where a QImode move causes a
+;; stall on the next instruction).
+;;
+;; For loads of a Q_REG into a NONQ_REG we use full-sized moves, except on
+;; partial-register-stall machines, where the partial write could cause a
+;; stall; there we use movzx instead (see the sketch after the pattern
+;; below).
+(define_insn "*movqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
+ (match_operand:QI 1 "general_operand" " q,qn,qm,q,rn,qm,qn"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ gcc_assert (ANY_QI_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM);
+ return "movz{bl|x}\t{%1, %k0|%k0, %1}";
+ default:
+ if (get_attr_mode (insn) == MODE_SI)
+ return "mov{l}\t{%k1, %k0|%k0, %k1}";
+ else
+ return "mov{b}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (eq_attr "alternative" "5")
+ (not (match_operand:QI 1 "aligned_operand" "")))
+ (const_string "imovx")
+ (ne (symbol_ref "optimize_size") (const_int 0))
+ (const_string "imov")
+ (and (eq_attr "alternative" "3")
+ (ior (eq (symbol_ref "TARGET_PARTIAL_REG_STALL")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_QIMODE_MATH")
+ (const_int 0))))
+ (const_string "imov")
+ (eq_attr "alternative" "3,5")
+ (const_string "imovx")
+ (and (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0))
+ (eq_attr "alternative" "2"))
+ (const_string "imovx")
+ ]
+ (const_string "imov")))
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "3,4,5")
+ (const_string "SI")
+ (eq_attr "alternative" "6")
+ (const_string "QI")
+ (eq_attr "type" "imovx")
+ (const_string "SI")
+ (and (eq_attr "type" "imov")
+ (and (eq_attr "alternative" "0,1")
+ (and (ne (symbol_ref "TARGET_PARTIAL_REG_DEPENDENCY")
+ (const_int 0))
+ (and (eq (symbol_ref "optimize_size")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_PARTIAL_REG_STALL")
+ (const_int 0))))))
+ (const_string "SI")
+ ;; Avoid partial register stalls when not using QImode arithmetic
+ (and (eq_attr "type" "imov")
+ (and (eq_attr "alternative" "0,1")
+ (and (ne (symbol_ref "TARGET_PARTIAL_REG_STALL")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_QIMODE_MATH")
+ (const_int 0)))))
+ (const_string "SI")
+ ]
+ (const_string "QI")))])
+
+(define_expand "reload_outqi"
+ [(parallel [(match_operand:QI 0 "" "=m")
+ (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "register_operand" "=&q")])]
+ ""
+{
+ rtx op0, op1, op2;
+ op0 = operands[0]; op1 = operands[1]; op2 = operands[2];
+
+ gcc_assert (!reg_overlap_mentioned_p (op2, op0));
+ if (! q_regs_operand (op1, QImode))
+ {
+ emit_insn (gen_movqi (op2, op1));
+ op1 = op2;
+ }
+ emit_insn (gen_movqi (op0, op1));
+ DONE;
+})
+
+(define_insn "*swapqi_1"
+ [(set (match_operand:QI 0 "register_operand" "+r")
+ (match_operand:QI 1 "register_operand" "+r"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "SI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_insn "*swapqi_2"
+ [(set (match_operand:QI 0 "register_operand" "+q")
+ (match_operand:QI 1 "register_operand" "+q"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{b}\t%1, %0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "QI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "movstrictqi"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" ""))
+ (match_operand:QI 1 "general_operand" ""))]
+ "! TARGET_PARTIAL_REG_STALL || optimize_size"
+{
+ /* Don't generate memory->memory moves, go through a register. */
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+})
+
+(define_insn "*movstrictqi_1"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
+ (match_operand:QI 1 "general_operand" "*qn,m"))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "mov{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "QI")])
+
+; APPLE LOCAL begin radar 4645709 5131847
+; This is based on movstrictqi_xor, which performs a partial register update.
+; When optimize_size is not set, it is better to update the whole register.
+(define_insn "*movstrictqi_and"
+ [(set (strict_low_part (match_operand:QI 0 "q_regs_operand" "+q"))
+ (match_operand:QI 1 "const0_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed && (!TARGET_USE_MOV0 && !optimize_size)"
+{
+ if (TARGET_64BIT)
+ return "and{q}\t{$0xffffffffffffff00, %q0}";
+ else
+ return "and{l}\t{$0xffffff00, %k0}";
+}
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")
+ (set_attr "length_immediate" "0")])
+; APPLE LOCAL end radar 4645709 5131847
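+
+;; Editorial note: the AND form above, e.g. "andl $0xffffff00, %eax",
+;; clears the low byte while writing the whole 32-bit (or 64-bit)
+;; register, so no partial-register update occurs; the xor form below
+;; is shorter but writes only the byte register.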
+
+(define_insn "*movstrictqi_xor"
+ [(set (strict_low_part (match_operand:QI 0 "q_regs_operand" "+q"))
+ (match_operand:QI 1 "const0_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed && (!TARGET_USE_MOV0 || optimize_size)"
+ "xor{b}\t{%0, %0|%0, %0}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*movsi_extv_1"
+ [(set (match_operand:SI 0 "register_operand" "=R")
+ (sign_extract:SI (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)))]
+ ""
+ "movs{bl|x}\t{%h1, %0|%0, %h1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+(define_insn "*movhi_extv_1"
+ [(set (match_operand:HI 0 "register_operand" "=R")
+ (sign_extract:HI (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)))]
+ ""
+ "movs{bl|x}\t{%h1, %k0|%k0, %h1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+(define_insn "*movqi_extv_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=Qm,?r")
+ (sign_extract:QI (match_operand 1 "ext_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)))]
+ "!TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ return "movs{bl|x}\t{%h1, %k0|%k0, %h1}";
+ default:
+ return "mov{b}\t{%h1, %0|%0, %h1}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (and (match_operand:QI 0 "register_operand" "")
+ (ior (not (match_operand:QI 0 "q_regs_operand" ""))
+ (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0))))
+ (const_string "imovx")
+ (const_string "imov")))
+ (set (attr "mode")
+ (if_then_else (eq_attr "type" "imovx")
+ (const_string "SI")
+ (const_string "QI")))])
+
+(define_insn "*movqi_extv_1_rex64"
+ [(set (match_operand:QI 0 "register_operand" "=Q,?R")
+ (sign_extract:QI (match_operand 1 "ext_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ return "movs{bl|x}\t{%h1, %k0|%k0, %h1}";
+ default:
+ return "mov{b}\t{%h1, %0|%0, %h1}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (and (match_operand:QI 0 "register_operand" "")
+ (ior (not (match_operand:QI 0 "q_regs_operand" ""))
+ (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0))))
+ (const_string "imovx")
+ (const_string "imov")))
+ (set (attr "mode")
+ (if_then_else (eq_attr "type" "imovx")
+ (const_string "SI")
+ (const_string "QI")))])
+
+;; Stores and loads of ax to arbitrary constant address.
+;; We fake an second form of instruction to force reload to load address
+;; into register when rax is not available
+(define_insn "*movabsqi_1_rex64"
+ [(set (mem:QI (match_operand:DI 0 "x86_64_movabs_operand" "i,r"))
+ (match_operand:QI 1 "nonmemory_operand" "a,er"))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 0)"
+ "@
+ movabs{b}\t{%1, %P0|%P0, %1}
+ mov{b}\t{%1, %a0|%a0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "memory" "store")
+ (set_attr "mode" "QI")])
+
+(define_insn "*movabsqi_2_rex64"
+ [(set (match_operand:QI 0 "register_operand" "=a,r")
+ (mem:QI (match_operand:DI 1 "x86_64_movabs_operand" "i,r")))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 1)"
+ "@
+ movabs{b}\t{%P1, %0|%0, %P1}
+ mov{b}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0")
+ (set_attr "memory" "load")
+ (set_attr "mode" "QI")])
+
+(define_insn "*movdi_extzv_1"
+ [(set (match_operand:DI 0 "register_operand" "=R")
+ (zero_extract:DI (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)))]
+ "TARGET_64BIT"
+ "movz{bl|x}\t{%h1, %k0|%k0, %h1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movsi_extzv_1"
+ [(set (match_operand:SI 0 "register_operand" "=R")
+ (zero_extract:SI (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)))]
+ ""
+ "movz{bl|x}\t{%h1, %0|%0, %h1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+(define_insn "*movqi_extzv_2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=Qm,?R")
+ (subreg:QI (zero_extract:SI (match_operand 1 "ext_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)) 0))]
+ "!TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ return "movz{bl|x}\t{%h1, %k0|%k0, %h1}";
+ default:
+ return "mov{b}\t{%h1, %0|%0, %h1}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (and (match_operand:QI 0 "register_operand" "")
+ (ior (not (match_operand:QI 0 "q_regs_operand" ""))
+ (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0))))
+ (const_string "imovx")
+ (const_string "imov")))
+ (set (attr "mode")
+ (if_then_else (eq_attr "type" "imovx")
+ (const_string "SI")
+ (const_string "QI")))])
+
+(define_insn "*movqi_extzv_2_rex64"
+ [(set (match_operand:QI 0 "register_operand" "=Q,?R")
+ (subreg:QI (zero_extract:SI (match_operand 1 "ext_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)) 0))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ return "movz{bl|x}\t{%h1, %k0|%k0, %h1}";
+ default:
+ return "mov{b}\t{%h1, %0|%0, %h1}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (ior (not (match_operand:QI 0 "q_regs_operand" ""))
+ (ne (symbol_ref "TARGET_MOVX")
+ (const_int 0)))
+ (const_string "imovx")
+ (const_string "imov")))
+ (set (attr "mode")
+ (if_then_else (eq_attr "type" "imovx")
+ (const_string "SI")
+ (const_string "QI")))])
+
+(define_insn "movsi_insv_1"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "+Q")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:SI 1 "general_operand" "Qmn"))]
+ "!TARGET_64BIT"
+ "mov{b}\t{%b1, %h0|%h0, %b1}"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "QI")])
+
+(define_insn "movdi_insv_1_rex64"
+ [(set (zero_extract:DI (match_operand 0 "ext_register_operand" "+Q")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:DI 1 "nonmemory_operand" "Qn"))]
+ "TARGET_64BIT"
+ "mov{b}\t{%b1, %h0|%h0, %b1}"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "QI")])
+
+(define_insn "*movqi_insv_2"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "+Q")
+ (const_int 8)
+ (const_int 8))
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "Q")
+ (const_int 8)))]
+ ""
+ "mov{b}\t{%h1, %h0|%h0, %h1}"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "QI")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (DImode, operands); DONE;")
+
+(define_insn "*pushdi"
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (match_operand:DI 1 "general_no_elim_operand" "riF*m"))]
+ "!TARGET_64BIT"
+ "#")
+
+(define_insn "*pushdi2_rex64"
+ [(set (match_operand:DI 0 "push_operand" "=<,!<")
+ (match_operand:DI 1 "general_no_elim_operand" "re*m,n"))]
+ "TARGET_64BIT"
+ "@
+ push{q}\t%1
+ #"
+ [(set_attr "type" "push,multi")
+ (set_attr "mode" "DI")])
+
+;; Convert otherwise-impossible pushes of an immediate to existing
+;; instructions.  First try to get a scratch register and go through it.
+;; If that fails, push the sign-extended lower part first and then
+;; overwrite the upper part with a 32-bit move (see the sketch below).
+(define_peephole2
+ [(match_scratch:DI 2 "r")
+ (set (match_operand:DI 0 "push_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode)"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; We need to define this as both a peephole and a splitter, for the case
+;; where the peephole2 pass is not run.
+;; The "&& 1" is needed to keep it from matching the previous pattern.
+(define_peephole2
+ [(set (match_operand:DI 0 "push_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode) && 1"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+ "split_di (operands + 1, 1, operands + 2, operands + 3);
+ operands[1] = gen_lowpart (DImode, operands[2]);
+ operands[2] = gen_rtx_MEM (SImode, gen_rtx_PLUS (DImode, stack_pointer_rtx,
+ GEN_INT (4)));
+ ")
+
+(define_split
+ [(set (match_operand:DI 0 "push_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && ((optimize > 0 && flag_peephole2)
+ ? flow2_completed : reload_completed)
+ && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode)"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+ "split_di (operands + 1, 1, operands + 2, operands + 3);
+ operands[1] = gen_lowpart (DImode, operands[2]);
+ operands[2] = gen_rtx_MEM (SImode, gen_rtx_PLUS (DImode, stack_pointer_rtx,
+ GEN_INT (4)));
+ ")
+
+(define_insn "*pushdi2_prologue_rex64"
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (match_operand:DI 1 "general_no_elim_operand" "re*m"))
+ (clobber (mem:BLK (scratch)))]
+ "TARGET_64BIT"
+ "push{q}\t%1"
+ [(set_attr "type" "push")
+ (set_attr "mode" "DI")])
+
+(define_insn "*popdi1_epilogue_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r*m")
+ (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG)
+ (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (mem:BLK (scratch)))]
+ "TARGET_64BIT"
+ "pop{q}\t%0"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "DI")])
+
+(define_insn "popdi1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r*m")
+ (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG)
+ (plus:DI (reg:DI SP_REG) (const_int 8)))]
+ "TARGET_64BIT"
+ "pop{q}\t%0"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movdi_xor_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "const0_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (!TARGET_USE_MOV0 || optimize_size)
+ && reload_completed"
+ "xor{l}\t{%k0, %k0|%k0, %k0}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*movdi_or_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "const_int_operand" "i"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (TARGET_PENTIUM || optimize_size)
+ && reload_completed
+ && operands[1] == constm1_rtx"
+{
+ operands[1] = constm1_rtx;
+ return "or{q}\t{%1, %0|%0, %1}";
+}
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "DI")
+ (set_attr "length_immediate" "1")])
+
+(define_insn "*movdi_2"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DI 0 "nonimmediate_operand"
+ "=r ,o ,*y,m*y,*y,*Yt,m ,*Yt,*Yt,*x,m ,*x,*x")
+ (match_operand:DI 1 "general_operand"
+ "riFo,riF,C ,*y ,m ,C ,*Yt,*Yt,m ,C ,*x,*x,m "))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "!TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ #
+ #
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movdqa\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movlps\t{%1, %0|%0, %1}
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "*,*,mmx,mmxmov,mmxmov,sselog1,ssemov,ssemov,ssemov,sselog1,ssemov,ssemov,ssemov")
+ (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,TI,DI,V4SF,V2SF,V4SF,V2SF")])
+
+(define_split
+ [(set (match_operand:DI 0 "push_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ "!TARGET_64BIT && reload_completed
+ && (! MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1]))"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+;; %%% This multiword handling has got to go.
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ "!TARGET_64BIT && reload_completed
+ && (!MMX_REG_P (operands[0]) && !SSE_REG_P (operands[0]))
+ && (!MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1]))"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_insn "*movdi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand"
+ "=r,r ,r,m ,!m,*y,*y,?rm,?*y,*x,*x,?rm,?*x,?*x,?*y")
+ (match_operand:DI 1 "general_operand"
+ "Z ,rem,i,re,n ,C ,*y,*y ,rm ,C ,*x,*x ,rm ,*y ,*x"))]
+ "TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_SSECVT:
+ if (which_alternative == 13)
+ return "movq2dq\t{%1, %0|%0, %1}";
+ else
+ return "movdq2q\t{%1, %0|%0, %1}";
+ case TYPE_SSEMOV:
+ if (get_attr_mode (insn) == MODE_TI)
+ return "movdqa\t{%1, %0|%0, %1}";
+ /* FALLTHRU */
+ case TYPE_MMXMOV:
+      /* Moves from and to an integer register are done using the movd
+	 opcode with a REX prefix.  */
+ if (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1]))
+ return "movd\t{%1, %0|%0, %1}";
+ return "movq\t{%1, %0|%0, %1}";
+ case TYPE_SSELOG1:
+ case TYPE_MMXADD:
+ return "pxor\t%0, %0";
+ case TYPE_MULTI:
+ return "#";
+ case TYPE_LEA:
+ return "lea{q}\t{%a1, %0|%0, %a1}";
+ default:
+ gcc_assert (!flag_pic || LEGITIMATE_PIC_OPERAND_P (operands[1]));
+ if (get_attr_mode (insn) == MODE_SI)
+ return "mov{l}\t{%k1, %k0|%k0, %k1}";
+ else if (which_alternative == 2)
+ return "movabs{q}\t{%1, %0|%0, %1}";
+ else
+ return "mov{q}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "5")
+ (const_string "mmxadd")
+ (eq_attr "alternative" "6,7,8")
+ (const_string "mmxmov")
+ (eq_attr "alternative" "9")
+ (const_string "sselog1")
+ (eq_attr "alternative" "10,11,12")
+ (const_string "ssemov")
+ (eq_attr "alternative" "13,14")
+ (const_string "ssecvt")
+ (eq_attr "alternative" "4")
+ (const_string "multi")
+ (match_operand:DI 1 "pic_32bit_operand" "")
+ (const_string "lea")
+ ]
+ (const_string "imov")))
+ (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*,*,*,*,*")
+ (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,DI,TI,TI,DI,DI,DI,DI")])
+
+;; Stores and loads of ax to an arbitrary constant address.
+;; We fake a second form of the instruction to force reload to load the
+;; address into a register when rax is not available.
+(define_insn "*movabsdi_1_rex64"
+ [(set (mem:DI (match_operand:DI 0 "x86_64_movabs_operand" "i,r"))
+ (match_operand:DI 1 "nonmemory_operand" "a,er"))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 0)"
+ "@
+ movabs{q}\t{%1, %P0|%P0, %1}
+ mov{q}\t{%1, %a0|%a0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "memory" "store")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movabsdi_2_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=a,r")
+ (mem:DI (match_operand:DI 1 "x86_64_movabs_operand" "i,r")))]
+ "TARGET_64BIT && ix86_check_movabs (insn, 1)"
+ "@
+ movabs{q}\t{%P1, %0|%0, %P1}
+ mov{q}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+ (set_attr "length_immediate" "0")
+ (set_attr "memory" "load")
+ (set_attr "mode" "DI")])
+
+;; Convert otherwise-impossible stores of an immediate to existing
+;; instructions.  First try to get a scratch register and go through it.
+;; If that fails, move by 32-bit parts.
+(define_peephole2
+ [(match_scratch:DI 2 "r")
+ (set (match_operand:DI 0 "memory_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode)"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; We need to define this as both a peephole and a splitter, for the case
+;; where the peephole2 pass is not run.
+;; The "&& 1" is needed to keep it from matching the previous pattern.
+(define_peephole2
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode) && 1"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "split_di (operands, 2, operands + 2, operands + 4);")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (match_operand:DI 1 "immediate_operand" ""))]
+ "TARGET_64BIT && ((optimize > 0 && flag_peephole2)
+ ? flow2_completed : reload_completed)
+ && !symbolic_operand (operands[1], DImode)
+ && !x86_64_immediate_operand (operands[1], DImode)"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "split_di (operands, 2, operands + 2, operands + 4);")
+
+(define_insn "*swapdi_rex64"
+ [(set (match_operand:DI 0 "register_operand" "+r")
+ (match_operand:DI 1 "register_operand" "+r"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_64BIT"
+ "xchg{q}\t%1, %0"
+ [(set_attr "type" "imov")
+ (set_attr "mode" "DI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE || TARGET_64BIT"
+{
+ if (TARGET_64BIT)
+ ix86_expand_move (TImode, operands);
+ else
+ ix86_expand_vector_move (TImode, operands);
+ DONE;
+})
+
+(define_insn "*movti_internal"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=x,x,m")
+ (match_operand:TI 1 "vector_move_operand" "C,xm,x"))]
+ "TARGET_SSE && !TARGET_64BIT
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "xorps\t%0, %0";
+ else
+ return "pxor\t%0, %0";
+ case 1:
+ case 2:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movdqa\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "sselog1,ssemov,ssemov")
+ (set (attr "mode")
+ (cond [(ior (eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (ne (symbol_ref "optimize_size") (const_int 0)))
+ (const_string "V4SF")
+ (and (eq_attr "alternative" "2")
+ (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
+ (const_int 0)))
+ (const_string "V4SF")]
+ (const_string "TI")))])
+
+(define_insn "*movti_rex64"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o,x,x,xm")
+ (match_operand:TI 1 "general_operand" "riFo,riF,C,xm,x"))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return "#";
+ case 2:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "xorps\t%0, %0";
+ else
+ return "pxor\t%0, %0";
+ case 3:
+ case 4:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movdqa\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "*,*,sselog1,ssemov,ssemov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "2,3")
+ (if_then_else
+ (ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (const_string "TI"))
+ (eq_attr "alternative" "4")
+ (if_then_else
+ (ior (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
+ (const_int 0))
+ (ne (symbol_ref "optimize_size")
+ (const_int 0)))
+ (const_string "V4SF")
+ (const_string "TI"))]
+ (const_string "DI")))])
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ "reload_completed && !SSE_REG_P (operands[0])
+ && !SSE_REG_P (operands[1])"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (SFmode, operands); DONE;")
+
+(define_insn "*pushsf"
+ [(set (match_operand:SF 0 "push_operand" "=<,<,<")
+ (match_operand:SF 1 "general_no_elim_operand" "f,rFm,x"))]
+ "!TARGET_64BIT"
+{
+  /* Anything else should already have been split before reg-stack.  */
+ gcc_assert (which_alternative == 1);
+ return "push{l}\t%1";
+}
+ [(set_attr "type" "multi,push,multi")
+ (set_attr "unit" "i387,*,*")
+ (set_attr "mode" "SF,SI,SF")])
+
+(define_insn "*pushsf_rex64"
+ [(set (match_operand:SF 0 "push_operand" "=X,X,X")
+ (match_operand:SF 1 "nonmemory_no_elim_operand" "f,rF,x"))]
+ "TARGET_64BIT"
+{
+  /* Anything else should already have been split before reg-stack.  */
+ gcc_assert (which_alternative == 1);
+ return "push{q}\t%q1";
+}
+ [(set_attr "type" "multi,push,multi")
+ (set_attr "unit" "i387,*,*")
+ (set_attr "mode" "SF,DI,SF")])
+
+(define_split
+ [(set (match_operand:SF 0 "push_operand" "")
+ (match_operand:SF 1 "memory_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[1]) == MEM
+ && constant_pool_reference_p (operands[1])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[1] = avoid_constant_pool_reference (operands[1]);")
+
+
+;; %%% Kill this when call knows how to work this out.
+(define_split
+ [(set (match_operand:SF 0 "push_operand" "")
+ (match_operand:SF 1 "any_fp_register_operand" ""))]
+ "!TARGET_64BIT"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (set (mem:SF (reg:SI SP_REG)) (match_dup 1))])
+
+(define_split
+ [(set (match_operand:SF 0 "push_operand" "")
+ (match_operand:SF 1 "any_fp_register_operand" ""))]
+ "TARGET_64BIT"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -8)))
+ (set (mem:SF (reg:DI SP_REG)) (match_dup 1))])
+
+(define_insn "*movsf_1"
+ [(set (match_operand:SF 0 "nonimmediate_operand"
+ "=f,m ,f,r ,m ,x,x,x ,m ,!*y,!rm,!*y")
+ (match_operand:SF 1 "general_operand"
+ "fm,f,G ,rmF,Fr,C ,x ,xm,x,rm ,*y ,*y"))]
+ "!(MEM_P (operands[0]) && MEM_P (operands[1]))
+ && (reload_in_progress || reload_completed
+ || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], SFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ case 2:
+ return standard_80387_constant_opcode (operands[1]);
+
+ case 3:
+ case 4:
+ return "mov{l}\t{%1, %0|%0, %1}";
+ case 5:
+ if (get_attr_mode (insn) == MODE_TI)
+ return "pxor\t%0, %0";
+ else
+ return "xorps\t%0, %0";
+ case 6:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movss\t{%1, %0|%0, %1}";
+ case 7:
+ case 8:
+ return "movss\t{%1, %0|%0, %1}";
+
+ case 9:
+ case 10:
+ return "movd\t{%1, %0|%0, %1}";
+
+ case 11:
+ return "movq\t{%1, %0|%0, %1}";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,imov,imov,sselog1,ssemov,ssemov,ssemov,mmxmov,mmxmov,mmxmov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "3,4,9,10")
+ (const_string "SI")
+ (eq_attr "alternative" "5")
+ (if_then_else
+ (and (and (ne (symbol_ref "TARGET_SSE_LOAD0_BY_PXOR")
+ (const_int 0))
+ (ne (symbol_ref "TARGET_SSE2")
+ (const_int 0)))
+ (eq (symbol_ref "optimize_size")
+ (const_int 0)))
+ (const_string "TI")
+ (const_string "V4SF"))
+	     /* For architectures that resolve dependencies on
+		whole SSE registers, use an APS move to break dependency
+		chains; otherwise use a short move to avoid extra work.
+
+		Do the same for architectures that resolve dependencies on
+		the parts.  While in DF mode it is better to always handle
+		just the register parts, SF mode is different due to the
+		lack of instructions to load just part of the register.
+		It is better to maintain the whole register in a single
+		format to avoid problems with packed logical operations.  */
+ (eq_attr "alternative" "6")
+ (if_then_else
+ (ior (ne (symbol_ref "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_int 0))
+ (ne (symbol_ref "TARGET_SSE_SPLIT_REGS")
+ (const_int 0)))
+ (const_string "V4SF")
+ (const_string "SF"))
+ (eq_attr "alternative" "11")
+ (const_string "DI")]
+ (const_string "SF")))])
+
+(define_insn "*swapsf"
+ [(set (match_operand:SF 0 "fp_register_operand" "+f")
+ (match_operand:SF 1 "fp_register_operand" "+f"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "reload_completed || TARGET_80387"
+{
+ if (STACK_TOP_P (operands[0]))
+ return "fxch\t%1";
+ else
+ return "fxch\t%0";
+}
+ [(set_attr "type" "fxch")
+ (set_attr "mode" "SF")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (DFmode, operands); DONE;")
+
+;; Size of pushdf is 3 (for sub) + 2 (for fstp) + memory operand size.
+;; Size of pushdf using integer instructions is 2 + 2*memory operand size.
+;; On average, pushdf using integer instructions can still be shorter, so
+;; allow this pattern for optimize_size too.
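+;;
+;; Editorial sketch (a hedged reading of the arithmetic above): pushing
+;; a DFmode memory operand m with integer instructions looks like
+;;	pushl	4+m		; high word
+;;	pushl	m		; low word
+;; i.e. two short opcodes plus two copies of the operand encoding, versus
+;; sub + fstp + one copy of the operand for the i387 sequence.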
+
+(define_insn "*pushdf_nointeger"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "push_operand" "=<,<,<,<")
+ (match_operand:DF 1 "general_no_elim_operand" "f,Fo,*r,Yt"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "!TARGET_64BIT && !TARGET_INTEGER_DFMODE_MOVES"
+{
+  /* This insn should already have been split before reg-stack.  */
+ gcc_unreachable ();
+}
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387,*,*,*")
+ (set_attr "mode" "DF,SI,SI,DF")])
+
+(define_insn "*pushdf_integer"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "push_operand" "=<,<,<")
+ (match_operand:DF 1 "general_no_elim_operand" "f,rFo,Yt"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_64BIT || TARGET_INTEGER_DFMODE_MOVES"
+{
+  /* This insn should already have been split before reg-stack.  */
+ gcc_unreachable ();
+}
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387,*,*")
+ (set_attr "mode" "DF,SI,DF")])
+
+;; %%% Kill this when call knows how to work this out.
+(define_split
+ [(set (match_operand:DF 0 "push_operand" "")
+ (match_operand:DF 1 "any_fp_register_operand" ""))]
+ "!TARGET_64BIT && reload_completed"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -8)))
+ (set (mem:DF (reg:SI SP_REG)) (match_dup 1))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "push_operand" "")
+ (match_operand:DF 1 "any_fp_register_operand" ""))]
+ "TARGET_64BIT && reload_completed"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -8)))
+ (set (mem:DF (reg:DI SP_REG)) (match_dup 1))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "push_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ "reload_completed"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+;; Moving is usually shorter when only FP registers are used. This separate
+;; movdf pattern avoids the use of integer registers for FP operations
+;; when optimizing for size.
+
+(define_insn "*movdf_nointeger"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand"
+ "=f,m,f,*r ,o ,Yt*x,Yt*x,Yt*x ,m ")
+ (match_operand:DF 1 "general_operand"
+ "fm,f,G,*roF,F*r,C ,Yt*x,mYt*x,Yt*x"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ((optimize_size || !TARGET_INTEGER_DFMODE_MOVES) && !TARGET_64BIT)
+ && (reload_in_progress || reload_completed
+ || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], DFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ case 2:
+ return standard_80387_constant_opcode (operands[1]);
+
+ case 3:
+ case 4:
+ return "#";
+ case 5:
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V4SF:
+ return "xorps\t%0, %0";
+ case MODE_V2DF:
+ return "xorpd\t%0, %0";
+ case MODE_TI:
+ return "pxor\t%0, %0";
+ default:
+ gcc_unreachable ();
+ }
+ case 6:
+ case 7:
+ case 8:
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V4SF:
+ return "movaps\t{%1, %0|%0, %1}";
+ case MODE_V2DF:
+ return "movapd\t{%1, %0|%0, %1}";
+ case MODE_TI:
+ return "movdqa\t{%1, %0|%0, %1}";
+ case MODE_DI:
+ return "movq\t{%1, %0|%0, %1}";
+ case MODE_DF:
+ return "movsd\t{%1, %0|%0, %1}";
+ case MODE_V1DF:
+ return "movlpd\t{%1, %0|%0, %1}";
+ case MODE_V2SF:
+ return "movlps\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,multi,multi,sselog1,ssemov,ssemov,ssemov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "0,1,2")
+ (const_string "DF")
+ (eq_attr "alternative" "3,4")
+ (const_string "SI")
+
+ /* For SSE1, we have many fewer alternatives. */
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (cond [(eq_attr "alternative" "5,6")
+ (const_string "V4SF")
+ ]
+ (const_string "V2SF"))
+
+ /* xorps is one byte shorter. */
+ (eq_attr "alternative" "5")
+ (cond [(ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (ne (symbol_ref "TARGET_SSE_LOAD0_BY_PXOR")
+ (const_int 0))
+ (const_string "TI")
+ ]
+ (const_string "V2DF"))
+
+	     /* For architectures that resolve dependencies on whole SSE
+		registers, use an APD move to break dependency chains;
+		otherwise use a short move to avoid extra work.
+
+		movaps encodes one byte shorter than movapd.  */
+ (eq_attr "alternative" "6")
+ (cond
+ [(ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (ne (symbol_ref "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_int 0))
+ (const_string "V2DF")
+ ]
+ (const_string "DF"))
+	     /* For architectures that resolve dependencies on register
+		parts, we can avoid the extra work of zeroing out the
+		upper part of the register.  */
+ (eq_attr "alternative" "7")
+ (if_then_else
+ (ne (symbol_ref "TARGET_SSE_SPLIT_REGS")
+ (const_int 0))
+ (const_string "V1DF")
+ (const_string "DF"))
+ ]
+ (const_string "DF")))])
+
+(define_insn "*movdf_integer"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand"
+ "=f,m,f,r ,o ,Yt*x,Yt*x,Yt*x,m ")
+ (match_operand:DF 1 "general_operand"
+ "fm,f,G,roF,Fr,C ,Yt*x,m ,Yt*x"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ((!optimize_size && TARGET_INTEGER_DFMODE_MOVES) || TARGET_64BIT)
+ && (reload_in_progress || reload_completed
+ || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], DFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ case 2:
+ return standard_80387_constant_opcode (operands[1]);
+
+ case 3:
+ case 4:
+ return "#";
+
+ case 5:
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V4SF:
+ return "xorps\t%0, %0";
+ case MODE_V2DF:
+ return "xorpd\t%0, %0";
+ case MODE_TI:
+ return "pxor\t%0, %0";
+ default:
+ gcc_unreachable ();
+ }
+ case 6:
+ case 7:
+ case 8:
+ switch (get_attr_mode (insn))
+ {
+ case MODE_V4SF:
+ return "movaps\t{%1, %0|%0, %1}";
+ case MODE_V2DF:
+ return "movapd\t{%1, %0|%0, %1}";
+ case MODE_TI:
+ return "movdqa\t{%1, %0|%0, %1}";
+ case MODE_DI:
+ return "movq\t{%1, %0|%0, %1}";
+ case MODE_DF:
+ return "movsd\t{%1, %0|%0, %1}";
+ case MODE_V1DF:
+ return "movlpd\t{%1, %0|%0, %1}";
+ case MODE_V2SF:
+ return "movlps\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+
+ default:
+      gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,multi,multi,sselog1,ssemov,ssemov,ssemov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "0,1,2")
+ (const_string "DF")
+ (eq_attr "alternative" "3,4")
+ (const_string "SI")
+
+ /* For SSE1, we have many fewer alternatives. */
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (cond [(eq_attr "alternative" "5,6")
+ (const_string "V4SF")
+ ]
+ (const_string "V2SF"))
+
+ /* xorps is one byte shorter. */
+ (eq_attr "alternative" "5")
+ (cond [(ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (ne (symbol_ref "TARGET_SSE_LOAD0_BY_PXOR")
+ (const_int 0))
+ (const_string "TI")
+ ]
+ (const_string "V2DF"))
+
+	     /* For architectures that resolve dependencies on whole SSE
+		registers, use an APD move to break dependency chains;
+		otherwise use a short move to avoid extra work.
+
+		movaps encodes one byte shorter than movapd.  */
+ (eq_attr "alternative" "6")
+ (cond
+ [(ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (ne (symbol_ref "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_int 0))
+ (const_string "V2DF")
+ ]
+ (const_string "DF"))
+	     /* For architectures that resolve dependencies on register
+		parts, we can avoid the extra work of zeroing out the
+		upper part of the register.  */
+ (eq_attr "alternative" "7")
+ (if_then_else
+ (ne (symbol_ref "TARGET_SSE_SPLIT_REGS")
+ (const_int 0))
+ (const_string "V1DF")
+ (const_string "DF"))
+ ]
+ (const_string "DF")))])
+
+(define_split
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ "reload_completed
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ! (ANY_FP_REG_P (operands[0]) ||
+ (GET_CODE (operands[0]) == SUBREG
+ && ANY_FP_REG_P (SUBREG_REG (operands[0]))))
+ && ! (ANY_FP_REG_P (operands[1]) ||
+ (GET_CODE (operands[1]) == SUBREG
+ && ANY_FP_REG_P (SUBREG_REG (operands[1]))))"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_insn "*swapdf"
+ [(set (match_operand:DF 0 "fp_register_operand" "+f")
+ (match_operand:DF 1 "fp_register_operand" "+f"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "reload_completed || TARGET_80387"
+{
+ if (STACK_TOP_P (operands[0]))
+ return "fxch\t%1";
+ else
+ return "fxch\t%0";
+}
+ [(set_attr "type" "fxch")
+ (set_attr "mode" "DF")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ ""
+ "ix86_expand_move (XFmode, operands); DONE;")
+
+;; Size of pushxf is 3 (for sub) + 2 (for fstp) + memory operand size.
+;; Size of pushxf using integer instructions is 3 + 3*memory operand size.
+;; Pushing via integer instructions is longer except for constants
+;; and direct memory references (assuming that any given constant is
+;; pushed only once; but that ought to be handled elsewhere).
+
+(define_insn "*pushxf_nointeger"
+ [(set (match_operand:XF 0 "push_operand" "=X,X,X")
+ (match_operand:XF 1 "general_no_elim_operand" "f,Fo,*r"))]
+ "optimize_size"
+{
+  /* This insn should already have been split before reg-stack.  */
+ gcc_unreachable ();
+}
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387,*,*")
+ (set_attr "mode" "XF,SI,SI")])
+
+(define_insn "*pushxf_integer"
+ [(set (match_operand:XF 0 "push_operand" "=<,<")
+ (match_operand:XF 1 "general_no_elim_operand" "f,ro"))]
+ "!optimize_size"
+{
+  /* This insn should already have been split before reg-stack.  */
+ gcc_unreachable ();
+}
+ [(set_attr "type" "multi")
+ (set_attr "unit" "i387,*")
+ (set_attr "mode" "XF,SI")])
+
+(define_split
+ [(set (match_operand 0 "push_operand" "")
+ (match_operand 1 "general_operand" ""))]
+ "reload_completed
+ && (GET_MODE (operands[0]) == XFmode
+ || GET_MODE (operands[0]) == DFmode)
+ && !ANY_FP_REG_P (operands[1])"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (match_operand:XF 1 "any_fp_register_operand" ""))]
+ "!TARGET_64BIT"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:SI SP_REG)) (match_dup 1))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (match_operand:XF 1 "any_fp_register_operand" ""))]
+ "TARGET_64BIT"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:DI SP_REG)) (match_dup 1))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+;; Do not use integer registers when optimizing for size
+(define_insn "*movxf_nointeger"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,*r,o")
+ (match_operand:XF 1 "general_operand" "fm,f,G,*roF,F*r"))]
+ "optimize_size
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && (reload_in_progress || reload_completed
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], XFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ /* There is no non-popping store to memory for XFmode. So if
+ we need one, follow the store with a load. */
+ if (! find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0\;fld%z0\t%y0";
+ else
+ return "fstp%z0\t%y0";
+
+ case 2:
+ return standard_80387_constant_opcode (operands[1]);
+
+ case 3: case 4:
+ return "#";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,multi,multi")
+ (set_attr "mode" "XF,XF,XF,SI,SI")])
+
+(define_insn "*movxf_integer"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,r,o")
+ (match_operand:XF 1 "general_operand" "fm,f,G,roF,Fr"))]
+ "!optimize_size
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && (reload_in_progress || reload_completed
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], XFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ /* There is no non-popping store to memory for XFmode. So if
+ we need one, follow the store with a load. */
+ if (! find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0\;fld%z0\t%y0";
+ else
+ return "fstp%z0\t%y0";
+
+ case 2:
+ return standard_80387_constant_opcode (operands[1]);
+
+ case 3: case 4:
+ return "#";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,multi,multi")
+ (set_attr "mode" "XF,XF,XF,SI,SI")])
+
+(define_split
+ [(set (match_operand 0 "nonimmediate_operand" "")
+ (match_operand 1 "general_operand" ""))]
+ "reload_completed
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && GET_MODE (operands[0]) == XFmode
+ && ! (ANY_FP_REG_P (operands[0]) ||
+ (GET_CODE (operands[0]) == SUBREG
+ && ANY_FP_REG_P (SUBREG_REG (operands[0]))))
+ && ! (ANY_FP_REG_P (operands[1]) ||
+ (GET_CODE (operands[1]) == SUBREG
+ && ANY_FP_REG_P (SUBREG_REG (operands[1]))))"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operand 1 "memory_operand" ""))]
+ "reload_completed
+ && GET_CODE (operands[1]) == MEM
+ && (GET_MODE (operands[0]) == XFmode
+ || GET_MODE (operands[0]) == SFmode || GET_MODE (operands[0]) == DFmode)
+ && constant_pool_reference_p (operands[1])"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx c = avoid_constant_pool_reference (operands[1]);
+ rtx r = operands[0];
+
+ if (GET_CODE (r) == SUBREG)
+ r = SUBREG_REG (r);
+
+ if (SSE_REG_P (r))
+ {
+ if (!standard_sse_constant_p (c))
+ FAIL;
+ }
+ else if (FP_REG_P (r))
+ {
+ if (!standard_80387_constant_p (c))
+ FAIL;
+ }
+ else if (MMX_REG_P (r))
+ FAIL;
+
+ operands[1] = c;
+})
+
+(define_insn "swapxf"
+ [(set (match_operand:XF 0 "register_operand" "+f")
+ (match_operand:XF 1 "register_operand" "+f"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_80387"
+{
+ if (STACK_TOP_P (operands[0]))
+ return "fxch\t%1";
+ else
+ return "fxch\t%0";
+}
+ [(set_attr "type" "fxch")
+ (set_attr "mode" "XF")])
+
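+;; fxch exchanges %st(0) with %st(N), so the template above simply names
+;; whichever operand is not already on top of the register stack.
+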
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "nonimmediate_operand" ""))]
+ "TARGET_64BIT"
+{
+ ix86_expand_move (TFmode, operands);
+ DONE;
+})
+
+(define_insn "*movtf_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,o,x,x,xm")
+ (match_operand:TF 1 "general_operand" "riFo,riF,C,xm,x"))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return "#";
+ case 2:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "xorps\t%0, %0";
+ else
+ return "pxor\t%0, %0";
+ case 3:
+ case 4:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movdqa\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "*,*,sselog1,ssemov,ssemov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "2,3")
+ (if_then_else
+ (ne (symbol_ref "optimize_size")
+ (const_int 0))
+ (const_string "V4SF")
+ (const_string "TI"))
+ (eq_attr "alternative" "4")
+ (if_then_else
+ (ior (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
+ (const_int 0))
+ (ne (symbol_ref "optimize_size")
+ (const_int 0)))
+ (const_string "V4SF")
+ (const_string "TI"))]
+ (const_string "DI")))])
+
+(define_split
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ "reload_completed && !SSE_REG_P (operands[0])
+ && !SSE_REG_P (operands[1])"
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
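+;; Rough summary of the TFmode handling above: on TARGET_64BIT a TFmode
+;; value either lives in SSE registers, where a clear is xorps/pxor and a
+;; copy is movaps/movdqa, or it is split into a pair of 64-bit integer
+;; moves by ix86_split_long_move after reload.
+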
+;; Zero extension instructions
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+{
+ if (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)
+ {
+ operands[1] = force_reg (HImode, operands[1]);
+ emit_insn (gen_zero_extendhisi2_and (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+(define_insn "zero_extendhisi2_and"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_ZERO_EXTEND_WITH_AND && !optimize_size"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed && TARGET_ZERO_EXTEND_WITH_AND && !optimize_size"
+ [(parallel [(set (match_dup 0) (and:SI (match_dup 0) (const_int 65535)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
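+;; Illustrative sketch: with TARGET_ZERO_EXTEND_WITH_AND (486/Pentium
+;; tuning, where movzx is slow) the split above turns an in-place
+;;     movzwl %ax, %eax
+;; into
+;;     andl $65535, %eax
+;; at the cost of clobbering the flags.
+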
+(define_insn "*zero_extendhisi2_movzwl"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ "!TARGET_ZERO_EXTEND_WITH_AND || optimize_size"
+ "movz{wl|x}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+(define_expand "zero_extendqihi2"
+ [(parallel
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*zero_extendqihi2_and"
+ [(set (match_operand:HI 0 "register_operand" "=r,?&q")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_ZERO_EXTEND_WITH_AND && !optimize_size"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "HI")])
+
+(define_insn "*zero_extendqihi2_movzbw_and"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm,0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_ZERO_EXTEND_WITH_AND || optimize_size"
+ "#"
+ [(set_attr "type" "imovx,alu1")
+ (set_attr "mode" "HI")])
+
+;; Zero extend to SImode here to avoid partial register stalls.
+(define_insn "*zero_extendqihi2_movzbl"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ "(!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && reload_completed"
+ "movz{bl|x}\t{%1, %k0|%k0, %k1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+;; For the movzbw case strip only the clobber
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && (!TARGET_ZERO_EXTEND_WITH_AND || optimize_size)
+ && (!REG_P (operands[1]) || ANY_QI_REG_P (operands[1]))"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))])
+
+;; When source and destination do not overlap, clear the destination
+;; first and then do the movb.
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ANY_QI_REG_P (operands[0])
+ && (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)
+ && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0) (const_int 0))
+ (set (strict_low_part (match_dup 2)) (match_dup 1))]
+ "operands[2] = gen_lowpart (QImode, operands[0]);")
+
+;; The rest is handled by a single and.
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && true_regnum (operands[0]) == true_regnum (operands[1])"
+ [(parallel [(set (match_dup 0) (and:HI (match_dup 0) (const_int 255)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_expand "zero_extendqisi2"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*zero_extendqisi2_and"
+ [(set (match_operand:SI 0 "register_operand" "=r,?&q")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_ZERO_EXTEND_WITH_AND && !optimize_size"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")])
+
+(define_insn "*zero_extendqisi2_movzbw_and"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm,0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_ZERO_EXTEND_WITH_AND || optimize_size"
+ "#"
+ [(set_attr "type" "imovx,alu1")
+ (set_attr "mode" "SI")])
+
+(define_insn "*zero_extendqisi2_movzbw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ "(!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && reload_completed"
+ "movz{bl|x}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+;; For the movzbl case strip only the clobber
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && (!TARGET_ZERO_EXTEND_WITH_AND || optimize_size)
+ && (!REG_P (operands[1]) || ANY_QI_REG_P (operands[1]))"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))])
+
+;; When source and destination do not overlap, clear the destination
+;; first and then do the movb.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ANY_QI_REG_P (operands[0])
+ && (ANY_QI_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)
+ && (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)
+ && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0) (const_int 0))
+ (set (strict_low_part (match_dup 2)) (match_dup 1))]
+ "operands[2] = gen_lowpart (QImode, operands[0]);")
+
+;; The rest is handled by a single and.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && true_regnum (operands[0]) == true_regnum (operands[1])"
+ [(parallel [(set (match_dup 0) (and:SI (match_dup 0) (const_int 255)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+;; %%% Kill me once multi-word ops are sane.
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "rm")))]
+ ""
+ "if (!TARGET_64BIT)
+ {
+ emit_insn (gen_zero_extendsidi2_32 (operands[0], operands[1]));
+ DONE;
+ }
+ ")
+
+(define_insn "zero_extendsidi2_32"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,?r,?*o,?*y,?*Yi")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,rm,r,rm,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "!TARGET_64BIT"
+ "@
+ #
+ #
+ #
+ movd\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "mode" "SI,SI,SI,DI,TI")
+ (set_attr "type" "multi,multi,multi,mmxmov,ssemov")])
+
+(define_insn "zero_extendsidi2_rex64"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o,?*y,?*Yi")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "rm,0,rm,rm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_64BIT"
+ "@
+ mov\t{%k1, %k0|%k0, %k1}
+ #
+ movd\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imovx,imov,mmxmov,ssemov")
+ (set_attr "mode" "SI,DI,SI,SI")])
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (zero_extend:DI (match_dup 0)))]
+ "TARGET_64BIT"
+ [(set (match_dup 4) (const_int 0))]
+ "split_di (&operands[0], 1, &operands[3], &operands[4]);")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && reload_completed
+ && true_regnum (operands[0]) == true_regnum (operands[1])"
+ [(set (match_dup 4) (const_int 0))]
+ "split_di (&operands[0], 1, &operands[3], &operands[4]);")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && reload_completed
+ && !SSE_REG_P (operands[0]) && !MMX_REG_P (operands[0])"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 4) (const_int 0))]
+ "split_di (&operands[0], 1, &operands[3], &operands[4]);")
+
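+;; For illustration: on !TARGET_64BIT the splits above reduce an
+;; SImode->DImode zero extension to plain word moves after reload, e.g.
+;;     movl %eax, lo        ; omitted when the halves already coincide
+;;     movl $0, hi
+;; with split_di supplying the lo/hi words in operands[3]/operands[4].
+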
+(define_insn "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_64BIT"
+ "movz{wl|x}\t{%1, %k0|%k0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_64BIT"
+ "movz{bl|x}\t{%1, %k0|%k0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")])
+
+;; Sign extension instructions
+
+(define_expand "extendsidi2"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+{
+ if (TARGET_64BIT)
+ {
+ emit_insn (gen_extendsidi2_rex64 (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+(define_insn "*extendsidi2_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=*A,r,?r,?*o")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "0,0,r,r")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_scratch:SI 2 "=X,X,X,&r"))]
+ "!TARGET_64BIT"
+ "#")
+
+(define_insn "extendsidi2_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=*a,r")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "*0,rm")))]
+ "TARGET_64BIT"
+ "@
+ {cltq|cdqe}
+ movs{lq|x}\t{%1,%0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")
+ (set_attr "prefix_0f" "0")
+ (set_attr "modrm" "0,1")])
+
+(define_insn "extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ "TARGET_64BIT"
+ "movs{wq|x}\t{%1,%0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")])
+
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ "TARGET_64BIT"
+ "movs{bq|x}\t{%1,%0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "DI")])
+
+;; Extend to memory case when source register does die.
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "(reload_completed
+ && dead_or_set_p (insn, operands[1])
+ && !reg_mentioned_p (operands[1], operands[0]))"
+ [(set (match_dup 3) (match_dup 1))
+ (parallel [(set (match_dup 1) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 4) (match_dup 1))]
+ "split_di (&operands[0], 1, &operands[3], &operands[4]);")
+
+;; Extend to memory case when source register does not die.
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ split_di (&operands[0], 1, &operands[3], &operands[4]);
+
+ emit_move_insn (operands[3], operands[1]);
+
+ /* Generate a cltd if possible and doing so is profitable. */
+ if (true_regnum (operands[1]) == 0
+ && true_regnum (operands[2]) == 1
+ && (optimize_size || TARGET_USE_CLTD))
+ {
+ emit_insn (gen_ashrsi3_31 (operands[2], operands[1], GEN_INT (31)));
+ }
+ else
+ {
+ emit_move_insn (operands[2], operands[1]);
+ emit_insn (gen_ashrsi3_31 (operands[2], operands[2], GEN_INT (31)));
+ }
+ emit_move_insn (operands[4], operands[2]);
+ DONE;
+})
+
+;; Extend to register case. Optimize case where source and destination
+;; registers match and cases where we can use cltd.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ split_di (&operands[0], 1, &operands[3], &operands[4]);
+
+ if (true_regnum (operands[3]) != true_regnum (operands[1]))
+ emit_move_insn (operands[3], operands[1]);
+
+ /* Generate a cltd if possible and doing so is profitable. */
+ if (true_regnum (operands[3]) == 0
+ && (optimize_size || TARGET_USE_CLTD))
+ {
+ emit_insn (gen_ashrsi3_31 (operands[4], operands[3], GEN_INT (31)));
+ DONE;
+ }
+
+ if (true_regnum (operands[4]) != true_regnum (operands[1]))
+ emit_move_insn (operands[4], operands[1]);
+
+ emit_insn (gen_ashrsi3_31 (operands[4], operands[4], GEN_INT (31)));
+ DONE;
+})
+
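+;; For reference: cltd sign-extends %eax into %edx:%eax in one byte, so
+;; when the low half sits in %eax and the high half in %edx (and doing so
+;; is profitable) the splits above emit just
+;;     cltd
+;; and otherwise fall back to a register copy plus
+;;     sarl $31, hi
+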
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=*a,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "*0,rm")))]
+ ""
+{
+ switch (get_attr_prefix_0f (insn))
+ {
+ case 0:
+ return "{cwtl|cwde}";
+ default:
+ return "movs{wl|x}\t{%1,%0|%0, %1}";
+ }
+}
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")
+ (set (attr "prefix_0f")
+ ;; movsx is short decodable while cwtl is vector decoded.
+ (if_then_else (and (eq_attr "cpu" "!k6")
+ (eq_attr "alternative" "0"))
+ (const_string "0")
+ (const_string "1")))
+ (set (attr "modrm")
+ (if_then_else (eq_attr "prefix_0f" "0")
+ (const_string "0")
+ (const_string "1")))])
+
+(define_insn "*extendhisi2_zext"
+ [(set (match_operand:DI 0 "register_operand" "=*a,r")
+ (zero_extend:DI
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "*0,rm"))))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_prefix_0f (insn))
+ {
+ case 0:
+ return "{cwtl|cwde}";
+ default:
+ return "movs{wl|x}\t{%1,%k0|%k0, %1}";
+ }
+}
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")
+ (set (attr "prefix_0f")
+ ;; movsx is short decodable while cwtl is vector decoded.
+ (if_then_else (and (eq_attr "cpu" "!k6")
+ (eq_attr "alternative" "0"))
+ (const_string "0")
+ (const_string "1")))
+ (set (attr "modrm")
+ (if_then_else (eq_attr "prefix_0f" "0")
+ (const_string "0")
+ (const_string "1")))])
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=*a,r")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "*0,qm")))]
+ ""
+{
+ switch (get_attr_prefix_0f (insn))
+ {
+ case 0:
+ return "{cbtw|cbw}";
+ default:
+ return "movs{bw|x}\t{%1,%0|%0, %1}";
+ }
+}
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "HI")
+ (set (attr "prefix_0f")
+ ;; movsx is short decodable while cbtw is vector decoded.
+ (if_then_else (and (eq_attr "cpu" "!k6")
+ (eq_attr "alternative" "0"))
+ (const_string "0")
+ (const_string "1")))
+ (set (attr "modrm")
+ (if_then_else (eq_attr "prefix_0f" "0")
+ (const_string "0")
+ (const_string "1")))])
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ ""
+ "movs{bl|x}\t{%1,%0|%0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+(define_insn "*extendqisi2_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm"))))]
+ "TARGET_64BIT"
+ "movs{bl|x}\t{%1,%k0|%k0, %1}"
+ [(set_attr "type" "imovx")
+ (set_attr "mode" "SI")])
+
+;; Conversions between float and double.
+
+;; These are all no-ops in the model used for the 80387. So just
+;; emit moves.
+
+;; %%% Kill these when call knows how to work out a DFmode push earlier.
+(define_insn "*dummy_extendsfdf2"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "push_operand" "=<")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fYt")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "0"
+ "#")
+
+(define_split
+ [(set (match_operand:DF 0 "push_operand" "")
+ (float_extend:DF (match_operand:SF 1 "fp_register_operand" "")))]
+ "!TARGET_64BIT"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -8)))
+ (set (mem:DF (reg:SI SP_REG)) (float_extend:DF (match_dup 1)))])
+
+(define_split
+ [(set (match_operand:DF 0 "push_operand" "")
+ (float_extend:DF (match_operand:SF 1 "fp_register_operand" "")))]
+ "TARGET_64BIT"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -8)))
+ (set (mem:DF (reg:DI SP_REG)) (float_extend:DF (match_dup 1)))])
+
+(define_insn "*dummy_extendsfxf2"
+ [(set (match_operand:XF 0 "push_operand" "=<")
+ (float_extend:XF (match_operand:SF 1 "nonimmediate_operand" "f")))]
+ "0"
+ "#")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (float_extend:XF (match_operand:SF 1 "fp_register_operand" "")))]
+ ""
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:SI SP_REG)) (float_extend:XF (match_dup 1)))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (float_extend:XF (match_operand:SF 1 "fp_register_operand" "")))]
+ "TARGET_64BIT"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:DI SP_REG)) (float_extend:XF (match_dup 1)))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (float_extend:XF (match_operand:DF 1 "fp_register_operand" "")))]
+ ""
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:SI SP_REG)) (float_extend:XF (match_dup 1)))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+(define_split
+ [(set (match_operand:XF 0 "push_operand" "")
+ (float_extend:XF (match_operand:DF 1 "fp_register_operand" "")))]
+ "TARGET_64BIT"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (match_dup 2)))
+ (set (mem:XF (reg:DI SP_REG)) (float_extend:XF (match_dup 1)))]
+ "operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12);")
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (float_extend:DF (match_operand:SF 1 "general_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+{
+ /* ??? Needed for compress_float_constant since all fp constants
+ are LEGITIMATE_CONSTANT_P. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ if ((!TARGET_SSE2 || TARGET_MIX_SSE_I387)
+ && standard_80387_constant_p (operands[1]) > 0)
+ {
+ operands[1] = simplify_const_unary_operation
+ (FLOAT_EXTEND, DFmode, operands[1], SFmode);
+ emit_move_insn_1 (operands[0], operands[1]);
+ DONE;
+ }
+ operands[1] = validize_mem (force_const_mem (SFmode, operands[1]));
+ }
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+})
+
+(define_insn "*extendsfdf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,m,x")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fm,f,xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_MIX_SSE_I387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ case 2:
+ return "cvtss2sd\t{%1, %0|%0, %1}";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,ssecvt")
+ (set_attr "mode" "SF,XF,DF")])
+
+(define_insn "*extendsfdf2_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=x")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "cvtss2sd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "DF")])
+
+(define_insn "*extendsfdf2_i387"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,m")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fm,f")))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF,XF")])
+
+(define_expand "extendsfxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (float_extend:XF (match_operand:SF 1 "general_operand" "")))]
+ "TARGET_80387"
+{
+ /* ??? Needed for compress_float_constant since all fp constants
+ are LEGITIMATE_CONSTANT_P. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ if (standard_80387_constant_p (operands[1]) > 0)
+ {
+ operands[1] = simplify_const_unary_operation
+ (FLOAT_EXTEND, XFmode, operands[1], SFmode);
+ emit_move_insn_1 (operands[0], operands[1]);
+ DONE;
+ }
+ operands[1] = validize_mem (force_const_mem (SFmode, operands[1]));
+ }
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+})
+
+(define_insn "*extendsfxf2_i387"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m")
+ (float_extend:XF (match_operand:SF 1 "nonimmediate_operand" "fm,f")))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ /* There is no non-popping store to memory for XFmode. So if
+ we need one, follow the store with a load. */
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fstp%z0\t%y0\n\tfld%z0\t%y0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF,XF")])
+
+(define_expand "extenddfxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (float_extend:XF (match_operand:DF 1 "general_operand" "")))]
+ "TARGET_80387"
+{
+ /* ??? Needed for compress_float_constant since all fp constants
+ are LEGITIMATE_CONSTANT_P. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ if (standard_80387_constant_p (operands[1]) > 0)
+ {
+ operands[1] = simplify_const_unary_operation
+ (FLOAT_EXTEND, XFmode, operands[1], DFmode);
+ emit_move_insn_1 (operands[0], operands[1]);
+ DONE;
+ }
+ operands[1] = validize_mem (force_const_mem (DFmode, operands[1]));
+ }
+ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+})
+
+(define_insn "*extenddfxf2_i387"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m")
+ (float_extend:XF (match_operand:DF 1 "nonimmediate_operand" "fm,f")))]
+ "TARGET_80387
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_387_reg_move (insn, operands);
+
+ case 1:
+ /* There is no non-popping store to memory for XFmode. So if
+ we need one, follow the store with a load. */
+ if (! find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0\n\tfld%z0\t%y0";
+ else
+ return "fstp%z0\t%y0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "DF,XF")])
+
+;; %%% This seems like bad news.
+;; This cannot output into an f-reg because there is no way to be sure
+;; of truncating in that case. Otherwise this is just like a simple move
+;; insn. So we pretend we can output to a reg in order to get better
+;; register preferencing, but we really use a stack slot.
+
+;; Conversion from DFmode to SFmode.
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+{
+ if (MEM_P (operands[0]) && MEM_P (operands[1]))
+ operands[1] = force_reg (DFmode, operands[1]);
+
+ if (TARGET_SSE2 && TARGET_SSE_MATH && !TARGET_MIX_SSE_I387)
+ ;
+ else if (flag_unsafe_math_optimizations)
+ ;
+ else
+ {
+ rtx temp = assign_386_stack_local (SFmode, SLOT_VIRTUAL);
+ emit_insn (gen_truncdfsf2_with_temp (operands[0], operands[1], temp));
+ DONE;
+ }
+})
+
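+;; Background for the stack slot above: the i387 keeps values in 80-bit
+;; extended precision, so a correctly rounded DF->SF truncation must pass
+;; through memory, roughly
+;;     fstps slot           ; the store rounds to single precision
+;;     flds  slot           ; reload when a register result is wanted
+;; only pure SSE math or -funsafe-math-optimizations may skip the slot.
+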
+(define_expand "truncdfsf2_with_temp"
+ [(parallel [(set (match_operand:SF 0 "" "")
+ (float_truncate:SF (match_operand:DF 1 "" "")))
+ (clobber (match_operand:SF 2 "" ""))])]
+ "")
+
+(define_insn "*truncdfsf_fast_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,f,x")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "f ,f,xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_MIX_SSE_I387 && flag_unsafe_math_optimizations"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+ case 1:
+ return output_387_reg_move (insn, operands);
+ case 2:
+ return "cvtsd2ss\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,fmov,ssecvt")
+ (set_attr "mode" "SF")])
+
+;; Yes, this one doesn't depend on flag_unsafe_math_optimizations,
+;; because nothing we do here is unsafe.
+(define_insn "*truncdfsf_fast_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "cvtsd2ss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncdfsf_fast_i387"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=fm")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "f")))]
+ "TARGET_80387 && flag_unsafe_math_optimizations"
+ "* return output_387_reg_move (insn, operands);"
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncdfsf_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,?fx*r,Yt")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "f ,f ,Ytm")))
+ (clobber (match_operand:SF 2 "memory_operand" "=X,m ,X"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_MIX_SSE_I387"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+ case 1:
+ return "#";
+ case 2:
+ return "cvtsd2ss\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,multi,ssecvt")
+ (set_attr "unit" "*,i387,*")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncdfsf_i387"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,?fx*r")
+ (float_truncate:SF
+ (match_operand:DF 1 "nonimmediate_operand" "f,f")))
+ (clobber (match_operand:SF 2 "memory_operand" "=X,m"))]
+ "TARGET_80387"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+ case 1:
+ return "#";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fmov,multi")
+ (set_attr "unit" "*,i387")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncdfsf2_i387_1"
+ [(set (match_operand:SF 0 "memory_operand" "=m")
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_80387
+ && !(TARGET_SSE2 && TARGET_SSE_MATH)
+ && !TARGET_MIX_SSE_I387"
+{
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF
+ (match_operand:DF 1 "fp_register_operand" "")))
+ (clobber (match_operand 2 "" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+{
+ operands[1] = gen_rtx_REG (SFmode, true_regnum (operands[1]));
+})
+
+;; Conversion from XFmode to SFmode.
+
+(define_expand "truncxfsf2"
+ [(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_dup 2))])]
+ "TARGET_80387"
+{
+ if (flag_unsafe_math_optimizations)
+ {
+ rtx reg = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SFmode);
+ emit_insn (gen_truncxfsf2_i387_noop (reg, operands[1]));
+ if (reg != operands[0])
+ emit_move_insn (operands[0], reg);
+ DONE;
+ }
+ else
+ operands[2] = assign_386_stack_local (SFmode, SLOT_VIRTUAL);
+})
+
+(define_insn "*truncxfsf2_mixed"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,?f,?r,?x")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "f,f,f,f")))
+ (clobber (match_operand:SF 2 "memory_operand" "=X,m,m,m"))]
+ "TARGET_MIX_SSE_I387"
+{
+ gcc_assert (!which_alternative);
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov,multi,multi,multi")
+ (set_attr "unit" "*,i387,i387,i387")
+ (set_attr "mode" "SF")])
+
+(define_insn "truncxfsf2_i387_noop"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_80387 && flag_unsafe_math_optimizations"
+{
+ return output_387_reg_move (insn, operands);
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncxfsf2_i387"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,?f,?r")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "f,f,f")))
+ (clobber (match_operand:SF 2 "memory_operand" "=X,m,m"))]
+ "TARGET_80387"
+{
+ gcc_assert (!which_alternative);
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov,multi,multi")
+ (set_attr "unit" "*,i387,i387")
+ (set_attr "mode" "SF")])
+
+(define_insn "*truncxfsf2_i387_1"
+ [(set (match_operand:SF 0 "memory_operand" "=m")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_80387"
+{
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "SF")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_operand:SF 2 "memory_operand" ""))]
+ "TARGET_80387 && reload_completed"
+ [(set (match_dup 2) (float_truncate:SF (match_dup 1)))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_split
+ [(set (match_operand:SF 0 "memory_operand" "")
+ (float_truncate:SF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_operand:SF 2 "memory_operand" ""))]
+ "TARGET_80387"
+ [(set (match_dup 0) (float_truncate:SF (match_dup 1)))]
+ "")
+
+;; Conversion from XFmode to DFmode.
+
+(define_expand "truncxfdf2"
+ [(parallel [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_dup 2))])]
+ "TARGET_80387"
+{
+ if (flag_unsafe_math_optimizations)
+ {
+ rtx reg = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DFmode);
+ emit_insn (gen_truncxfdf2_i387_noop (reg, operands[1]));
+ if (reg != operands[0])
+ emit_move_insn (operands[0], reg);
+ DONE;
+ }
+ else
+ operands[2] = assign_386_stack_local (DFmode, SLOT_VIRTUAL);
+})
+
+(define_insn "*truncxfdf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,?f,?r,?Yt")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "f,f,f,f")))
+ (clobber (match_operand:DF 2 "memory_operand" "=X,m,m,m"))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_MIX_SSE_I387"
+{
+ gcc_assert (!which_alternative);
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov,multi,multi,multi")
+ (set_attr "unit" "*,i387,i387,i387")
+ (set_attr "mode" "DF")])
+
+(define_insn "truncxfdf2_i387_noop"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_truncate:DF (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_80387 && flag_unsafe_math_optimizations"
+{
+ return output_387_reg_move (insn, operands);
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "DF")])
+
+(define_insn "*truncxfdf2_i387"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,?f,?r")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "f,f,f")))
+ (clobber (match_operand:DF 2 "memory_operand" "=X,m,m"))]
+ "TARGET_80387"
+{
+ gcc_assert (!which_alternative);
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov,multi,multi")
+ (set_attr "unit" "*,i387,i387")
+ (set_attr "mode" "DF")])
+
+(define_insn "*truncxfdf2_i387_1"
+ [(set (match_operand:DF 0 "memory_operand" "=m")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "f")))]
+ "TARGET_80387"
+{
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+}
+ [(set_attr "type" "fmov")
+ (set_attr "mode" "DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_operand:DF 2 "memory_operand" ""))]
+ "TARGET_80387 && reload_completed"
+ [(set (match_dup 2) (float_truncate:DF (match_dup 1)))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (float_truncate:DF
+ (match_operand:XF 1 "register_operand" "")))
+ (clobber (match_operand:DF 2 "memory_operand" ""))]
+ "TARGET_80387"
+ [(set (match_dup 0) (float_truncate:DF (match_dup 1)))]
+ "")
+
+;; Signed conversion to DImode.
+
+(define_expand "fix_truncxfdi2"
+ [(parallel [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (fix:DI (match_operand:XF 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_80387"
+{
+ if (TARGET_FISTTP)
+ {
+ emit_insn (gen_fix_truncdi_fisttp_i387_1 (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+(define_expand "fix_trunc<mode>di2"
+ [(parallel [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (fix:DI (match_operand:SSEMODEF 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_80387 || (TARGET_64BIT && SSE_FLOAT_MODE_P (<MODE>mode))"
+{
+ if (TARGET_FISTTP
+ && !(TARGET_64BIT && SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH))
+ {
+ emit_insn (gen_fix_truncdi_fisttp_i387_1 (operands[0], operands[1]));
+ DONE;
+ }
+ if (TARGET_64BIT && SSE_FLOAT_MODE_P (<MODE>mode))
+ {
+ rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DImode);
+ emit_insn (gen_fix_trunc<mode>di_sse (out, operands[1]));
+ if (out != operands[0])
+ emit_move_insn (operands[0], out);
+ DONE;
+ }
+})
+
+;; Signed conversion to SImode.
+
+(define_expand "fix_truncxfsi2"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (fix:SI (match_operand:XF 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_80387"
+{
+ if (TARGET_FISTTP)
+ {
+ emit_insn (gen_fix_truncsi_fisttp_i387_1 (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+(define_expand "fix_trunc<mode>si2"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (fix:SI (match_operand:SSEMODEF 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_80387 || SSE_FLOAT_MODE_P (<MODE>mode)"
+{
+ if (TARGET_FISTTP
+ && !(SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH))
+ {
+ emit_insn (gen_fix_truncsi_fisttp_i387_1 (operands[0], operands[1]));
+ DONE;
+ }
+ if (SSE_FLOAT_MODE_P (<MODE>mode))
+ {
+ rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SImode);
+ emit_insn (gen_fix_trunc<mode>si_sse (out, operands[1]));
+ if (out != operands[0])
+ emit_move_insn (operands[0], out);
+ DONE;
+ }
+})
+
+;; Signed conversion to HImode.
+
+(define_expand "fix_trunc<mode>hi2"
+ [(parallel [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (fix:HI (match_operand:X87MODEF 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_80387
+ && !(SSE_FLOAT_MODE_P (<MODE>mode) && (!TARGET_FISTTP || TARGET_SSE_MATH))"
+{
+ if (TARGET_FISTTP)
+ {
+ emit_insn (gen_fix_trunchi_fisttp_i387_1 (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+;; APPLE LOCAL begin 4176531
+;; Unsigned conversion to SImode.
+
+(define_expand "fixuns_trunc<mode>si2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "x")
+ (fix:SI (match_operand:SSEMODEF 1 "register_operand" "x")))]
+ "!TARGET_64BIT && SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH && TARGET_SSE2
+ && !optimize_size && (ix86_preferred_stack_boundary >= 128)"
+{
+ /* APPLE LOCAL 4424891 */
+ ix86_expand_convert_uns_<MODE>2SI_sse (operands); DONE;
+})
+
+;; Unsigned conversion to HImode.
+
+(define_insn "fixuns_truncdfhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r")
+ (fix:HI (match_operand:DF 1 "nonimmediate_operand" "x,xm")))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "cvttsd2si\t{%1, %k0|%k0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "fixuns_truncsfhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r")
+ (fix:HI (match_operand:SF 1 "nonimmediate_operand" "x,xm")))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "cvttss2si\t{%1, %k0|%k0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "double,vector")])
+;; APPLE LOCAL end 4176531
+
+;; When SSE is available, it is always faster to use it!
+(define_insn "fix_truncsfdi_sse"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (fix:DI (match_operand:SF 1 "nonimmediate_operand" "x,xm")))]
+ "TARGET_64BIT && TARGET_SSE && (!TARGET_FISTTP || TARGET_SSE_MATH)"
+ "cvttss2si{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "fix_truncdfdi_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (fix:DI (match_operand:DF 1 "nonimmediate_operand" "Yt,Ytm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_64BIT && TARGET_SSE2 && (!TARGET_FISTTP || TARGET_SSE_MATH)"
+ "cvttsd2si{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "fix_truncsfsi_sse"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:SF 1 "nonimmediate_operand" "x,xm")))]
+ "TARGET_SSE && (!TARGET_FISTTP || TARGET_SSE_MATH)"
+ "cvttss2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "fix_truncdfsi_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:DF 1 "nonimmediate_operand" "Yt,Ytm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && (!TARGET_FISTTP || TARGET_SSE_MATH)"
+ "cvttsd2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,vector")])
+
+;; Avoid vector decoded forms of the instruction.
+(define_peephole2
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(match_scratch:DF 2 "Yt")
+ (set (match_operand:SSEMODEI24 0 "register_operand" "")
+ (fix:SSEMODEI24 (match_operand:DF 1 "memory_operand" "")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (fix:SSEMODEI24 (match_dup 2)))]
+ "")
+
+(define_peephole2
+ [(match_scratch:SF 2 "x")
+ (set (match_operand:SSEMODEI24 0 "register_operand" "")
+ (fix:SSEMODEI24 (match_operand:SF 1 "memory_operand" "")))]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (fix:SSEMODEI24 (match_dup 2)))]
+ "")
+
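+;; Rationale sketch for the two peepholes above: on K8/generic64 the
+;; memory forms of cvttsd2si/cvttss2si are vector decoded, so loading the
+;; operand into a scratch SSE register first trades one extra move for a
+;; more cheaply decoded convert.
+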
+(define_insn_and_split "fix_trunc<mode>_fisttp_i387_1"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "=m,?r")
+ (fix:X87MODEI (match_operand 1 "register_operand" "f,f")))]
+ "TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !((SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && (TARGET_64BIT || <MODE>mode != DImode))
+ && TARGET_SSE_MATH)
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ if (memory_operand (operands[0], VOIDmode))
+ emit_insn (gen_fix_trunc<mode>_i387_fisttp (operands[0], operands[1]));
+ else
+ {
+ operands[2] = assign_386_stack_local (<MODE>mode, SLOT_TEMP);
+ emit_insn (gen_fix_trunc<mode>_i387_fisttp_with_temp (operands[0],
+ operands[1],
+ operands[2]));
+ }
+ DONE;
+}
+ [(set_attr "type" "fisttp")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fix_trunc<mode>_i387_fisttp"
+ [(set (match_operand:X87MODEI 0 "memory_operand" "=m")
+ (fix:X87MODEI (match_operand 1 "register_operand" "f")))
+ (clobber (match_scratch:XF 2 "=&1f"))]
+ "TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !((SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && (TARGET_64BIT || <MODE>mode != DImode))
+ && TARGET_SSE_MATH)"
+ "* return output_fix_trunc (insn, operands, 1);"
+ [(set_attr "type" "fisttp")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fix_trunc<mode>_i387_fisttp_with_temp"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "=m,?r")
+ (fix:X87MODEI (match_operand 1 "register_operand" "f,f")))
+ (clobber (match_operand:X87MODEI 2 "memory_operand" "=m,m"))
+ (clobber (match_scratch:XF 3 "=&1f,&1f"))]
+ "TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !((SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && (TARGET_64BIT || <MODE>mode != DImode))
+ && TARGET_SSE_MATH)"
+ "#"
+ [(set_attr "type" "fisttp")
+ (set_attr "mode" "<MODE>")])
+
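+;; Background: fisttp (SSE3) truncates regardless of the rounding-control
+;; field, so the fisttp patterns here avoid the
+;;     fnstcw / set RC bits / fldcw ... fistp ... fldcw
+;; save/modify/restore dance required by the !TARGET_FISTTP patterns below.
+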
+(define_split
+ [(set (match_operand:X87MODEI 0 "register_operand" "")
+ (fix:X87MODEI (match_operand 1 "register_operand" "")))
+ (clobber (match_operand:X87MODEI 2 "memory_operand" ""))
+ (clobber (match_scratch 3 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 2) (fix:X87MODEI (match_dup 1)))
+ (clobber (match_dup 3))])
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_split
+ [(set (match_operand:X87MODEI 0 "memory_operand" "")
+ (fix:X87MODEI (match_operand 1 "register_operand" "")))
+ (clobber (match_operand:X87MODEI 2 "memory_operand" ""))
+ (clobber (match_scratch 3 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (fix:X87MODEI (match_dup 1)))
+ (clobber (match_dup 3))])]
+ "")
+
+;; See the comments in i386.h near OPTIMIZE_MODE_SWITCHING for a description
+;; of the machinery.  Note the clobber of FLAGS_REG: the i387 control
+;; word calculation (inserted by LCM in the mode switching pass) may itself
+;; use FLAGS_REG-clobbering insns.  See the emit_i387_cw_initialization ()
+;; function in i386.c.
+(define_insn_and_split "*fix_trunc<mode>_i387_1"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "=m,?r")
+ (fix:X87MODEI (match_operand 1 "register_operand" "f,f")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_80387 && !TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !(SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && (TARGET_64BIT || <MODE>mode != DImode))
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_TRUNC] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_TRUNC);
+ if (memory_operand (operands[0], VOIDmode))
+ emit_insn (gen_fix_trunc<mode>_i387 (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ {
+ operands[4] = assign_386_stack_local (<MODE>mode, SLOT_TEMP);
+ emit_insn (gen_fix_trunc<mode>_i387_with_temp (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ }
+ DONE;
+}
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fix_truncdi_i387"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (fix:DI (match_operand 1 "register_operand" "f")))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))
+ (clobber (match_scratch:XF 4 "=&1f"))]
+ "TARGET_80387 && !TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !(TARGET_64BIT && SSE_FLOAT_MODE_P (GET_MODE (operands[1])))"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "DI")])
+
+(define_insn "fix_truncdi_i387_with_temp"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,?r")
+ (fix:DI (match_operand 1 "register_operand" "f,f")))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:DI 4 "memory_operand" "=m,m"))
+ (clobber (match_scratch:XF 5 "=&1f,&1f"))]
+ "TARGET_80387 && !TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !(TARGET_64BIT && SSE_FLOAT_MODE_P (GET_MODE (operands[1])))"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (fix:DI (match_operand 1 "register_operand" "")))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (fix:DI (match_dup 1)))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (fix:DI (match_operand 1 "register_operand" "")))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (fix:DI (match_dup 1)))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])]
+ "")
+
+(define_insn "fix_trunc<mode>_i387"
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "=m")
+ (fix:X87MODEI12 (match_operand 1 "register_operand" "f")))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_80387 && !TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fix_trunc<mode>_i387_with_temp"
+ [(set (match_operand:X87MODEI12 0 "nonimmediate_operand" "=m,?r")
+ (fix:X87MODEI12 (match_operand 1 "register_operand" "f,f")))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" "=m,m"))]
+ "TARGET_80387 && !TARGET_FISTTP
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "<MODE>")])
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "register_operand" "")
+ (fix:X87MODEI12 (match_operand 1 "register_operand" "")))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (fix:X87MODEI12 (match_dup 1)))
+ (use (match_dup 2))
+ (use (match_dup 3))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "")
+ (fix:X87MODEI12 (match_operand 1 "register_operand" "")))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (fix:X87MODEI12 (match_dup 1)))
+ (use (match_dup 2))
+ (use (match_dup 3))])]
+ "")
+
+(define_insn "x86_fnstcw_1"
+ [(set (match_operand:HI 0 "memory_operand" "=m")
+ (unspec:HI [(reg:HI FPSR_REG)] UNSPEC_FSTCW))]
+ "TARGET_80387"
+ "fnstcw\t%0"
+ [(set_attr "length" "2")
+ (set_attr "mode" "HI")
+ (set_attr "unit" "i387")])
+
+(define_insn "x86_fldcw_1"
+ [(set (reg:HI FPSR_REG)
+ (unspec:HI [(match_operand:HI 0 "memory_operand" "m")] UNSPEC_FLDCW))]
+ "TARGET_80387"
+ "fldcw\t%0"
+ [(set_attr "length" "2")
+ (set_attr "mode" "HI")
+ (set_attr "unit" "i387")
+ (set_attr "athlon_decode" "vector")])
+
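+;; fnstcw and fldcw store and load the 16-bit i387 control word; they are
+;; the primitives emit_i387_cw_initialization () uses to install the
+;; round-toward-zero word (both RC bits, 10 and 11, set) around each
+;; fistp sequence.
+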
+;; Conversion between fixed point and floating point.
+
+;; Even though we only accept memory inputs, the backend _really_
+;; wants to be able to do this between registers.
+
+(define_expand "floathisf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_operand:HI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+{
+ if (TARGET_SSE_MATH)
+ {
+ emit_insn (gen_floatsisf2 (operands[0],
+ convert_to_mode (SImode, operands[1], 0)));
+ DONE;
+ }
+})
+
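+;; cvtsi2ss has no 16-bit form, so for SSE math the HImode input above is
+;; widened to SImode first and floatsisf2 below does the actual convert.
+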
+(define_insn "*floathisf2_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (float:SF (match_operand:HI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387 && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "SF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_expand "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "")
+
+(define_insn "*floatsisf2_mixed"
+ [(set (match_operand:SF 0 "register_operand" "=f,?f,x,x")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "m,r,r,mr")))]
+ "TARGET_MIX_SSE_I387"
+ "@
+ fild%z1\t%1
+ #
+ cvtsi2ss\t{%1, %0|%0, %1}
+ cvtsi2ss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fmov,multi,sseicvt,sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "unit" "*,i387,*,*")
+ (set_attr "athlon_decode" "*,*,vector,double")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatsisf2_sse"
+ [(set (match_operand:SF 0 "register_operand" "=x,x")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "r,mr")))]
+ "TARGET_SSE_MATH"
+ "cvtsi2ss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "vector,double")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatsisf2_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "SF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_expand "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_64BIT && TARGET_SSE_MATH)"
+;; APPLE LOCAL begin 6382081
+{
+ if (!TARGET_64BIT)
+ {
+ rtx XFreg = gen_reg_rtx (XFmode);
+ rtx SFstack = assign_386_stack_local (SFmode, SLOT_VIRTUAL);
+ emit_insn (gen_floatdixf2 (copy_rtx (XFreg), operands[1]));
+ emit_insn (gen_truncxfsf2 (copy_rtx (SFstack), XFreg));
+ emit_move_insn (operands[0], SFstack);
+ DONE;
+ }
+})
+;; APPLE LOCAL end 6382081
+
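+;; Sketch of the APPLE LOCAL path above: fildll loads a 64-bit integer
+;; exactly (XFmode carries a 64-bit significand), so converting DI->XF and
+;; rounding once down to SFmode presumably avoids the double rounding a
+;; DI->DF->SF chain could incur on 32-bit targets.
+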
+(define_insn "*floatdisf2_mixed"
+ [(set (match_operand:SF 0 "register_operand" "=f,?f,x,x")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "m,r,r,mr")))]
+ "TARGET_64BIT && TARGET_MIX_SSE_I387"
+ "@
+ fild%z1\t%1
+ #
+ cvtsi2ss{q}\t{%1, %0|%0, %1}
+ cvtsi2ss{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fmov,multi,sseicvt,sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "unit" "*,i387,*,*")
+ (set_attr "athlon_decode" "*,*,vector,double")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatdisf2_sse"
+ [(set (match_operand:SF 0 "register_operand" "=x,x")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "r,mr")))]
+ "TARGET_64BIT && TARGET_SSE_MATH"
+ "cvtsi2ss{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "vector,double")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatdisf2_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (float:SF (match_operand:DI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "SF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_expand "floathidf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_operand:HI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+{
+ if (TARGET_SSE2 && TARGET_SSE_MATH)
+ {
+ emit_insn (gen_floatsidf2 (operands[0],
+ convert_to_mode (SImode, operands[1], 0)));
+ DONE;
+ }
+})
+
+(define_insn "*floathidf2_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (float:DF (match_operand:HI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387 && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "DF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_expand "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "")
+
+(define_insn "*floatsidf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=f,?f,x,x")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "m,r,r,mr")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_MIX_SSE_I387"
+ "@
+ fild%z1\t%1
+ #
+ cvtsi2sd\t{%1, %0|%0, %1}
+ cvtsi2sd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fmov,multi,sseicvt,sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "unit" "*,i387,*,*")
+ (set_attr "athlon_decode" "*,*,double,direct")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatsidf2_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x,x")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "r,mr")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "cvtsi2sd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,direct")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatsidf2_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "DF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_expand "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_64BIT && TARGET_SSE2 && TARGET_SSE_MATH)"
+;; APPLE LOCAL begin 4424891
+{
+ if (!TARGET_64BIT && TARGET_SSE2 && TARGET_SSE_MATH)
+ {
+ ix86_expand_convert_sign_DI2DF_sse (operands); DONE;
+ }
+})
+;; APPLE LOCAL end 4424891
+
+(define_insn "*floatdidf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=f,?f,x,x")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "m,r,r,mr")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_64BIT && TARGET_SSE2 && TARGET_MIX_SSE_I387"
+ "@
+ fild%z1\t%1
+ #
+ cvtsi2sd{q}\t{%1, %0|%0, %1}
+ cvtsi2sd{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fmov,multi,sseicvt,sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "unit" "*,i387,*,*")
+ (set_attr "athlon_decode" "*,*,double,direct")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatdidf2_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x,x")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "r,mr")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_64BIT && TARGET_SSE2 && TARGET_SSE_MATH"
+ "cvtsi2sd{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,direct")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "*floatdidf2_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (float:DF (match_operand:DI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "DF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "floathixf2"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (float:XF (match_operand:HI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "XF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (float:XF (match_operand:SI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "XF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+(define_insn "floatdixf2"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (float:XF (match_operand:DI 1 "nonimmediate_operand" "m,?r")))]
+ "TARGET_80387"
+ "@
+ fild%z1\t%1
+ #"
+ [(set_attr "type" "fmov,multi")
+ (set_attr "mode" "XF")
+ (set_attr "unit" "*,i387")
+ (set_attr "fp_int_src" "true")])
+
+;; %%% Kill these when reload knows how to do it.
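+;; (The i387 fild instruction takes only a memory operand, so an integer
+;; source that ends up in a register is first spilled to a stack slot.)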
+(define_split
+ [(set (match_operand 0 "fp_register_operand" "")
+ (float (match_operand 1 "register_operand" "")))]
+ "reload_completed
+ && TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[0]))"
+ [(const_int 0)]
+{
+ operands[2] = ix86_force_to_memory (GET_MODE (operands[1]), operands[1]);
+ operands[2] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[2]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[2]));
+ ix86_free_from_memory (GET_MODE (operands[1]));
+ DONE;
+})
+
+(define_expand "floatunssisf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SI 1 "register_operand" ""))]
+ "!TARGET_64BIT && TARGET_SSE_MATH"
+ "x86_emit_floatuns (operands); DONE;")
+
+;; APPLE LOCAL begin 4424891
+(define_expand "floatunssidf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:SI 1 "register_operand" ""))]
+ "!TARGET_64BIT && TARGET_SSE2 && TARGET_SSE_MATH
+ && (ix86_preferred_stack_boundary >= 128)"
+ "x86_emit_floatuns (operands); DONE;")
+;; APPLE LOCAL end 4424891
+
+(define_expand "floatunsdisf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ "TARGET_64BIT && TARGET_SSE_MATH"
+ "x86_emit_floatuns (operands); DONE;")
+
+(define_expand "floatunsdidf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DI 1 "register_operand" ""))]
+ ;; APPLE LOCAL begin 4176531
+ "(TARGET_64BIT || !optimize_size) && TARGET_SSE2 && TARGET_SSE_MATH
+ && (ix86_preferred_stack_boundary >= 128)"
+ ;; APPLE LOCAL end 4176531
+ "x86_emit_floatuns (operands); DONE;")
+
+;; SSE extract/set expanders
+
+
+;; Add instructions
+
+;; %%% splits for addditi3
+
+(define_expand "addti3"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (plus:TI (match_operand:TI 1 "nonimmediate_operand" "")
+ (match_operand:TI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "ix86_expand_binary_operator (PLUS, TImode, operands); DONE;")
+
+(define_insn "*addti3_1"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o")
+ (plus:TI (match_operand:TI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:TI 2 "x86_64_general_operand" "roe,re")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, TImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (plus:TI (match_operand:TI 1 "nonimmediate_operand" "")
+ (match_operand:TI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (reg:CC FLAGS_REG) (unspec:CC [(match_dup 1) (match_dup 2)]
+ UNSPEC_ADD_CARRY))
+ (set (match_dup 0) (plus:DI (match_dup 1) (match_dup 2)))])
+ (parallel [(set (match_dup 3)
+ (plus:DI (plus:DI (ltu:DI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 4))
+ (match_dup 5)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_ti (operands+0, 1, operands+0, operands+3);
+ split_ti (operands+1, 1, operands+1, operands+4);
+ split_ti (operands+2, 1, operands+2, operands+5);")
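+
+;; (The split above implements the double-word add in the usual way: an
+;; "add" that records the carry, followed by an "adc" on the high halves.)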
+
+;; %%% splits for addsidi3
+; [(set (match_operand:DI 0 "nonimmediate_operand" "")
+; (plus:DI (match_operand:DI 1 "general_operand" "")
+; (zero_extend:DI (match_operand:SI 2 "general_operand" ""))))]
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (PLUS, DImode, operands); DONE;")
+
+(define_insn "*adddi3_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o")
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "general_operand" "roiF,riF")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && reload_completed"
+ [(parallel [(set (reg:CC FLAGS_REG) (unspec:CC [(match_dup 1) (match_dup 2)]
+ UNSPEC_ADD_CARRY))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+ (parallel [(set (match_dup 3)
+ (plus:SI (plus:SI (ltu:SI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 4))
+ (match_dup 5)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_di (operands+0, 1, operands+0, operands+3);
+ split_di (operands+1, 1, operands+1, operands+4);
+ split_di (operands+2, 1, operands+2, operands+5);")
+
+(define_insn "adddi3_carry_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (plus:DI (plus:DI (match_operand:DI 3 "ix86_carry_flag_operator" "")
+ (match_operand:DI 1 "nonimmediate_operand" "%0,0"))
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+ "adc{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*adddi3_cc_rex64"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC [(match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm")]
+ UNSPEC_ADD_CARRY))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+ "add{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "addqi3_carry"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (plus:QI (plus:QI (match_operand:QI 3 "ix86_carry_flag_operator" "")
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0"))
+ (match_operand:QI 2 "general_operand" "qi,qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (PLUS, QImode, operands)"
+ "adc{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "QI")])
+
+(define_insn "addhi3_carry"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (plus:HI (plus:HI (match_operand:HI 3 "ix86_carry_flag_operator" "")
+ (match_operand:HI 1 "nonimmediate_operand" "%0,0"))
+ (match_operand:HI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (PLUS, HImode, operands)"
+ "adc{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "HI")])
+
+(define_insn "addsi3_carry"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (plus:SI (plus:SI (match_operand:SI 3 "ix86_carry_flag_operator" "")
+ (match_operand:SI 1 "nonimmediate_operand" "%0,0"))
+ (match_operand:SI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (PLUS, SImode, operands)"
+ "adc{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*addsi3_carry_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI (match_operand:SI 3 "ix86_carry_flag_operator" "")
+ (match_operand:SI 1 "nonimmediate_operand" "%0"))
+ (match_operand:SI 2 "general_operand" "rim"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)"
+ "adc{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*addsi3_cc"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC [(match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")]
+ UNSPEC_ADD_CARRY))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "ix86_binary_operator_ok (PLUS, SImode, operands)"
+ "add{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "addqi3_cc"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC [(match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qi,qm")]
+ UNSPEC_ADD_CARRY))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (plus:QI (match_dup 1) (match_dup 2)))]
+ "ix86_binary_operator_ok (PLUS, QImode, operands)"
+ "add{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_expand "addsi3"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "ix86_expand_binary_operator (PLUS, SImode, operands); DONE;")
+
+(define_insn "*lea_1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "no_seg_address_operand" "p"))]
+ "!TARGET_64BIT"
+ "lea{l}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lea_1_rex64"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (subreg:SI (match_operand:DI 1 "no_seg_address_operand" "p") 0))]
+ "TARGET_64BIT"
+ "lea{l}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lea_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (subreg:SI (match_operand:DI 1 "no_seg_address_operand" "p") 0)))]
+ "TARGET_64BIT"
+ "lea{l}\t{%a1, %k0|%k0, %a1}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lea_2_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "no_seg_address_operand" "p"))]
+ "TARGET_64BIT"
+ "lea{q}\t{%a1, %0|%0, %a1}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "DI")])
+
+;; The lea patterns for non-Pmode modes need to be matched by several
+;; insns, which are converted to real lea instructions by splitters.
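+;; For instance, an HImode computation like d = a + b + 4 can, once
+;; widened, be emitted as a single "leal 4(%eax,%ecx), %edx" (the
+;; register assignment here is purely illustrative).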
+
+(define_insn_and_split "*lea_general_1"
+ [(set (match_operand 0 "register_operand" "=r")
+ (plus (plus (match_operand 1 "index_register_operand" "l")
+ (match_operand 2 "register_operand" "r"))
+ (match_operand 3 "immediate_operand" "i")))]
+ "(GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode
+ || (TARGET_64BIT && GET_MODE (operands[0]) == SImode))
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE (operands[0]) == GET_MODE (operands[2])
+ && (GET_MODE (operands[0]) == GET_MODE (operands[3])
+ || GET_MODE (operands[3]) == VOIDmode)"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx pat;
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_lowpart (Pmode, operands[2]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+ pat = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, operands[1], operands[2]),
+ operands[3]);
+ if (Pmode != SImode)
+ pat = gen_rtx_SUBREG (SImode, pat, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
+ DONE;
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn_and_split "*lea_general_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI (match_operand:SI 1 "index_register_operand" "l")
+ (match_operand:SI 2 "register_operand" "r"))
+ (match_operand:SI 3 "immediate_operand" "i"))))]
+ "TARGET_64BIT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (plus:DI (plus:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)) 0)))]
+{
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_lowpart (Pmode, operands[2]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn_and_split "*lea_general_2"
+ [(set (match_operand 0 "register_operand" "=r")
+ (plus (mult (match_operand 1 "index_register_operand" "l")
+ (match_operand 2 "const248_operand" "i"))
+ (match_operand 3 "nonmemory_operand" "ri")))]
+ "(GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode
+ || (TARGET_64BIT && GET_MODE (operands[0]) == SImode))
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && (GET_MODE (operands[0]) == GET_MODE (operands[3])
+ || GET_MODE (operands[3]) == VOIDmode)"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx pat;
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+ pat = gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, operands[1], operands[2]),
+ operands[3]);
+ if (Pmode != SImode)
+ pat = gen_rtx_SUBREG (SImode, pat, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
+ DONE;
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn_and_split "*lea_general_2_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (mult:SI (match_operand:SI 1 "index_register_operand" "l")
+ (match_operand:SI 2 "const248_operand" "n"))
+ (match_operand:SI 3 "nonmemory_operand" "ri"))))]
+ "TARGET_64BIT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (plus:DI (mult:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)) 0)))]
+{
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn_and_split "*lea_general_3"
+ [(set (match_operand 0 "register_operand" "=r")
+ (plus (plus (mult (match_operand 1 "index_register_operand" "l")
+ (match_operand 2 "const248_operand" "i"))
+ (match_operand 3 "register_operand" "r"))
+ (match_operand 4 "immediate_operand" "i")))]
+ "(GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode
+ || (TARGET_64BIT && GET_MODE (operands[0]) == SImode))
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE (operands[0]) == GET_MODE (operands[3])"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx pat;
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+ operands[4] = gen_lowpart (Pmode, operands[4]);
+ pat = gen_rtx_PLUS (Pmode,
+ gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, operands[1],
+ operands[2]),
+ operands[3]),
+ operands[4]);
+ if (Pmode != SImode)
+ pat = gen_rtx_SUBREG (SImode, pat, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
+ DONE;
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn_and_split "*lea_general_3_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 1 "index_register_operand" "l")
+ (match_operand:SI 2 "const248_operand" "n"))
+ (match_operand:SI 3 "register_operand" "r"))
+ (match_operand:SI 4 "immediate_operand" "i"))))]
+ "TARGET_64BIT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (plus:DI (plus:DI (mult:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (match_dup 4)) 0)))]
+{
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[3] = gen_lowpart (Pmode, operands[3]);
+ operands[4] = gen_lowpart (Pmode, operands[4]);
+}
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")])
+
+(define_insn "*adddi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r")
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,r")
+ (match_operand:DI 2 "x86_64_general_operand" "rme,re,le")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+ return "lea{q}\t{%a2, %0|%0, %a2}";
+
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{q}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{q}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
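+ /* For instance, "addq $128, %rax" needs a 32-bit immediate, while
+ the equivalent "subq $-128, %rax" fits a sign-extended 8-bit one;
+ -128 itself is excluded because negating it yields +128, which
+ loses that encoding. */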
+ if (GET_CODE (operands[2]) == CONST_INT
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{q}\t{%2, %0|%0, %2}";
+ }
+ return "add{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "lea")
+ ; Current assemblers are broken and do not allow @GOTOFF in
+ ; anything but a memory context.
+ (match_operand:DI 2 "pic_symbolic_operand" "")
+ (const_string "lea")
+ (match_operand:DI 2 "incdec_operand" "")
+ (const_string "incdec")
+ ]
+ (const_string "alu")))
+ (set_attr "mode" "DI")])
+
+;; Convert add to the lea pattern to avoid flags dependency.
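+;; For instance, "movq %rdi, %rax" followed by "addq %rsi, %rax" can be
+;; emitted instead as "leaq (%rdi,%rsi), %rax", which leaves EFLAGS alone.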
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "x86_64_nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(set (match_dup 0)
+ (plus:DI (match_dup 1)
+ (match_dup 2)))]
+ "")
+
+(define_insn "*adddi_2_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "rme,re"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=r,rm")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (PLUS, DImode, operands)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{q}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{q}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* ??? We ought to handle the 32-bit case here too;
+ do we need a new constraint? */
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{q}\t{%2, %0|%0, %2}";
+ }
+ return "add{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:DI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "DI")])
+
+(define_insn "*adddi_3_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (neg:DI (match_operand:DI 2 "x86_64_general_operand" "rme"))
+ (match_operand:DI 1 "x86_64_general_operand" "%0")))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCZmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{q}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{q}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* ??? We ought to handle the 32-bit case here too;
+ do we need a new constraint? */
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{q}\t{%2, %0|%0, %2}";
+ }
+ return "add{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:DI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "DI")])
+
+; For comparisons against 1, -1 and 128, we may generate better code
+; by converting cmp to add, inc or dec, as done by peephole2; this
+; pattern is matched then. We can't accept a general immediate, because
+; in case of overflow the result would be messed up.
+; This pattern also doesn't hold for 0x8000000000000000, since that
+; value overflows when negated.
+; Also, the carry flag is reversed compared to cmp, so this conversion
+; is valid only for comparisons that do not depend on it.
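+; For instance, "cmpq $-1, %rax" can be replaced by "incq %rax" when the
+; register is otherwise dead and the flag consumer does not examine the
+; carry flag, saving the immediate byte.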
+(define_insn "*adddi_4_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:DI 2 "x86_64_immediate_operand" "e")))
+ (clobber (match_scratch:DI 0 "=rm"))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCGCmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == constm1_rtx)
+ return "inc{q}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == const1_rtx);
+ return "dec{q}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if ((INTVAL (operands[2]) == -128
+ || (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) != 128))
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))))
+ return "sub{q}\t{%2, %0|%0, %2}";
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "add{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:DI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "DI")])
+
+(define_insn "*adddi_5_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%0")
+ (match_operand:DI 2 "x86_64_general_operand" "rme"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCGOCmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{q}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{q}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{q}\t{%2, %0|%0, %2}";
+ }
+ return "add{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:DI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "DI")])
+
+
+(define_insn "*addsi_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r")
+ (match_operand:SI 2 "general_operand" "rmni,rni,lni")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (PLUS, SImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+ return "lea{l}\t{%a2, %0|%0, %a2}";
+
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %0|%0, %2}";
+ }
+ return "add{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "lea")
+ ; Current assemblers are broken and do not allow @GOTOFF in
+ ; anything but a memory context.
+ (match_operand:SI 2 "pic_symbolic_operand" "")
+ (const_string "lea")
+ (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ ]
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+;; Convert add to the lea pattern to avoid flags dependency.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (plus (match_operand 1 "register_operand" "")
+ (match_operand 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(const_int 0)]
+{
+ rtx pat;
+ /* In -fPIC mode the constructs like (const (unspec [symbol_ref]))
+ may confuse gen_lowpart. */
+ if (GET_MODE (operands[0]) != Pmode)
+ {
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_lowpart (Pmode, operands[2]);
+ }
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ pat = gen_rtx_PLUS (Pmode, operands[1], operands[2]);
+ if (Pmode != SImode)
+ pat = gen_rtx_SUBREG (SImode, pat, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
+ DONE;
+})
+
+;; It may seem that a nonimmediate operand is the proper one for operand 1.
+;; The addsi_1 pattern allows a nonimmediate operand at that place, and
+;; we take care in ix86_binary_operator_ok not to allow two memory
+;; operands, so proper swapping will be done in reload. This allows
+;; patterns constructed from addsi_1 to match.
+(define_insn "addsi_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,r")
+ (match_operand:SI 2 "general_operand" "rmni,lni"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+ return "lea{l}\t{%a2, %k0|%k0, %a2}";
+
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%k0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%k0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %k0|%k0, %2}";
+ }
+ return "add{l}\t{%2, %k0|%k0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "1")
+ (const_string "lea")
+ ; Current assemblers are broken and do not allow @GOTOFF in
+ ; anything but a memory context.
+ (match_operand:SI 2 "pic_symbolic_operand" "")
+ (const_string "lea")
+ (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ ]
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+;; Convert add to the lea pattern to avoid flags dependency.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (plus:DI (match_dup 1) (match_dup 2)) 0)))]
+{
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_lowpart (Pmode, operands[2]);
+})
+
+(define_insn "*addsi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "rmni,rni"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=r,rm")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (PLUS, SImode, operands)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %0|%0, %2}";
+ }
+ return "add{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+;; See the comment above addsi_1_zext for why we use nonimmediate_operand.
+(define_insn "*addsi_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rmni"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (PLUS, SImode, operands)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%k0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%k0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %k0|%k0, %2}";
+ }
+ return "add{l}\t{%2, %k0|%k0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*addsi_3"
+ [(set (reg FLAGS_REG)
+ (compare (neg:SI (match_operand:SI 2 "general_operand" "rmni"))
+ (match_operand:SI 1 "nonimmediate_operand" "%0")))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCZmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %0|%0, %2}";
+ }
+ return "add{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+;; See the comment above addsi_1_zext for why we use nonimmediate_operand.
+(define_insn "*addsi_3_zext"
+ [(set (reg FLAGS_REG)
+ (compare (neg:SI (match_operand:SI 2 "general_operand" "rmni"))
+ (match_operand:SI 1 "nonimmediate_operand" "%0")))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCZmode)
+ && ix86_binary_operator_ok (PLUS, SImode, operands)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%k0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%k0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %k0|%k0, %2}";
+ }
+ return "add{l}\t{%2, %k0|%k0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+; For comparisons against 1, -1 and 128, we may generate better code
+; by converting cmp to add, inc or dec, as done by peephole2; this
+; pattern is matched then. We can't accept a general immediate, because
+; in case of overflow the result would be messed up.
+; This pattern also doesn't hold for 0x80000000, since that value
+; overflows when negated.
+; Also, the carry flag is reversed compared to cmp, so this conversion
+; is valid only for comparisons that do not depend on it.
+(define_insn "*addsi_4"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (clobber (match_scratch:SI 0 "=rm"))]
+ "ix86_match_ccmode (insn, CCGCmode)
+ && (INTVAL (operands[2]) & 0xffffffff) != 0x80000000"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == constm1_rtx)
+ return "inc{l}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == const1_rtx);
+ return "dec{l}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if ((INTVAL (operands[2]) == -128
+ || (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) != 128)))
+ return "sub{l}\t{%2, %0|%0, %2}";
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "add{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*addsi_5"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rmni"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
+ /* Current assemblers are broken and do not allow @GOTOFF in
+ anything but a memory context. */
+ && ! pic_symbolic_operand (operands[2], VOIDmode)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (operands[2] == const1_rtx)
+ return "inc{l}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{l}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %0|%0, %2}";
+ }
+ return "add{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:SI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+(define_expand "addhi3"
+ [(parallel [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (PLUS, HImode, operands); DONE;")
+
+;; %%% After Dave's SUBREG_BYTE stuff goes in, re-enable incb %ah
+;; type optimizations enabled by define-splits. This is not important
+;; for PII, and in fact harmful because of partial register stalls.
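+;; (On P6-family cores, writing a partial register such as %ah and then
+;; reading the containing full register stalls while the halves merge.)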
+
+(define_insn "*addhi_1_lea"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r,r")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0,r")
+ (match_operand:HI 2 "general_operand" "ri,rm,lni")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (PLUS, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ return "#";
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{w}\t{%2, %0|%0, %2}";
+ }
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (eq_attr "alternative" "2")
+ (const_string "lea")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu"))))
+ (set_attr "mode" "HI,HI,SI")])
+
+(define_insn "*addhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (PLUS, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{w}\t{%2, %0|%0, %2}";
+ }
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "HI")])
+
+(define_insn "*addhi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rmni,rni"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=r,rm")
+ (plus:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (PLUS, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{w}\t{%2, %0|%0, %2}";
+ }
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "HI")])
+
+(define_insn "*addhi_3"
+ [(set (reg FLAGS_REG)
+ (compare (neg:HI (match_operand:HI 2 "general_operand" "rmni"))
+ (match_operand:HI 1 "nonimmediate_operand" "%0")))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCZmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{w}\t{%2, %0|%0, %2}";
+ }
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "HI")])
+
+; See comments above addsi_4 for details.
+(define_insn "*addhi_4"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:HI 2 "const_int_operand" "n")))
+ (clobber (match_scratch:HI 0 "=rm"))]
+ "ix86_match_ccmode (insn, CCGCmode)
+ && (INTVAL (operands[2]) & 0xffff) != 0x8000"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == constm1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == const1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if ((INTVAL (operands[2]) == -128
+ || (INTVAL (operands[2]) > 0
+ && INTVAL (operands[2]) != 128)))
+ return "sub{w}\t{%2, %0|%0, %2}";
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "SI")])
+
+
+(define_insn "*addhi_5"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0")
+ (match_operand:HI 2 "general_operand" "rmni"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{w}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return "dec{w}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{w}\t{%2, %0|%0, %2}";
+ }
+ return "add{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:HI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "HI")])
+
+(define_expand "addqi3"
+ [(parallel [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (PLUS, QImode, operands); DONE;")
+
+;; %%% Potential partial reg stall on alternative 2. What to do?
+(define_insn "*addqi_1_lea"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,r,r")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0,r")
+ (match_operand:QI 2 "general_operand" "qn,qmn,rn,ln")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (PLUS, QImode, operands)"
+{
+ int widen = (which_alternative == 2);
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ return "#";
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return widen ? "inc{l}\t%k0" : "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return widen ? "dec{l}\t%k0" : "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ if (widen)
+ return "sub{l}\t{%2, %k0|%k0, %2}";
+ else
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+ if (widen)
+ return "add{l}\t{%k2, %k0|%k0, %k2}";
+ else
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (eq_attr "alternative" "3")
+ (const_string "lea")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu"))))
+ (set_attr "mode" "QI,QI,SI,SI")])
+
+(define_insn "*addqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,r")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn,rn")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (PLUS, QImode, operands)"
+{
+ int widen = (which_alternative == 2);
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return widen ? "inc{l}\t%k0" : "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx);
+ return widen ? "dec{l}\t%k0" : "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'.
+ Exceptions: -128 encodes smaller than 128, so swap sign and op. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ if (widen)
+ return "sub{l}\t{%2, %k0|%k0, %2}";
+ else
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+ if (widen)
+ return "add{l}\t{%k2, %k0|%k0, %k2}";
+ else
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI,QI,SI")])
+
+(define_insn "*addqi_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
+ (plus:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "qn,qnm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[1] == const1_rtx)
+ return "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[1] == constm1_rtx);
+ return "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ {
+ operands[1] = GEN_INT (-INTVAL (operands[1]));
+ return "sub{b}\t{%1, %0|%0, %1}";
+ }
+ return "add{b}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 1 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu1")))
+ (set (attr "memory")
+ (if_then_else (match_operand 1 "memory_operand" "")
+ (const_string "load")
+ (const_string "none")))
+ (set_attr "mode" "QI")])
+
+(define_insn "*addqi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qmni,qni"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (plus:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (PLUS, QImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255));
+ return "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+(define_insn "*addqi_3"
+ [(set (reg FLAGS_REG)
+ (compare (neg:QI (match_operand:QI 2 "general_operand" "qmni"))
+ (match_operand:QI 1 "nonimmediate_operand" "%0")))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCZmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255));
+ return "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+; See comments above addsi_4 for details.
+(define_insn "*addqi_4"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "n")))
+ (clobber (match_scratch:QI 0 "=qm"))]
+ "ix86_match_ccmode (insn, CCGCmode)
+ && (INTVAL (operands[2]) & 0xff) != 0x80"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255))
+ return "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == const1_rtx);
+ return "dec{b}\t%0";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (INTVAL (operands[2]) < 0)
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+
+(define_insn "*addqi_5"
+ [(set (reg FLAGS_REG)
+ (compare
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0")
+ (match_operand:QI 2 "general_operand" "qmni"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{b}\t%0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255));
+ return "dec{b}\t%0";
+ }
+
+ default:
+ /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{b}\t{%2, %0|%0, %2}";
+ }
+ return "add{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+
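+;; The addqi_ext patterns below add into the high byte of a register
+;; pair (bits 8..15, e.g. %ah), written via the %h0 operand modifier.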
+(define_insn "addqi_ext_1"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (plus:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 2 "general_operand" "Qmn")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{b}\t%h0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255));
+ return "dec{b}\t%h0";
+ }
+
+ default:
+ return "add{b}\t{%2, %h0|%h0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+(define_insn "*addqi_ext_1_rex64"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (plus:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 2 "nonmemory_operand" "Qn")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_INCDEC:
+ if (operands[2] == const1_rtx)
+ return "inc{b}\t%h0";
+ else
+ {
+ gcc_assert (operands[2] == constm1_rtx
+ || (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 255));
+ return "dec{b}\t%h0";
+ }
+
+ default:
+ return "add{b}\t{%2, %h0|%h0, %2}";
+ }
+}
+ [(set (attr "type")
+ (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (const_string "incdec")
+ (const_string "alu")))
+ (set_attr "mode" "QI")])
+
+(define_insn "*addqi_ext_2"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (plus:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "%0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI
+ (match_operand 2 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "add{b}\t{%h2, %h0|%h0, %h2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "addxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (plus:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (plus:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "")
+
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (plus:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "")
+
+;; Subtract instructions
+
+;; %%% splits for subditi3
+
+(define_expand "subti3"
+ [(parallel [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (minus:TI (match_operand:TI 1 "nonimmediate_operand" "")
+ (match_operand:TI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "ix86_expand_binary_operator (MINUS, TImode, operands); DONE;")
+
+(define_insn "*subti3_1"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o")
+ (minus:TI (match_operand:TI 1 "nonimmediate_operand" "0,0")
+ (match_operand:TI 2 "x86_64_general_operand" "roe,re")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (MINUS, TImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (minus:TI (match_operand:TI 1 "nonimmediate_operand" "")
+ (match_operand:TI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (reg:CC FLAGS_REG) (compare:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (minus:DI (match_dup 1) (match_dup 2)))])
+ (parallel [(set (match_dup 3)
+ (minus:DI (match_dup 4)
+ (plus:DI (ltu:DI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 5))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_ti (operands+0, 1, operands+0, operands+3);
+ split_ti (operands+1, 1, operands+1, operands+4);
+ split_ti (operands+2, 1, operands+2, operands+5);")
+
+;; %%% splits for subsidi3
+
+(define_expand "subdi3"
+ [(parallel [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "ix86_expand_binary_operator (MINUS, DImode, operands); DONE;")
+
+(define_insn "*subdi3_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o")
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:DI 2 "general_operand" "roiF,riF")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && reload_completed"
+ [(parallel [(set (reg:CC FLAGS_REG) (compare:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (minus:SI (match_dup 1) (match_dup 2)))])
+ (parallel [(set (match_dup 3)
+ (minus:SI (match_dup 4)
+ (plus:SI (ltu:SI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 5))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_di (operands+0, 1, operands+0, operands+3);
+ split_di (operands+1, 1, operands+1, operands+4);
+ split_di (operands+2, 1, operands+2, operands+5);")
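+
+;; Roughly, this split turns a !TARGET_64BIT DImode subtraction into a
+;; word-wise sub/sbb pair (illustrative registers), with the borrow
+;; carried through the flags:
+;;   subl %ecx, %eax    ; low words; sets CF on borrow
+;;   sbbl %ebx, %edx    ; high words minus the borrow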
+
+(define_insn "subdi3_carry_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (plus:DI (match_operand:DI 3 "ix86_carry_flag_operator" "")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)"
+ "sbb{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*subdi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)"
+ "sub{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*subdi_2_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (minus:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (MINUS, DImode, operands)"
+ "sub{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*subdi_3_rex63"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm")))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)
+ && ix86_binary_operator_ok (MINUS, DImode, operands)"
+ "sub{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "subqi3_carry"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (minus:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (plus:QI (match_operand:QI 3 "ix86_carry_flag_operator" "")
+ (match_operand:QI 2 "general_operand" "qi,qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, QImode, operands)"
+ "sbb{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "QI")])
+
+(define_insn "subhi3_carry"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (plus:HI (match_operand:HI 3 "ix86_carry_flag_operator" "")
+ (match_operand:HI 2 "general_operand" "ri,rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, HImode, operands)"
+ "sbb{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "HI")])
+
+(define_insn "subsi3_carry"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (plus:SI (match_operand:SI 3 "ix86_carry_flag_operator" "")
+ (match_operand:SI 2 "general_operand" "ri,rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sbb{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "SI")])
+
+(define_insn "subsi3_carry_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0")
+ (plus:SI (match_operand:SI 3 "ix86_carry_flag_operator" "")
+ (match_operand:SI 2 "general_operand" "ri,rm")))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sbb{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "mode" "SI")])
+
+(define_expand "subsi3"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "ix86_expand_binary_operator (MINUS, SImode, operands); DONE;")
+
+(define_insn "*subsi_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "rim"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_dup 1)
+ (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi_3"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCmode)
+ && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi_3_zext"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "general_operand" "rim")))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_dup 1)
+ (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)
+ && ix86_binary_operator_ok (MINUS, SImode, operands)"
+ "sub{l}\t{%2, %1|%1, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_expand "subhi3"
+ [(parallel [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (MINUS, HImode, operands); DONE;")
+
+(define_insn "*subhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, HImode, operands)"
+ "sub{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*subhi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (minus:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (MINUS, HImode, operands)"
+ "sub{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*subhi_3"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (minus:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCmode)
+ && ix86_binary_operator_ok (MINUS, HImode, operands)"
+ "sub{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_expand "subqi3"
+ [(parallel [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (minus:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (MINUS, QImode, operands); DONE;")
+
+(define_insn "*subqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (minus:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (MINUS, QImode, operands)"
+ "sub{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*subqi_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
+ (minus:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "qn,qmn")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "sub{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*subqi_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (minus:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "general_operand" "qi,qm"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (minus:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (MINUS, QImode, operands)"
+ "sub{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*subqi_3"
+ [(set (reg FLAGS_REG)
+ (compare (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "general_operand" "qi,qm")))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (minus:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCmode)
+ && ix86_binary_operator_ok (MINUS, QImode, operands)"
+ "sub{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "subxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (minus:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (minus:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "")
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (minus:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "")
+
+;; Multiply instructions
+
+(define_expand "muldi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*muldi3_1_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (mult:DI (match_operand:DI 1 "nonimmediate_operand" "%rm,rm,0")
+ (match_operand:DI 2 "x86_64_general_operand" "K,e,mr")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "@
+ imul{q}\t{%2, %1, %0|%0, %1, %2}
+ imul{q}\t{%2, %1, %0|%0, %1, %2}
+ imul{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imul")
+ (set_attr "prefix_0f" "0,0,1")
+ (set (attr "athlon_decode")
+ (cond [(eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (eq_attr "alternative" "1")
+ (const_string "vector")
+ (and (eq_attr "alternative" "2")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "vector")]
+ (const_string "direct")))
+ (set_attr "mode" "DI")])
+
+(define_expand "mulsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*mulsi3_1"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (mult:SI (match_operand:SI 1 "nonimmediate_operand" "%rm,rm,0")
+ (match_operand:SI 2 "general_operand" "K,i,mr")))
+ (clobber (reg:CC FLAGS_REG))]
+ "GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM"
+ "@
+ imul{l}\t{%2, %1, %0|%0, %1, %2}
+ imul{l}\t{%2, %1, %0|%0, %1, %2}
+ imul{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imul")
+ (set_attr "prefix_0f" "0,0,1")
+ (set (attr "athlon_decode")
+ (cond [(eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (eq_attr "alternative" "1")
+ (const_string "vector")
+ (and (eq_attr "alternative" "2")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "vector")]
+ (const_string "direct")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*mulsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (zero_extend:DI
+ (mult:SI (match_operand:SI 1 "nonimmediate_operand" "%rm,rm,0")
+ (match_operand:SI 2 "general_operand" "K,i,mr"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "@
+ imul{l}\t{%2, %1, %k0|%k0, %1, %2}
+ imul{l}\t{%2, %1, %k0|%k0, %1, %2}
+ imul{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "imul")
+ (set_attr "prefix_0f" "0,0,1")
+ (set (attr "athlon_decode")
+ (cond [(eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (eq_attr "alternative" "1")
+ (const_string "vector")
+ (and (eq_attr "alternative" "2")
+ (match_operand 1 "memory_operand" ""))
+ (const_string "vector")]
+ (const_string "direct")))
+ (set_attr "mode" "SI")])
+
+(define_expand "mulhi3"
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_HIMODE_MATH"
+ "")
+
+(define_insn "*mulhi3_1"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "%rm,rm,0")
+ (match_operand:HI 2 "general_operand" "K,i,mr")))
+ (clobber (reg:CC FLAGS_REG))]
+ "GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM"
+ "@
+ imul{w}\t{%2, %1, %0|%0, %1, %2}
+ imul{w}\t{%2, %1, %0|%0, %1, %2}
+ imul{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imul")
+ (set_attr "prefix_0f" "0,0,1")
+ (set (attr "athlon_decode")
+ (cond [(eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (eq_attr "alternative" "1,2")
+ (const_string "vector")]
+ (const_string "direct")))
+ (set_attr "mode" "HI")])
+
+(define_expand "mulqi3"
+ [(parallel [(set (match_operand:QI 0 "register_operand" "")
+ (mult:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "")
+
+(define_insn "*mulqi3_1"
+ [(set (match_operand:QI 0 "register_operand" "=a")
+ (mult:QI (match_operand:QI 1 "nonimmediate_operand" "%0")
+ (match_operand:QI 2 "nonimmediate_operand" "qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{b}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "direct")))
+ (set_attr "mode" "QI")])
+
+(define_expand "umulqihi3"
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" ""))
+ (zero_extend:HI
+ (match_operand:QI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "")
+
+(define_insn "*umulqihi3_1"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{b}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "direct")))
+ (set_attr "mode" "QI")])
+
+(define_expand "mulqihi3"
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" ""))
+ (sign_extend:HI (match_operand:QI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "")
+
+(define_insn "*mulqihi3_insn"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "imul{b}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "direct")))
+ (set_attr "mode" "QI")])
+
+(define_expand "umulditi3"
+ [(parallel [(set (match_operand:TI 0 "register_operand" "")
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" ""))
+ (zero_extend:TI
+ (match_operand:DI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*umulditi3_insn"
+ [(set (match_operand:TI 0 "register_operand" "=A")
+ (mult:TI (zero_extend:TI (match_operand:DI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:TI (match_operand:DI 2 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{q}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "DI")])
+
+;; We can't use this pattern in 64-bit mode, since it results in two separate 32-bit registers.
+(define_expand "umulsidi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "!TARGET_64BIT"
+ "")
+
+(define_insn "*umulsidi3_insn"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+(define_expand "mulditi3"
+ [(parallel [(set (match_operand:TI 0 "register_operand" "")
+ (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" ""))
+ (sign_extend:TI
+ (match_operand:DI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*mulditi3_insn"
+ [(set (match_operand:TI 0 "register_operand" "=A")
+ (mult:TI (sign_extend:TI (match_operand:DI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:TI (match_operand:DI 2 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "imul{q}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "DI")])
+
+(define_expand "mulsidi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "!TARGET_64BIT"
+ "")
+
+(define_insn "*mulsidi3_insn"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "imul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+(define_expand "umuldi3_highpart"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" ""))
+ (zero_extend:TI
+ (match_operand:DI 2 "register_operand" "")))
+ (const_int 64))))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*umuldi3_highpart_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" "%a"))
+ (zero_extend:TI
+ (match_operand:DI 2 "nonimmediate_operand" "rm")))
+ (const_int 64))))
+ (clobber (match_scratch:DI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{q}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "DI")])
+
+(define_expand "umulsi3_highpart"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*umulsi3_highpart_insn"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "%a"))
+ (zero_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM"
+ "mul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*umulsi3_highpart_zext"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "%a"))
+ (zero_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (const_int 32)))))
+ (clobber (match_scratch:SI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "mul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set_attr "length_immediate" "0")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+(define_expand "smuldi3_highpart"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "=d")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" ""))
+ (sign_extend:TI
+ (match_operand:DI 2 "register_operand" "")))
+ (const_int 64))))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*smuldi3_highpart_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "nonimmediate_operand" "%a"))
+ (sign_extend:TI
+ (match_operand:DI 2 "nonimmediate_operand" "rm")))
+ (const_int 64))))
+ (clobber (match_scratch:DI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "imul{q}\t%2"
+ [(set_attr "type" "imul")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "DI")])
+
+(define_expand "smulsi3_highpart"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*smulsi3_highpart_insn"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "%a"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM"
+ "imul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*smulsi3_highpart_zext"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "%a"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (const_int 32)))))
+ (clobber (match_scratch:SI 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "imul{l}\t%2"
+ [(set_attr "type" "imul")
+ (set (attr "athlon_decode")
+ (if_then_else (eq_attr "cpu" "athlon")
+ (const_string "vector")
+ (const_string "double")))
+ (set_attr "mode" "SI")])
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "mulxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (mult:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "")
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (mult:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "")
+
+;; Divide instructions
+
+(define_insn "divqi3"
+ [(set (match_operand:QI 0 "register_operand" "=a")
+ (div:QI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonimmediate_operand" "qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "idiv{b}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "QI")])
+
+(define_insn "udivqi3"
+ [(set (match_operand:QI 0 "register_operand" "=a")
+ (udiv:QI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonimmediate_operand" "qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "div{b}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "QI")])
+
+;; The patterns that match these are at the end of this file.
+
+(define_expand "divxf3"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (div:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
+ "TARGET_80387"
+ "")
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (div:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "")
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (div:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "")
+
+;; Remainder instructions.
+
+(define_expand "divmoddi4"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (div:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonimmediate_operand" "")))
+ (set (match_operand:DI 3 "register_operand" "")
+ (mod:DI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+;; Allow the parameter to come in either eax or edx, to avoid extra moves.
+;; Penalize the eax case slightly because it results in worse scheduling
+;; of code.
+(define_insn "*divmoddi4_nocltd_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=&a,?a")
+ (div:DI (match_operand:DI 2 "register_operand" "1,0")
+ (match_operand:DI 3 "nonimmediate_operand" "rm,rm")))
+ (set (match_operand:DI 1 "register_operand" "=&d,&d")
+ (mod:DI (match_dup 2) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && !optimize_size && !TARGET_USE_CLTD"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*divmoddi4_cltd_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (div:DI (match_operand:DI 2 "register_operand" "a")
+ (match_operand:DI 3 "nonimmediate_operand" "rm")))
+ (set (match_operand:DI 1 "register_operand" "=&d")
+ (mod:DI (match_dup 2) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (optimize_size || TARGET_USE_CLTD)"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*divmoddi_noext_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (div:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:DI 3 "register_operand" "=d")
+ (mod:DI (match_dup 1) (match_dup 2)))
+ (use (match_operand:DI 4 "register_operand" "3"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "idiv{q}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (div:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonimmediate_operand" "")))
+ (set (match_operand:DI 3 "register_operand" "")
+ (mod:DI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashiftrt:DI (match_dup 4) (const_int 63)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_dup 0)
+ (div:DI (reg:DI 0) (match_dup 2)))
+ (set (match_dup 3)
+ (mod:DI (reg:DI 0) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ /* Avoid use of cltd in favor of a mov+shift. */
+ if (!TARGET_USE_CLTD && !optimize_size)
+ {
+ if (true_regnum (operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ else
+ emit_move_insn (operands[3], operands[1]);
+ operands[4] = operands[3];
+ }
+ else
+ {
+ gcc_assert (!true_regnum (operands[1]));
+ operands[4] = operands[1];
+ }
+})
+
+
+(define_expand "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (div:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonimmediate_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+;; Allow the parameter to come in either eax or edx, to avoid extra moves.
+;; Penalize the eax case slightly because it results in worse scheduling
+;; of code.
+(define_insn "*divmodsi4_nocltd"
+ [(set (match_operand:SI 0 "register_operand" "=&a,?a")
+ (div:SI (match_operand:SI 2 "register_operand" "1,0")
+ (match_operand:SI 3 "nonimmediate_operand" "rm,rm")))
+ (set (match_operand:SI 1 "register_operand" "=&d,&d")
+ (mod:SI (match_dup 2) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))]
+ "!optimize_size && !TARGET_USE_CLTD"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*divmodsi4_cltd"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (div:SI (match_operand:SI 2 "register_operand" "a")
+ (match_operand:SI 3 "nonimmediate_operand" "rm")))
+ (set (match_operand:SI 1 "register_operand" "=&d")
+ (mod:SI (match_dup 2) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))]
+ "optimize_size || TARGET_USE_CLTD"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*divmodsi_noext"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (use (match_operand:SI 4 "register_operand" "3"))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "idiv{l}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (div:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonimmediate_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 4) (const_int 31)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_dup 0)
+ (div:SI (reg:SI 0) (match_dup 2)))
+ (set (match_dup 3)
+ (mod:SI (reg:SI 0) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ /* Avoid use of cltd in favor of a mov+shift. */
+ if (!TARGET_USE_CLTD && !optimize_size)
+ {
+ if (true_regnum (operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+ else
+ emit_move_insn (operands[3], operands[1]);
+ operands[4] = operands[3];
+ }
+ else
+ {
+ gcc_assert (!true_regnum (operands[1]));
+ operands[4] = operands[1];
+ }
+})
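+
+;; The two sequences the preceding splits choose between, roughly, for a
+;; signed 32-bit divmod (divisor register illustrative):
+;;   cltd               ; TARGET_USE_CLTD: sign-extend eax into edx
+;;   idivl %ecx
+;; versus the mov+shift form:
+;;   movl %eax, %edx
+;;   sarl $31, %edx     ; replicate the sign bit into all of edx
+;;   idivl %ecx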
+;; %%% Split me.
+(define_insn "divmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (div:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:HI 3 "register_operand" "=&d")
+ (mod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "cwtd\;idiv{w}\t%2"
+ [(set_attr "type" "multi")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "SI")])
+
+(define_insn "udivmoddi4"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (udiv:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:DI 3 "register_operand" "=&d")
+ (umod:DI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "xor{q}\t%3, %3\;div{q}\t%2"
+ [(set_attr "type" "multi")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "DI")])
+
+(define_insn "*udivmoddi4_noext"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (udiv:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:DI 3 "register_operand" "=d")
+ (umod:DI (match_dup 1) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "div{q}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (udiv:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonimmediate_operand" "")))
+ (set (match_operand:DI 3 "register_operand" "")
+ (umod:DI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 3) (const_int 0))
+ (parallel [(set (match_dup 0)
+ (udiv:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (umod:DI (match_dup 1) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_insn "udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=&d")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "xor{l}\t%3, %3\;div{l}\t%2"
+ [(set_attr "type" "multi")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "SI")])
+
+(define_insn "*udivmodsi4_noext"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "div{l}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonimmediate_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(set (match_dup 3) (const_int 0))
+ (parallel [(set (match_dup 0)
+ (udiv:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (use (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_expand "udivmodhi4"
+ [(set (match_dup 4) (const_int 0))
+ (parallel [(set (match_operand:HI 0 "register_operand" "")
+ (udiv:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "nonimmediate_operand" "")))
+ (set (match_operand:HI 3 "register_operand" "")
+ (umod:HI (match_dup 1) (match_dup 2)))
+ (use (match_dup 4))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_HIMODE_MATH"
+ "operands[4] = gen_reg_rtx (HImode);")
+
+(define_insn "*udivmodhi_noext"
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (udiv:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "nonimmediate_operand" "rm")))
+ (set (match_operand:HI 3 "register_operand" "=d")
+ (umod:HI (match_dup 1) (match_dup 2)))
+ (use (match_operand:HI 4 "register_operand" "3"))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "div{w}\t%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "HI")])
+
+;; We cannot use div/idiv for double division, because it causes a
+;; "division by zero" fault on overflow, and that's not what we expect
+;; from truncate.  Since true (non-truncating) double division is never
+;; generated, we can't create this insn anyway.
+;
+;(define_insn ""
+; [(set (match_operand:SI 0 "register_operand" "=a")
+; (truncate:SI
+; (udiv:DI (match_operand:DI 1 "register_operand" "A")
+; (zero_extend:DI
+; (match_operand:SI 2 "nonimmediate_operand" "rm")))))
+; (set (match_operand:SI 3 "register_operand" "=d")
+; (truncate:SI
+; (umod:DI (match_dup 1) (zero_extend:DI (match_dup 2)))))
+; (clobber (reg:CC FLAGS_REG))]
+; ""
+; "div{l}\t{%2, %0|%0, %2}"
+; [(set_attr "type" "idiv")])
+
+;;- Logical AND instructions
+
+;; On Pentium, "test imm, reg" is pairable only with eax, ax, and al.
+;; Note that this excludes ah.
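+;; (Encoding note: "test $imm, %al" / "%ax" / "%eax" uses the short
+;; A8/A9 accumulator forms with no ModRM byte, which is why the
+;; accumulator alternatives below carry modrm "0".)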
+
+(define_insn "*testdi_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:DI (match_operand:DI 0 "nonimmediate_operand" "%!*a,r,!*a,r,rm")
+ (match_operand:DI 1 "x86_64_szext_general_operand" "Z,Z,e,e,re"))
+ (const_int 0)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ test{l}\t{%k1, %k0|%k0, %k1}
+ test{l}\t{%k1, %k0|%k0, %k1}
+ test{q}\t{%1, %0|%0, %1}
+ test{q}\t{%1, %0|%0, %1}
+ test{q}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,0,1,1")
+ (set_attr "mode" "SI,SI,DI,DI,DI")
+ (set_attr "pent_pair" "uv,np,uv,np,uv")])
+
+(define_insn "testsi_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI (match_operand:SI 0 "nonimmediate_operand" "%!*a,r,rm")
+ (match_operand:SI 1 "general_operand" "in,in,rin"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "test{l}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1")
+ (set_attr "mode" "SI")
+ (set_attr "pent_pair" "uv,np,uv")])
+
+(define_expand "testsi_ccno_1"
+ [(set (reg:CCNO FLAGS_REG)
+ (compare:CCNO
+ (and:SI (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" ""))
+ (const_int 0)))]
+ ""
+ "")
+
+(define_insn "*testhi_1"
+ [(set (reg FLAGS_REG)
+ (compare (and:HI (match_operand:HI 0 "nonimmediate_operand" "%!*a,r,rm")
+ (match_operand:HI 1 "general_operand" "n,n,rn"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "test{w}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1")
+ (set_attr "mode" "HI")
+ (set_attr "pent_pair" "uv,np,uv")])
+
+(define_expand "testqi_ccz_1"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (and:QI (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "nonmemory_operand" ""))
+ (const_int 0)))]
+ ""
+ "")
+
+(define_insn "*testqi_1_maybe_si"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm,r")
+ (match_operand:QI 1 "general_operand" "n,n,qn,n"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode)"
+{
+ if (which_alternative == 3)
+ {
+ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 0)
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+ return "test{l}\t{%1, %k0|%k0, %1}";
+ }
+ return "test{b}\t{%1, %0|%0, %1}";
+}
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1,1")
+ (set_attr "mode" "QI,QI,QI,SI")
+ (set_attr "pent_pair" "uv,np,uv,np")])
+
+(define_insn "*testqi_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm")
+ (match_operand:QI 1 "general_operand" "n,n,qn"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1")
+ (set_attr "mode" "QI")
+ (set_attr "pent_pair" "uv,np,uv")])
+
+(define_expand "testqi_ext_ccno_0"
+ [(set (reg:CCNO FLAGS_REG)
+ (compare:CCNO
+ (and:SI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 1 "const_int_operand" ""))
+ (const_int 0)))]
+ ""
+ "")
+
+(define_insn "*testqi_ext_0"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 1 "const_int_operand" "n"))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%1, %h0|%h0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "mode" "QI")
+ (set_attr "length_immediate" "1")
+ (set_attr "pent_pair" "np")])
+
+(define_insn "*testqi_ext_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand:QI 1 "general_operand" "Qm")))
+ (const_int 0)))]
+ "!TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "test{b}\t{%1, %h0|%h0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "mode" "QI")])
+
+(define_insn "*testqi_ext_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand:QI 1 "register_operand" "Q")))
+ (const_int 0)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%1, %h0|%h0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "mode" "QI")])
+
+(define_insn "*testqi_ext_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI
+ (zero_extract:SI
+ (match_operand 0 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8)))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%h1, %h0|%h0, %h1}"
+ [(set_attr "type" "test")
+ (set_attr "mode" "QI")])
+
+;; Combine likes to form bit extractions for some tests. Humor it.
+(define_insn "*testqi_ext_3"
+ [(set (reg FLAGS_REG)
+ (compare (zero_extract:SI
+ (match_operand 0 "nonimmediate_operand" "rm")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (const_int 0)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[2]) >= 0
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32
+ && (GET_MODE (operands[0]) == SImode
+ || (TARGET_64BIT && GET_MODE (operands[0]) == DImode)
+ || GET_MODE (operands[0]) == HImode
+ || GET_MODE (operands[0]) == QImode)"
+ "#")
+
+(define_insn "*testqi_ext_3_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (zero_extract:DI
+ (match_operand 0 "nonimmediate_operand" "rm")
+ (match_operand:DI 1 "const_int_operand" "")
+ (match_operand:DI 2 "const_int_operand" ""))
+ (const_int 0)))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCNOmode)
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[2]) >= 0
+ /* Ensure that the resulting mask is a zero- or sign-extended operand. */
+ && (INTVAL (operands[1]) + INTVAL (operands[2]) <= 32
+ || (INTVAL (operands[1]) + INTVAL (operands[2]) == 64
+ && INTVAL (operands[1]) > 32))
+ && (GET_MODE (operands[0]) == SImode
+ || GET_MODE (operands[0]) == DImode
+ || GET_MODE (operands[0]) == HImode
+ || GET_MODE (operands[0]) == QImode)"
+ "#")
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(zero_extract
+ (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+{
+ rtx val = operands[2];
+ HOST_WIDE_INT len = INTVAL (operands[3]);
+ HOST_WIDE_INT pos = INTVAL (operands[4]);
+ HOST_WIDE_INT mask;
+ enum machine_mode mode, submode;
+
+ mode = GET_MODE (val);
+ if (GET_CODE (val) == MEM)
+ {
+ /* ??? Combine likes to put non-volatile mem extractions in QImode
+ no matter the size of the test. So find a mode that works. */
+ if (! MEM_VOLATILE_P (val))
+ {
+ mode = smallest_mode_for_size (pos + len, MODE_INT);
+ val = adjust_address (val, mode, 0);
+ }
+ }
+ else if (GET_CODE (val) == SUBREG
+ && (submode = GET_MODE (SUBREG_REG (val)),
+ GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (submode))
+ && pos + len <= GET_MODE_BITSIZE (submode))
+ {
+ /* Narrow a paradoxical subreg to prevent partial register stalls. */
+ mode = submode;
+ val = SUBREG_REG (val);
+ }
+ else if (mode == HImode && pos + len <= 8)
+ {
+ /* Small HImode tests can be converted to QImode. */
+ mode = QImode;
+ val = gen_lowpart (QImode, val);
+ }
+
+ if (len == HOST_BITS_PER_WIDE_INT)
+ mask = -1;
+ else
+ mask = ((HOST_WIDE_INT)1 << len) - 1;
+ mask <<= pos;
+
+ operands[2] = gen_rtx_AND (mode, val, gen_int_mode (mask, mode));
+})
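+
+;; Worked example of the mask arithmetic above: a single-bit test with
+;; len == 1, pos == 3 gives mask = ((HOST_WIDE_INT) 1 << 1) - 1 = 1,
+;; then mask <<= 3 yields 8, so the split leaves an AND-with-8 compare
+;; for the following splitters, e.g. a plain "testb $8, %al".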
+
+;; Convert HImode/SImode test instructions with an immediate to QImode
+;; ones.  The i386 cannot encode test with an 8-bit sign-extended
+;; immediate, so this is a relatively important trick.
+;; Do the conversion only post-reload to avoid limiting the register
+;; class to QI regs.
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
+ "reload_completed
+ && QI_REG_P (operands[2])
+ && GET_MODE (operands[2]) != QImode
+ && ((ix86_match_ccmode (insn, CCZmode)
+ && !(INTVAL (operands[3]) & ~(255 << 8)))
+ || (ix86_match_ccmode (insn, CCNOmode)
+ && !(INTVAL (operands[3]) & ~(127 << 8))))"
+ [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI (zero_extract:SI (match_dup 2) (const_int 8) (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = gen_int_mode (INTVAL (operands[3]) >> 8, SImode);")
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
+ "reload_completed
+ && GET_MODE (operands[2]) != QImode
+ && (!REG_P (operands[2]) || ANY_QI_REG_P (operands[2]))
+ && ((ix86_match_ccmode (insn, CCZmode)
+ && !(INTVAL (operands[3]) & ~255))
+ || (ix86_match_ccmode (insn, CCNOmode)
+ && !(INTVAL (operands[3]) & ~127)))"
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (QImode, operands[2]);
+ operands[3] = gen_lowpart (QImode, operands[3]);")
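+
+;; Example of the size win: "testl $0x100, %eax" needs a 32-bit
+;; immediate (opcode A9 plus imm32, 5 bytes), but the first split above
+;; rewrites it as a test of the high byte, since 0x100 >> 8 == 1:
+;;   testb $1, %ah      ; F6 /0 ib, 3 bytes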
+
+
+;; %%% This used to optimize known byte-wide and operations to memory,
+;; and sometimes to QImode registers. If this is considered useful,
+;; it should be done with splitters.
+
+(define_expand "anddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "x86_64_szext_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "ix86_expand_binary_operator (AND, DImode, operands); DONE;")
+
+(define_insn "*anddi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rm,r,r")
+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,0,qm")
+ (match_operand:DI 2 "x86_64_szext_general_operand" "Z,re,rm,L")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (AND, DImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ {
+ enum machine_mode mode;
+
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+ if (INTVAL (operands[2]) == 0xff)
+ mode = QImode;
+ else
+ {
+ gcc_assert (INTVAL (operands[2]) == 0xffff);
+ mode = HImode;
+ }
+
+ operands[1] = gen_lowpart (mode, operands[1]);
+ if (mode == QImode)
+ return "movz{bq|x}\t{%1,%0|%0, %1}";
+ else
+ return "movz{wq|x}\t{%1,%0|%0, %1}";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ if (get_attr_mode (insn) == MODE_SI)
+ return "and{l}\t{%k2, %k0|%k0, %k2}";
+ else
+ return "and{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set_attr "type" "alu,alu,alu,imovx")
+ (set_attr "length_immediate" "*,*,*,0")
+ (set_attr "mode" "SI,DI,DI,DI")])
+
+(define_insn "*anddi_2"
+ [(set (reg FLAGS_REG)
+ (compare (and:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:DI 2 "x86_64_szext_general_operand" "Z,rem,re"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=r,r,rm")
+ (and:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, DImode, operands)"
+ "@
+ and{l}\t{%k2, %k0|%k0, %k2}
+ and{q}\t{%2, %0|%0, %2}
+ and{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI,DI,DI")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (AND, SImode, operands); DONE;")
+
+(define_insn "*andsi_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r,r")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,qm")
+ (match_operand:SI 2 "general_operand" "ri,rm,L")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (AND, SImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ {
+ enum machine_mode mode;
+
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+ if (INTVAL (operands[2]) == 0xff)
+ mode = QImode;
+ else
+ {
+ gcc_assert (INTVAL (operands[2]) == 0xffff);
+ mode = HImode;
+ }
+
+ operands[1] = gen_lowpart (mode, operands[1]);
+ if (mode == QImode)
+ return "movz{bl|x}\t{%1,%0|%0, %1}";
+ else
+ return "movz{wl|x}\t{%1,%0|%0, %1}";
+ }
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ return "and{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set_attr "type" "alu,alu,imovx")
+ (set_attr "length_immediate" "*,*,0")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (and (match_dup 0)
+ (const_int -65536)))
+ (clobber (reg:CC FLAGS_REG))]
+ "optimize_size || (TARGET_FAST_PREFIX && !TARGET_PARTIAL_REG_STALL)"
+ [(set (strict_low_part (match_dup 1)) (const_int 0))]
+ "operands[1] = gen_lowpart (HImode, operands[0]);")
+
+(define_split
+ [(set (match_operand 0 "ext_register_operand" "")
+ (and (match_dup 0)
+ (const_int -256)))
+ (clobber (reg:CC FLAGS_REG))]
+ "(optimize_size || !TARGET_PARTIAL_REG_STALL) && reload_completed"
+ [(set (strict_low_part (match_dup 1)) (const_int 0))]
+ "operands[1] = gen_lowpart (QImode, operands[0]);")
+
+(define_split
+ [(set (match_operand 0 "ext_register_operand" "")
+ (and (match_dup 0)
+ (const_int -65281)))
+ (clobber (reg:CC FLAGS_REG))]
+ "(optimize_size || !TARGET_PARTIAL_REG_STALL) && reload_completed"
+ [(parallel [(set (zero_extract:SI (match_dup 0)
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI (match_dup 0)
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI (match_dup 0)
+ (const_int 8)
+ (const_int 8))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);")
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+(define_insn "*andsi_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (AND, SImode, operands)"
+ "and{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*andsi_2"
+ [(set (reg FLAGS_REG)
+ (compare (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=r,rm")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, SImode, operands)"
+ "and{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+(define_insn "*andsi_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, SImode, operands)"
+ "and{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_expand "andhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (AND, HImode, operands); DONE;")
+
+(define_insn "*andhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r,r")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0,qm")
+ (match_operand:HI 2 "general_operand" "ri,rm,L")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (AND, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOVX:
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+ gcc_assert (INTVAL (operands[2]) == 0xff);
+ return "movz{bl|x}\t{%b1, %k0|%k0, %b1}";
+
+ default:
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+
+ return "and{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set_attr "type" "alu,alu,imovx")
+ (set_attr "length_immediate" "*,*,0")
+ (set_attr "mode" "HI,HI,SI")])
+
+(define_insn "*andhi_2"
+ [(set (reg FLAGS_REG)
+ (compare (and:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=r,rm")
+ (and:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, HImode, operands)"
+ "and{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_expand "andqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (and:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (AND, QImode, operands); DONE;")
+
+;; %%% Potential partial reg stall on alternative 2. What to do?
+(define_insn "*andqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,r")
+ (and:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qi,qmi,ri")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (AND, QImode, operands)"
+ "@
+ and{b}\t{%2, %0|%0, %2}
+ and{b}\t{%2, %0|%0, %2}
+ and{l}\t{%k2, %k0|%k0, %k2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI,QI,SI")])
+
+(define_insn "*andqi_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "qi,qmi")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "and{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*andqi_2_maybe_si"
+ [(set (reg FLAGS_REG)
+ (compare (and:QI
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi,i"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm,*r")
+ (and:QI (match_dup 1) (match_dup 2)))]
+ "ix86_binary_operator_ok (AND, QImode, operands)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode)"
+{
+ if (which_alternative == 2)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ return "and{l}\t{%2, %k0|%k0, %2}";
+ }
+ return "and{b}\t{%2, %0|%0, %2}";
+}
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI,QI,SI")])
+
+(define_insn "*andqi_2"
+ [(set (reg FLAGS_REG)
+ (compare (and:QI
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (and:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, QImode, operands)"
+ "and{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*andqi_2_slp"
+ [(set (reg FLAGS_REG)
+ (compare (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "+q,qm")
+ (match_operand:QI 1 "nonimmediate_operand" "qmi,qi"))
+ (const_int 0)))
+ (set (strict_low_part (match_dup 0))
+ (and:QI (match_dup 0) (match_dup 1)))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "and{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+;; ??? A bug in recog prevents it from recognizing a const_int as an
+;; operand to zero_extend in andqi_ext_1. It was checking explicitly
+;; for a QImode operand, which of course failed.
+
+(define_insn "andqi_ext_0"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 2 "const_int_operand" "n")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "and{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "QI")])
+
+;; Generated by a peephole that translates test into and.  This shows up
+;; often in FP comparisons.
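+;;
+;; E.g. after "fnstsw %ax" the x87 condition bits sit in %ah and are
+;; examined with "test $0x45, %ah"; when %ah is otherwise dead, a peephole
+;; can rewrite the test as the flag-setting and matched here.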
+
+(define_insn "*andqi_ext_0_cc"
+ [(set (reg FLAGS_REG)
+ (compare
+ (and:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_dup 1)
+ (const_int 8)
+ (const_int 8))
+ (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "and{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*andqi_ext_1"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand:QI 2 "general_operand" "Qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "and{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*andqi_ext_1_rex64"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand 2 "ext_register_operand" "Q"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "and{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*andqi_ext_2"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "%0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI
+ (match_operand 2 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "and{b}\t{%h2, %h0|%h0, %h2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+;; Convert wide AND instructions with an immediate operand to shorter QImode
+;; equivalents when possible.
+;; Don't do the splitting with memory operands, since it introduces a risk
+;; of memory mismatch stalls.  We may want to do the splitting when optimizing
+;; for size, but that can (should?) be handled by generic code instead.
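+;; For example (a sketch of the win): "and $0xffff30ff, %eax", whose mask
+;; only clears bits inside 15:8, can become the 3-byte "and $0x30, %ah"
+;; via the first split below.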
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (and (match_operand 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(~INTVAL (operands[2]) & ~(255 << 8))
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (zero_extract:SI (match_dup 0) (const_int 8) (const_int 8))
+ (and:SI (zero_extract:SI (match_dup 1)
+ (const_int 8) (const_int 8))
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode);")
+
+;; Since AND can be encoded with a sign-extended 8-bit immediate, this split
+;; is only profitable when the 7th bit of the mask is not set.
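+;;
+;; A sketch of the encodings (using %ebx so the short %eax/%al accumulator
+;; forms do not apply):
+;;   and $0xffffff0f, %ebx   ; bit 7 clear: -241 needs imm32
+;;                           ; (81 /4 id, 6 bytes)
+;;   and $0x0f, %bl          ; after the split: 80 /4 ib, 3 bytes
+;;   and $0xffffff80, %ebx   ; bit 7 set: -128 fits a sign-extended imm8
+;;                           ; (83 /4 ib, 3 bytes), so no byte-split win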
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (and (match_operand 1 "general_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ANY_QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(~INTVAL (operands[2]) & ~255)
+ && !(INTVAL (operands[2]) & 128)
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (strict_low_part (match_dup 0))
+ (and:QI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (QImode, operands[0]);
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ operands[2] = gen_lowpart (QImode, operands[2]);")
+
+;; Logical inclusive OR instructions
+
+;; %%% This used to optimize known byte-wide or operations to memory.
+;; If this is considered useful, it should be done with splitters.
+
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (ior:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "ix86_expand_binary_operator (IOR, DImode, operands); DONE;")
+
+(define_insn "*iordi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (ior:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rme")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && ix86_binary_operator_ok (IOR, DImode, operands)"
+ "or{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*iordi_2_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (ior:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "rem,re"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=r,rm")
+ (ior:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, DImode, operands)"
+ "or{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_insn "*iordi_3_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (ior:DI (match_operand:DI 1 "nonimmediate_operand" "%0")
+ (match_operand:DI 2 "x86_64_general_operand" "rem"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, DImode, operands)"
+ "or{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (ior:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (IOR, SImode, operands); DONE;")
+
+(define_insn "*iorsi_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rmi")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (IOR, SImode, operands)"
+ "or{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+(define_insn "*iorsi_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=rm")
+ (zero_extend:DI
+ (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (IOR, SImode, operands)"
+ "or{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*iorsi_1_zext_imm"
+ [(set (match_operand:DI 0 "register_operand" "=rm")
+ (ior:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%0"))
+ (match_operand:DI 2 "x86_64_zext_immediate_operand" "Z")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "or{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*iorsi_2"
+ [(set (reg FLAGS_REG)
+ (compare (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=r,rm")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, SImode, operands)"
+ "or{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+;; ??? The special case for an immediate operand is missing - it is tricky.
+(define_insn "*iorsi_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (ior:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, SImode, operands)"
+ "or{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*iorsi_2_zext_imm"
+ [(set (reg FLAGS_REG)
+ (compare (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand 2 "x86_64_zext_immediate_operand" "Z"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (zero_extend:DI (match_dup 1)) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, SImode, operands)"
+ "or{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*iorsi_3"
+ [(set (reg FLAGS_REG)
+ (compare (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "or{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_expand "iorhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (ior:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (IOR, HImode, operands); DONE;")
+
+(define_insn "*iorhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,m")
+ (ior:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rmi,ri")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (IOR, HImode, operands)"
+ "or{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*iorhi_2"
+ [(set (reg FLAGS_REG)
+ (compare (ior:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=r,rm")
+ (ior:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, HImode, operands)"
+ "or{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*iorhi_3"
+ [(set (reg FLAGS_REG)
+ (compare (ior:HI (match_operand:HI 1 "nonimmediate_operand" "%0")
+ (match_operand:HI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "or{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_expand "iorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (ior:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (IOR, QImode, operands); DONE;")
+
+;; %%% Potential partial reg stall on alternative 2. What to do?
+(define_insn "*iorqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=q,m,r")
+ (ior:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qmi,qi,ri")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (IOR, QImode, operands)"
+ "@
+ or{b}\t{%2, %0|%0, %2}
+ or{b}\t{%2, %0|%0, %2}
+ or{l}\t{%k2, %k0|%k0, %k2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI,QI,SI")])
+
+(define_insn "*iorqi_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+q,m"))
+ (ior:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "qmi,qi")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "or{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_2"
+ [(set (reg FLAGS_REG)
+ (compare (ior:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (ior:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (IOR, QImode, operands)"
+ "or{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_2_slp"
+ [(set (reg FLAGS_REG)
+ (compare (ior:QI (match_operand:QI 0 "nonimmediate_operand" "+q,qm")
+ (match_operand:QI 1 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (strict_low_part (match_dup 0))
+ (ior:QI (match_dup 0) (match_dup 1)))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "or{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_3"
+ [(set (reg FLAGS_REG)
+ (compare (ior:QI (match_operand:QI 1 "nonimmediate_operand" "%0")
+ (match_operand:QI 2 "general_operand" "qim"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "or{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "iorqi_ext_0"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (ior:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 2 "const_int_operand" "n")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "or{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_ext_1"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (ior:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand:QI 2 "general_operand" "Qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "or{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_ext_1_rex64"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (ior:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand 2 "ext_register_operand" "Q"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "or{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*iorqi_ext_2"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (ior:SI
+ (zero_extract:SI (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI (match_operand 2 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))))
+ (clobber (reg:CC FLAGS_REG))]
+ "(!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "ior{b}\t{%h2, %h0|%h0, %h2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (ior (match_operand 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(INTVAL (operands[2]) & ~(255 << 8))
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (zero_extract:SI (match_dup 0) (const_int 8) (const_int 8))
+ (ior:SI (zero_extract:SI (match_dup 1)
+ (const_int 8) (const_int 8))
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode);")
+
+;; Since OR can be encoded with a sign-extended 8-bit immediate, this split
+;; is only profitable when the 7th bit of the mask is set.
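+;;
+;; A sketch: "or $0x80, %bl" (80 /1 ib, 3 bytes) beats
+;; "or $0x00000080, %ebx" (81 /1 id, 6 bytes), because 128 does not fit a
+;; sign-extended imm8; with bit 7 clear, e.g. 0x40, the 32-bit form is
+;; already 3 bytes (83 /1 ib) and the split would buy nothing.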
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (ior (match_operand 1 "general_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ANY_QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(INTVAL (operands[2]) & ~255)
+ && (INTVAL (operands[2]) & 128)
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (strict_low_part (match_dup 0))
+ (ior:QI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (QImode, operands[0]);
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ operands[2] = gen_lowpart (QImode, operands[2]);")
+
+;; Logical XOR instructions
+
+;; %%% This used to optimize known byte-wide xor operations to memory.
+;; If this is considered useful, it should be done with splitters.
+
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (xor:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "x86_64_general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "ix86_expand_binary_operator (XOR, DImode, operands); DONE;")
+
+(define_insn "*xordi_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (xor:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "re,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && ix86_binary_operator_ok (XOR, DImode, operands)"
+ "@
+ xor{q}\t{%2, %0|%0, %2}
+ xor{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI,DI")])
+
+(define_insn "*xordi_2_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (xor:DI (match_operand:DI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DI 2 "x86_64_general_operand" "rem,re"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=r,rm")
+ (xor:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, DImode, operands)"
+ "@
+ xor{q}\t{%2, %0|%0, %2}
+ xor{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI,DI")])
+
+(define_insn "*xordi_3_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (xor:DI (match_operand:DI 1 "nonimmediate_operand" "%0")
+ (match_operand:DI 2 "x86_64_general_operand" "rem"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT
+ && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, DImode, operands)"
+ "xor{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "DI")])
+
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (xor:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (XOR, SImode, operands); DONE;")
+
+(define_insn "*xorsi_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+;; Add a special case for immediates
+(define_insn "*xorsi_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*xorsi_1_zext_imm"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%0"))
+ (match_operand:DI 2 "x86_64_zext_immediate_operand" "Z")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*xorsi_2"
+ [(set (reg FLAGS_REG)
+ (compare (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=r,rm")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+;; See the comment for addsi_1_zext for why we use nonimmediate_operand
+;; ??? The special case for an immediate operand is missing - it is tricky.
+(define_insn "*xorsi_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (xor:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*xorsi_2_zext_imm"
+ [(set (reg FLAGS_REG)
+ (compare (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand 2 "x86_64_zext_immediate_operand" "Z"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (zero_extend:DI (match_dup 1)) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, SImode, operands)"
+ "xor{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_insn "*xorsi_3"
+ [(set (reg FLAGS_REG)
+ (compare (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0")
+ (match_operand:SI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "xor{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "SI")])
+
+(define_expand "xorhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (xor:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (XOR, HImode, operands); DONE;")
+
+(define_insn "*xorhi_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,m")
+ (xor:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rmi,ri")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (XOR, HImode, operands)"
+ "xor{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*xorhi_2"
+ [(set (reg FLAGS_REG)
+ (compare (xor:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "rim,ri"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=r,rm")
+ (xor:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, HImode, operands)"
+ "xor{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_insn "*xorhi_3"
+ [(set (reg FLAGS_REG)
+ (compare (xor:HI (match_operand:HI 1 "nonimmediate_operand" "%0")
+ (match_operand:HI 2 "general_operand" "rim"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "xor{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "HI")])
+
+(define_expand "xorqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (XOR, QImode, operands); DONE;")
+
+;; %%% Potential partial reg stall on alternative 2. What to do?
+(define_insn "*xorqi_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=q,m,r")
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qmi,qi,ri")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (XOR, QImode, operands)"
+ "@
+ xor{b}\t{%2, %0|%0, %2}
+ xor{b}\t{%2, %0|%0, %2}
+ xor{l}\t{%k2, %k0|%k0, %k2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI,QI,SI")])
+
+(define_insn "*xorqi_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
+ (xor:QI (match_dup 0)
+ (match_operand:QI 1 "general_operand" "qi,qmi")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "xor{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "xorqi_ext_0"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 2 "const_int_operand" "n")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "xor{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_ext_1"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand:QI 2 "general_operand" "Qm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "xor{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_ext_1_rex64"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extend:SI
+ (match_operand 2 "ext_register_operand" "Q"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "xor{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_ext_2"
+ [(set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (zero_extract:SI (match_operand 2 "ext_register_operand" "Q")
+ (const_int 8)
+ (const_int 8))))
+ (clobber (reg:CC FLAGS_REG))]
+ "(!TARGET_PARTIAL_REG_STALL || optimize_size)"
+ "xor{b}\t{%h2, %h0|%h0, %h2}"
+ [(set_attr "type" "alu")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_cc_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (xor:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (XOR, QImode, operands)"
+ "xor{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_2_slp"
+ [(set (reg FLAGS_REG)
+ (compare (xor:QI (match_operand:QI 0 "nonimmediate_operand" "+q,qm")
+ (match_operand:QI 1 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (strict_low_part (match_dup 0))
+ (xor:QI (match_dup 0) (match_dup 1)))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "xor{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_cc_2"
+ [(set (reg FLAGS_REG)
+ (compare
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "%0")
+ (match_operand:QI 2 "general_operand" "qim"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "xor{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_cc_ext_1"
+ [(set (reg FLAGS_REG)
+ (compare
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 2 "general_operand" "qmn"))
+ (const_int 0)))
+ (set (zero_extract:SI (match_operand 0 "ext_register_operand" "=q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI (match_dup 1) (const_int 8) (const_int 8))
+ (match_dup 2)))]
+ "!TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
+ "xor{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_insn "*xorqi_cc_ext_1_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "0")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 2 "nonmemory_operand" "Qn"))
+ (const_int 0)))
+ (set (zero_extract:SI (match_operand 0 "ext_register_operand" "=Q")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI (match_dup 1) (const_int 8) (const_int 8))
+ (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
+ "xor{b}\t{%2, %h0|%h0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
+(define_expand "xorqi_cc_ext_1"
+ [(parallel [
+ (set (reg:CCNO FLAGS_REG)
+ (compare:CCNO
+ (xor:SI
+ (zero_extract:SI
+ (match_operand 1 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (match_operand:QI 2 "general_operand" ""))
+ (const_int 0)))
+ (set (zero_extract:SI (match_operand 0 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (xor:SI
+ (zero_extract:SI (match_dup 1) (const_int 8) (const_int 8))
+ (match_dup 2)))])]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (xor (match_operand 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(INTVAL (operands[2]) & ~(255 << 8))
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (zero_extract:SI (match_dup 0) (const_int 8) (const_int 8))
+ (xor:SI (zero_extract:SI (match_dup 1)
+ (const_int 8) (const_int 8))
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode);")
+
+;; Since XOR can be encoded with a sign-extended 8-bit immediate, this split
+;; is only profitable when the 7th bit of the mask is set.
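+;;
+;; As with OR above: "xor $0x80, %bl" (80 /6 ib, 3 bytes) beats the imm32
+;; form (81 /6 id, 6 bytes), while masks with bit 7 clear already fit the
+;; 3-byte sign-extended 83 /6 ib encoding.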
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (xor (match_operand 1 "general_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && ANY_QI_REG_P (operands[0])
+ && (!TARGET_PARTIAL_REG_STALL || optimize_size)
+ && !(INTVAL (operands[2]) & ~255)
+ && (INTVAL (operands[2]) & 128)
+ && GET_MODE (operands[0]) != QImode"
+ [(parallel [(set (strict_low_part (match_dup 0))
+ (xor:QI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (QImode, operands[0]);
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ operands[2] = gen_lowpart (QImode, operands[2]);")
+
+;; Negation instructions
+
+(define_expand "negti2"
+ [(parallel [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (neg:TI (match_operand:TI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "ix86_expand_unary_operator (NEG, TImode, operands); DONE;")
+
+(define_insn "*negti2_1"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=ro")
+ (neg:TI (match_operand:TI 1 "nonimmediate_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && ix86_unary_operator_ok (NEG, TImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (neg:TI (match_operand:TI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:DI (match_dup 2)) (const_int 0)))
+ (set (match_dup 0) (neg:DI (match_dup 2)))])
+ (parallel
+ [(set (match_dup 1)
+ (plus:DI (plus:DI (ltu:DI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 3))
+ (const_int 0)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 1)
+ (neg:DI (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_ti (operands+1, 1, operands+2, operands+3);
+ split_ti (operands+0, 1, operands+0, operands+1);")
+
+(define_expand "negdi2"
+ [(parallel [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (neg:DI (match_operand:DI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "ix86_expand_unary_operator (NEG, DImode, operands); DONE;")
+
+(define_insn "*negdi2_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro")
+ (neg:DI (match_operand:DI 1 "general_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT
+ && ix86_unary_operator_ok (NEG, DImode, operands)"
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (neg:DI (match_operand:DI 1 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && reload_completed"
+ [(parallel
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:SI (match_dup 2)) (const_int 0)))
+ (set (match_dup 0) (neg:SI (match_dup 2)))])
+ (parallel
+ [(set (match_dup 1)
+ (plus:SI (plus:SI (ltu:SI (reg:CC FLAGS_REG) (const_int 0))
+ (match_dup 3))
+ (const_int 0)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 1)
+ (neg:SI (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_di (operands+1, 1, operands+2, operands+3);
+ split_di (operands+0, 1, operands+0, operands+1);")
+
+(define_insn "*negdi2_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (neg:DI (match_operand:DI 1 "nonimmediate_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands)"
+ "neg{q}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "DI")])
+
+;; The problem with neg is that it does not perform (compare x 0);
+;; it really performs (compare 0 x), which leaves us with the zero
+;; flag as the only useful item.
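+;;
+;; Concretely, "neg %eax" computes 0 - %eax: CF is set whenever the input
+;; was nonzero and OF fires only for INT_MIN, neither of which matches a
+;; (compare result 0); only ZF does, hence the CCZ compares below.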
+
+(define_insn "*negdi2_cmpz_rex64"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:DI (match_operand:DI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (neg:DI (match_dup 1)))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands)"
+ "neg{q}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "DI")])
+
+
+(define_expand "negsi2"
+ [(parallel [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (neg:SI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "ix86_expand_unary_operator (NEG, SImode, operands); DONE;")
+
+(define_insn "*negsi2_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (neg:SI (match_operand:SI 1 "nonimmediate_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_unary_operator_ok (NEG, SImode, operands)"
+ "neg{l}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+;; Combine is quite creative about this pattern: (zero_extend:DI (neg:SI x))
+;; tends to arrive here as (lshiftrt (neg (ashift x 32)) 32).
+(define_insn "*negsi2_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (neg:DI (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 32)))
+ (const_int 32)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NEG, SImode, operands)"
+ "neg{l}\t%k0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+;; The problem with neg is that it does not perform (compare x 0);
+;; it really performs (compare 0 x), which leaves us with the zero
+;; flag as the only useful item.
+
+(define_insn "*negsi2_cmpz"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:SI (match_operand:SI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (neg:SI (match_dup 1)))]
+ "ix86_unary_operator_ok (NEG, SImode, operands)"
+ "neg{l}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+(define_insn "*negsi2_cmpz_zext"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (lshiftrt:DI
+ (neg:DI (ashift:DI
+ (match_operand:DI 1 "register_operand" "0")
+ (const_int 32)))
+ (const_int 32))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (neg:DI (ashift:DI (match_dup 1)
+ (const_int 32)))
+ (const_int 32)))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NEG, SImode, operands)"
+ "neg{l}\t%k0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+(define_expand "neghi2"
+ [(parallel [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (neg:HI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_unary_operator (NEG, HImode, operands); DONE;")
+
+(define_insn "*neghi2_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (neg:HI (match_operand:HI 1 "nonimmediate_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_unary_operator_ok (NEG, HImode, operands)"
+ "neg{w}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "HI")])
+
+(define_insn "*neghi2_cmpz"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:HI (match_operand:HI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (neg:HI (match_dup 1)))]
+ "ix86_unary_operator_ok (NEG, HImode, operands)"
+ "neg{w}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "HI")])
+
+(define_expand "negqi2"
+ [(parallel [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (neg:QI (match_operand:QI 1 "nonimmediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_unary_operator (NEG, QImode, operands); DONE;")
+
+(define_insn "*negqi2_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (neg:QI (match_operand:QI 1 "nonimmediate_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_unary_operator_ok (NEG, QImode, operands)"
+ "neg{b}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "QI")])
+
+(define_insn "*negqi2_cmpz"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (neg:QI (match_operand:QI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (neg:QI (match_dup 1)))]
+ "ix86_unary_operator_ok (NEG, QImode, operands)"
+ "neg{b}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "QI")])
+
+;; Changing the sign of FP values can also be done with the integer unit.
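+;;
+;; E.g. an SFmode value living in a general register is negated with
+;; "xorl $0x80000000, %eax" and made absolute with "andl $0x7fffffff, %eax";
+;; the splitters further down emit exactly these RTL forms.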
+
+(define_expand "negsf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (neg:SF (match_operand:SF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_fp_absneg_operator (NEG, SFmode, operands); DONE;")
+
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (abs:SF (match_operand:SF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_fp_absneg_operator (ABS, SFmode, operands); DONE;")
+
+(define_insn "*absnegsf2_mixed"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x ,x,f,rm")
+ (match_operator:SF 3 "absneg_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "0 ,x,0,0 ")]))
+ (use (match_operand:V4SF 2 "nonimmediate_operand" "xm ,0,X,X "))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_SSE_MATH && TARGET_MIX_SSE_I387
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), SFmode, operands)"
+ "#")
+
+(define_insn "*absnegsf2_sse"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x,x,rm")
+ (match_operator:SF 3 "absneg_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "0 ,x,0")]))
+ (use (match_operand:V4SF 2 "nonimmediate_operand" "xm,0,X"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_SSE_MATH
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), SFmode, operands)"
+ "#")
+
+(define_insn "*absnegsf2_i387"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,rm")
+ (match_operator:SF 3 "absneg_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "0,0")]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_80387 && !TARGET_SSE_MATH
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), SFmode, operands)"
+ "#")
+
+(define_expand "copysignsf3"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "nonmemory_operand" "")
+ (match_operand:SF 2 "register_operand" "")]
+ "TARGET_SSE_MATH"
+{
+ ix86_expand_copysign (operands);
+ DONE;
+})
+
+(define_insn_and_split "copysignsf3_const"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (unspec:SF
+ [(match_operand:V4SF 1 "vector_move_operand" "xmC")
+ (match_operand:SF 2 "register_operand" "0")
+ (match_operand:V4SF 3 "nonimmediate_operand" "xm")]
+ UNSPEC_COPYSIGN))]
+ "TARGET_SSE_MATH"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_copysign_const (operands);
+ DONE;
+})
+
+(define_insn "copysignsf3_var"
+ [(set (match_operand:SF 0 "register_operand" "=x, x, x, x,x")
+ (unspec:SF
+ [(match_operand:SF 2 "register_operand" " x, 0, 0, x,x")
+ (match_operand:SF 3 "register_operand" " 1, 1, x, 1,x")
+ (match_operand:V4SF 4 "nonimmediate_operand" " X,xm,xm, 0,0")
+ (match_operand:V4SF 5 "nonimmediate_operand" " 0,xm, 1,xm,1")]
+ UNSPEC_COPYSIGN))
+ (clobber (match_scratch:V4SF 1 "=x, x, x, x,x"))]
+ "TARGET_SSE_MATH"
+ "#")
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF
+ [(match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")
+ (match_operand:V4SF 4 "" "")
+ (match_operand:V4SF 5 "" "")]
+ UNSPEC_COPYSIGN))
+ (clobber (match_scratch:V4SF 1 ""))]
+ "TARGET_SSE_MATH && reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_copysign_var (operands);
+ DONE;
+})
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (neg:DF (match_operand:DF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "ix86_expand_fp_absneg_operator (NEG, DFmode, operands); DONE;")
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (abs:DF (match_operand:DF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "ix86_expand_fp_absneg_operator (ABS, DFmode, operands); DONE;")
+
+(define_insn "*absnegdf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=x,x,f,rm")
+ (match_operator:DF 3 "absneg_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "0 ,x,0,0")]))
+ (use (match_operand:V2DF 2 "nonimmediate_operand" "xm,0,X,X"))
+ (clobber (reg:CC FLAGS_REG))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), DFmode, operands)"
+ "#")
+
+(define_insn "*absnegdf2_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=x,x,rm")
+ (match_operator:DF 3 "absneg_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "0 ,x,0 ")]))
+ (use (match_operand:V2DF 2 "nonimmediate_operand" "xm,0,X "))
+ (clobber (reg:CC FLAGS_REG))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), DFmode, operands)"
+ "#")
+
+(define_insn "*absnegdf2_i387"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,rm")
+ (match_operator:DF 3 "absneg_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "0,0")]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH)
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), DFmode, operands)"
+ "#")
+
+(define_expand "copysigndf3"
+ [(match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "nonmemory_operand" "")
+ (match_operand:DF 2 "register_operand" "")]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+{
+ ix86_expand_copysign (operands);
+ DONE;
+})
+
+(define_insn_and_split "copysigndf3_const"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (unspec:DF
+ [(match_operand:V2DF 1 "vector_move_operand" "xmC")
+ (match_operand:DF 2 "register_operand" "0")
+ (match_operand:V2DF 3 "nonimmediate_operand" "xm")]
+ UNSPEC_COPYSIGN))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_copysign_const (operands);
+ DONE;
+})
+
+(define_insn "copysigndf3_var"
+ [(set (match_operand:DF 0 "register_operand" "=x, x, x, x,x")
+ (unspec:DF
+ [(match_operand:DF 2 "register_operand" " x, 0, 0, x,x")
+ (match_operand:DF 3 "register_operand" " 1, 1, x, 1,x")
+ (match_operand:V2DF 4 "nonimmediate_operand" " X,xm,xm, 0,0")
+ (match_operand:V2DF 5 "nonimmediate_operand" " 0,xm, 1,xm,1")]
+ UNSPEC_COPYSIGN))
+ (clobber (match_scratch:V2DF 1 "=x, x, x, x,x"))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "#")
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF
+ [(match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")
+ (match_operand:V2DF 4 "" "")
+ (match_operand:V2DF 5 "" "")]
+ UNSPEC_COPYSIGN))
+ (clobber (match_scratch:V2DF 1 ""))]
+ "TARGET_SSE2 && TARGET_SSE_MATH && reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_copysign_var (operands);
+ DONE;
+})
+
+(define_expand "negxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (neg:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "ix86_expand_fp_absneg_operator (NEG, XFmode, operands); DONE;")
+
+(define_expand "absxf2"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "")
+ (abs:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "ix86_expand_fp_absneg_operator (ABS, XFmode, operands); DONE;")
+
+(define_insn "*absnegxf2_i387"
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,?rm")
+ (match_operator:XF 3 "absneg_operator"
+ [(match_operand:XF 1 "nonimmediate_operand" "0,0")]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_80387
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), XFmode, operands)"
+ "#")
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_expand "negtf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (neg:TF (match_operand:TF 1 "nonimmediate_operand" "")))]
+ "TARGET_64BIT"
+ "ix86_expand_fp_absneg_operator (NEG, TFmode, operands); DONE;")
+
+(define_expand "abstf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (abs:TF (match_operand:TF 1 "nonimmediate_operand" "")))]
+ "TARGET_64BIT"
+ "ix86_expand_fp_absneg_operator (ABS, TFmode, operands); DONE;")
+
+(define_insn "*absnegtf2_sse"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x,m")
+ (match_operator:TF 3 "absneg_operator"
+ [(match_operand:TF 1 "nonimmediate_operand" "0, x,0")]))
+ (use (match_operand:TF 2 "nonimmediate_operand" "xm,0,X"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT
+ && ix86_unary_operator_ok (GET_CODE (operands[3]), TFmode, operands)"
+ "#")
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Splitters for fp abs and neg.
+
+(define_split
+ [(set (match_operand 0 "fp_register_operand" "")
+ (match_operator 1 "absneg_operator" [(match_dup 0)]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 0)]))])
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 3 "absneg_operator"
+ [(match_operand 1 "register_operand" "")]))
+ (use (match_operand 2 "nonimmediate_operand" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed && SSE_REG_P (operands[0])"
+ [(set (match_dup 0) (match_dup 3))]
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ enum machine_mode vmode = GET_MODE (operands[2]);
+ rtx tmp;
+
+ operands[0] = simplify_gen_subreg (vmode, operands[0], mode, 0);
+ operands[1] = simplify_gen_subreg (vmode, operands[1], mode, 0);
+ if (operands_match_p (operands[0], operands[2]))
+ {
+ tmp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = tmp;
+ }
+ if (GET_CODE (operands[3]) == ABS)
+ tmp = gen_rtx_AND (vmode, operands[1], operands[2]);
+ else
+ tmp = gen_rtx_XOR (vmode, operands[1], operands[2]);
+ operands[3] = tmp;
+})
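+
+;; For example (illustrative), once the mask constant is in a register,
+;; an SSE negation is a single XOR of the sign bit and an fabs a single
+;; AND with the inverted mask:
+;;	xorpd	%xmm7, %xmm0		; negdf2, %xmm7 = sign-bit mask
+;;	andpd	%xmm7, %xmm0		; absdf2, %xmm7 = inverted mask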
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operator:SF 1 "absneg_operator" [(match_dup 0)]))
+ (use (match_operand:V4SF 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ rtx tmp;
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ if (GET_CODE (operands[1]) == ABS)
+ {
+ tmp = gen_int_mode (0x7fffffff, SImode);
+ tmp = gen_rtx_AND (SImode, operands[0], tmp);
+ }
+ else
+ {
+ tmp = gen_int_mode (0x80000000, SImode);
+ tmp = gen_rtx_XOR (SImode, operands[0], tmp);
+ }
+ operands[1] = tmp;
+})
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operator:DF 1 "absneg_operator" [(match_dup 0)]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ rtx tmp;
+ if (TARGET_64BIT)
+ {
+ tmp = gen_lowpart (DImode, operands[0]);
+ tmp = gen_rtx_ZERO_EXTRACT (DImode, tmp, const1_rtx, GEN_INT (63));
+ operands[0] = tmp;
+
+ if (GET_CODE (operands[1]) == ABS)
+ tmp = const0_rtx;
+ else
+ tmp = gen_rtx_NOT (DImode, tmp);
+ }
+ else
+ {
+ operands[0] = gen_highpart (SImode, operands[0]);
+ if (GET_CODE (operands[1]) == ABS)
+ {
+ tmp = gen_int_mode (0x7fffffff, SImode);
+ tmp = gen_rtx_AND (SImode, operands[0], tmp);
+ }
+ else
+ {
+ tmp = gen_int_mode (0x80000000, SImode);
+ tmp = gen_rtx_XOR (SImode, operands[0], tmp);
+ }
+ }
+ operands[1] = tmp;
+})
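+
+;; Illustrative: for DFmode held in integer registers, the 32-bit path
+;; above works on the high word only,
+;;	xorl	$0x80000000, %edx	; negdf2
+;;	andl	$0x7fffffff, %edx	; absdf2
+;; while the 64-bit path clears or complements bit 63 in place through
+;; the zero_extract form built above.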
+
+(define_split
+ [(set (match_operand:XF 0 "register_operand" "")
+ (match_operator:XF 1 "absneg_operator" [(match_dup 0)]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ rtx tmp;
+ operands[0] = gen_rtx_REG (SImode,
+ true_regnum (operands[0])
+ + (TARGET_64BIT ? 1 : 2));
+ if (GET_CODE (operands[1]) == ABS)
+ {
+ tmp = GEN_INT (0x7fff);
+ tmp = gen_rtx_AND (SImode, operands[0], tmp);
+ }
+ else
+ {
+ tmp = GEN_INT (0x8000);
+ tmp = gen_rtx_XOR (SImode, operands[0], tmp);
+ }
+ operands[1] = tmp;
+})
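+
+;; Illustrative: the XFmode sign bit is bit 15 of the word holding the
+;; sign and exponent, so after reload these reduce to
+;;	andl	$0x7fff, %reg		; absxf2: clear the sign bit
+;;	xorl	$0x8000, %reg		; negxf2: flip the sign bit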
+
+(define_split
+ [(set (match_operand 0 "memory_operand" "")
+ (match_operator 1 "absneg_operator" [(match_dup 0)]))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ int size = mode == XFmode ? 10 : GET_MODE_SIZE (mode);
+ rtx tmp;
+
+ /* APPLE LOCAL begin radar 4117515 */
+ if (size == 4)
+ {
+ operands[0] = adjust_address (operands[0], SImode, 0);
+ if (GET_CODE (operands[1]) == ABS)
+ {
+ tmp = gen_int_mode (0x7fffffff, SImode);
+ tmp = gen_rtx_AND (SImode, operands[0], tmp);
+ }
+ else
+ {
+ tmp = gen_int_mode (0x80000000, SImode);
+ tmp = gen_rtx_XOR (SImode, operands[0], tmp);
+ }
+ }
+ else
+ {
+ operands[0] = adjust_address (operands[0], QImode, size - 1);
+ if (GET_CODE (operands[1]) == ABS)
+ {
+ tmp = gen_int_mode (0x7f, QImode);
+ tmp = gen_rtx_AND (QImode, operands[0], tmp);
+ }
+ else
+ {
+ tmp = gen_int_mode (0x80, QImode);
+ tmp = gen_rtx_XOR (QImode, operands[0], tmp);
+ }
+ }
+ /* APPLE LOCAL end radar 4117515 */
+ operands[1] = tmp;
+})
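+
+;; Illustrative: for a double in memory only the sign byte (the highest
+;; byte on this little-endian target) is touched, e.g.
+;;	xorb	$0x80, 7(%esp)		; negdf2
+;;	andb	$0x7f, 7(%esp)		; absdf2
+;; whereas an SFmode value is handled as a full SImode word instead.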
+
+;; Conditionalize these after reload.  If they match before reload, we
+;; lose the clobber and the ability to use integer instructions.
+
+(define_insn "*negsf2_1"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "register_operand" "0")))]
+ "TARGET_80387 && (reload_completed || !TARGET_SSE_MATH)"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "SF")])
+
+(define_insn "*negdf2_1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "register_operand" "0")))]
+ "TARGET_80387 && (reload_completed || !(TARGET_SSE2 && TARGET_SSE_MATH))"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "DF")])
+
+(define_insn "*negxf2_1"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "register_operand" "0")))]
+ "TARGET_80387"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "XF")])
+
+(define_insn "*abssf2_1"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
+ "TARGET_80387 && (reload_completed || !TARGET_SSE_MATH)"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "SF")])
+
+(define_insn "*absdf2_1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "register_operand" "0")))]
+ "TARGET_80387 && (reload_completed || !(TARGET_SSE2 && TARGET_SSE_MATH))"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "DF")])
+
+(define_insn "*absxf2_1"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "register_operand" "0")))]
+ "TARGET_80387"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "DF")])
+
+(define_insn "*negextendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_80387 && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "DF")])
+
+(define_insn "*negextenddfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (float_extend:XF
+ (match_operand:DF 1 "register_operand" "0"))))]
+ "TARGET_80387"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "XF")])
+
+(define_insn "*negextendsfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (neg:XF (float_extend:XF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_80387"
+ "fchs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "XF")])
+
+(define_insn "*absextendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_80387 && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "DF")])
+
+(define_insn "*absextenddfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (float_extend:XF
+ (match_operand:DF 1 "register_operand" "0"))))]
+ "TARGET_80387"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "XF")])
+
+(define_insn "*absextendsfxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (abs:XF (float_extend:XF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_80387"
+ "fabs"
+ [(set_attr "type" "fsgn")
+ (set_attr "mode" "XF")])
+
+;; One's complement instructions
+
+(define_expand "one_cmpldi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (not:DI (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_64BIT"
+ "ix86_expand_unary_operator (NOT, DImode, operands); DONE;")
+
+(define_insn "*one_cmpldi2_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (not:DI (match_operand:DI 1 "nonimmediate_operand" "0")))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NOT, DImode, operands)"
+ "not{q}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "DI")])
+
+(define_insn "*one_cmpldi2_2_rex64"
+ [(set (reg FLAGS_REG)
+ (compare (not:DI (match_operand:DI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (not:DI (match_dup 1)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_unary_operator_ok (NOT, DImode, operands)"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:DI (match_operand:DI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "nonimmediate_operand" "")
+ (not:DI (match_dup 3)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2
+ [(xor:DI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:DI (match_dup 3) (const_int -1)))])]
+ "")
+
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (not:SI (match_operand:SI 1 "nonimmediate_operand" "")))]
+ ""
+ "ix86_expand_unary_operator (NOT, SImode, operands); DONE;")
+
+(define_insn "*one_cmplsi2_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (not:SI (match_operand:SI 1 "nonimmediate_operand" "0")))]
+ "ix86_unary_operator_ok (NOT, SImode, operands)"
+ "not{l}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+;; ??? Currently never generated - xor is used instead.
+(define_insn "*one_cmplsi2_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (not:SI (match_operand:SI 1 "register_operand" "0"))))]
+ "TARGET_64BIT && ix86_unary_operator_ok (NOT, SImode, operands)"
+ "not{l}\t%k0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "SI")])
+
+(define_insn "*one_cmplsi2_2"
+ [(set (reg FLAGS_REG)
+ (compare (not:SI (match_operand:SI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (not:SI (match_dup 1)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_unary_operator_ok (NOT, SImode, operands)"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:SI 1 "nonimmediate_operand" "")
+ (not:SI (match_dup 3)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:SI (match_dup 3) (const_int -1)))])]
+ "")
+
+;; ??? Currently never generated - xor is used instead.
+(define_insn "*one_cmplsi2_2_zext"
+ [(set (reg FLAGS_REG)
+ (compare (not:SI (match_operand:SI 1 "register_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (not:SI (match_dup 1))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)
+ && ix86_unary_operator_ok (NOT, SImode, operands)"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "SI")])
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "register_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "register_operand" "")
+ (zero_extend:DI (not:SI (match_dup 3))))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (zero_extend:DI (xor:SI (match_dup 3) (const_int -1))))])]
+ "")
+
+(define_expand "one_cmplhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (not:HI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_unary_operator (NOT, HImode, operands); DONE;")
+
+(define_insn "*one_cmplhi2_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (not:HI (match_operand:HI 1 "nonimmediate_operand" "0")))]
+ "ix86_unary_operator_ok (NOT, HImode, operands)"
+ "not{w}\t%0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "HI")])
+
+(define_insn "*one_cmplhi2_2"
+ [(set (reg FLAGS_REG)
+ (compare (not:HI (match_operand:HI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (not:HI (match_dup 1)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_unary_operator_ok (NEG, HImode, operands)"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "HI")])
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:HI (match_operand:HI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (not:HI (match_dup 3)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:HI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:HI (match_dup 3) (const_int -1)))])]
+ "")
+
+;; %%% Potential partial reg stall on alternative 1. What to do?
+(define_expand "one_cmplqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (not:QI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_unary_operator (NOT, QImode, operands); DONE;")
+
+(define_insn "*one_cmplqi2_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,r")
+ (not:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")))]
+ "ix86_unary_operator_ok (NOT, QImode, operands)"
+ "@
+ not{b}\t%0
+ not{l}\t%k0"
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "QI,SI")])
+
+(define_insn "*one_cmplqi2_2"
+ [(set (reg FLAGS_REG)
+ (compare (not:QI (match_operand:QI 1 "nonimmediate_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (not:QI (match_dup 1)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_unary_operator_ok (NOT, QImode, operands)"
+ "#"
+ [(set_attr "type" "alu1")
+ (set_attr "mode" "QI")])
+
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:QI (match_operand:QI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:QI 1 "nonimmediate_operand" "")
+ (not:QI (match_dup 3)))]
+ "ix86_match_ccmode (insn, CCNOmode)"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:QI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:QI (match_dup 3) (const_int -1)))])]
+ "")
+
+;; Arithmetic shift instructions
+
+;; DImode shifts are implemented using the i386 "shift double" opcode,
+;; which is written as "sh[lr]d[lw] imm,reg,reg/mem". If the shift count
+;; is variable, then the count is in %cl and the "imm" operand is dropped
+;; from the assembler input.
+;;
+;; This instruction shifts the target reg/mem as usual, but instead of
+;; shifting in zeros, bits are shifted in from the reg operand.  If the
+;; insn is a left shift double, bits are taken from the high-order bits
+;; of reg; if the insn is a right shift double, bits are taken from the
+;; low-order bits of reg.  So if %eax is "1234" and %edx is "5678",
+;; "shldl $8,%edx,%eax" leaves %edx unchanged and sets %eax to "2345".
+;;
+;; Since sh[lr]d does not change the `reg' operand, that is done
+;; separately, making all shifts emit pairs of shift double and normal
+;; shift.  Since sh[lr]d does not shift more than 31 bits, and we wish
+;; to support a 63-bit shift, each shift where the count is in a reg
+;; expands to a pair of shifts, a branch, a shift by 32 and a label.
+;;
+;; If the shift count is a constant, we need never emit more than one
+;; shift pair, instead using moves and sign extension for counts greater
+;; than 31.
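+;;
+;; For example (an illustrative sequence, not literal compiler output),
+;; a variable DImode left shift on 32-bit without cmove expands along
+;; these lines (count in %cl, low word in %eax, high word in %edx):
+;;
+;;	shldl	%cl, %eax, %edx		; high = high:low << (count & 31)
+;;	sall	%cl, %eax		; low <<= (count & 31)
+;;	testb	$32, %cl		; was the count 32 or more?
+;;	je	1f
+;;	movl	%eax, %edx		; yes: high = shifted low
+;;	xorl	%eax, %eax		;      low = 0
+;; 1:
+;;
+;; The cmove and constant-count variants below follow the same plan.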
+
+(define_expand "ashlti3"
+ [(parallel [(set (match_operand:TI 0 "register_operand" "")
+ (ashift:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+{
+ if (! immediate_operand (operands[2], QImode))
+ {
+ emit_insn (gen_ashlti3_1 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ ix86_expand_binary_operator (ASHIFT, TImode, operands);
+ DONE;
+})
+
+;; APPLE LOCAL begin 6440204
+;; Moved here from sse.md so that this pattern is recognized before
+;; *ashlti3_2.  Ugh.
+(define_insn "sse2_ashlti3"
+ [(set (match_operand:TI 0 "register_operand" "=x")
+ (ashift:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_0_to_255_mul_8_operand" "n")))]
+ "TARGET_SSE2"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) / 8);
+ return "pslldq\t{%2, %0|%0, %2}";
+}
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 6440204
+
+(define_insn "ashlti3_1"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (ashift:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_scratch:DI 3 "=&r"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*ashlti3_2"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (ashift:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "immediate_operand" "O")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (ashift:TI (match_operand:TI 1 "nonmemory_operand" "")
+ (match_operand:QI 2 "register_operand" "")))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_ashl (operands, operands[3], TImode); DONE;")
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (ashift:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_ashl (operands, NULL_RTX, TImode); DONE;")
+
+(define_insn "x86_64_shld"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "+r*m,r*m")
+ (ior:DI (ashift:DI (match_dup 0)
+ (match_operand:QI 2 "nonmemory_operand" "J,c"))
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r,r")
+ (minus:QI (const_int 64) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "@
+ shld{q}\t{%2, %1, %0|%0, %1, %2}
+ shld{q}\t{%s2%1, %0|%0, %1, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "prefix_0f" "1")
+ (set_attr "mode" "DI")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "x86_64_shift_adj"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (and:QI (match_operand:QI 2 "register_operand" "")
+ (const_int 64))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (ne (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_operand:DI 1 "register_operand" "")
+ (match_dup 0)))
+ (set (match_dup 1)
+ (if_then_else:DI (ne (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_operand:DI 3 "register_operand" "r")
+ (match_dup 1)))]
+ "TARGET_64BIT"
+ "")
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "shiftdi_operand" "")
+ (ashift:DI (match_operand:DI 1 "ashldi_input_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "ix86_expand_binary_operator (ASHIFT, DImode, operands); DONE;")
+
+(define_insn "*ashldi3_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "0,l")
+ (match_operand:QI 2 "nonmemory_operand" "cJ,M")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, DImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ return "add{q}\t{%0, %0|%0, %0}";
+
+ case TYPE_LEA:
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+ gcc_assert ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) <= 3);
+ operands[1] = gen_rtx_MULT (DImode, operands[1],
+ GEN_INT (1 << INTVAL (operands[2])));
+ return "lea{q}\t{%a1, %0|%0, %a1}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{q}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{q}\t%0";
+ else
+ return "sal{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "1")
+ (const_string "lea")
+ (and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "DI")])
+
+;; Convert lea to the lea pattern to avoid flags dependency.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "index_register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(set (match_dup 0)
+ (mult:DI (match_dup 1)
+ (match_dup 2)))]
+ "operands[2] = gen_int_mode (1 << INTVAL (operands[2]), DImode);")
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashldi3_cmp_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "immediate_operand" "e"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || (TARGET_DOUBLE_WITH_ADD && REG_P (operands[0])))))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{q}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{q}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{q}\t%0";
+ else
+ return "sal{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "DI")])
+
+(define_insn "*ashldi3_cconly_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "immediate_operand" "e"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || TARGET_DOUBLE_WITH_ADD)))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{q}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{q}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{q}\t%0";
+ else
+ return "sal{q}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "DI")])
+
+(define_insn "*ashldi3_1"
+ [(set (match_operand:DI 0 "register_operand" "=&r,r")
+ (ashift:DI (match_operand:DI 1 "reg_or_pm1_operand" "n,0")
+ (match_operand:QI 2 "nonmemory_operand" "Jc,Jc")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+;; By default we don't ask for a scratch register, because when DImode
+;; values are manipulated, registers are already at a premium. But if
+;; we have one handy, we won't turn it away.
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "nonmemory_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_dup 3)]
+ "!TARGET_64BIT && TARGET_CMOVE"
+ [(const_int 0)]
+ "ix86_split_ashl (operands, operands[3], DImode); DONE;")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "nonmemory_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && ((optimize > 0 && flag_peephole2)
+ ? flow2_completed : reload_completed)"
+ [(const_int 0)]
+ "ix86_split_ashl (operands, NULL_RTX, DImode); DONE;")
+
+(define_insn "x86_shld_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "+r*m,r*m")
+ (ior:SI (ashift:SI (match_dup 0)
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "@
+ shld{l}\t{%2, %1, %0|%0, %1, %2}
+ shld{l}\t{%s2%1, %0|%0, %1, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "prefix_0f" "1")
+ (set_attr "mode" "SI")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "x86_shift_adj_1"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (and:QI (match_operand:QI 2 "register_operand" "")
+ (const_int 32))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (ne (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_operand:SI 1 "register_operand" "")
+ (match_dup 0)))
+ (set (match_dup 1)
+ (if_then_else:SI (ne (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_operand:SI 3 "register_operand" "r")
+ (match_dup 1)))]
+ "TARGET_CMOVE"
+ "")
+
+(define_expand "x86_shift_adj_2"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:SI 1 "register_operand" ""))
+ (use (match_operand:QI 2 "register_operand" ""))]
+ ""
+{
+ rtx label = gen_label_rtx ();
+ rtx tmp;
+
+ emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
+
+ tmp = gen_rtx_REG (CCZmode, FLAGS_REG);
+ tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx);
+ tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
+ JUMP_LABEL (tmp) = label;
+
+ emit_move_insn (operands[0], operands[1]);
+ ix86_expand_clear (operands[1]);
+
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+
+ DONE;
+})
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (ASHIFT, SImode, operands); DONE;")
+
+(define_insn "*ashlsi3_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0,l")
+ (match_operand:QI 2 "nonmemory_operand" "cI,M")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFT, SImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ return "add{l}\t{%0, %0|%0, %0}";
+
+ case TYPE_LEA:
+ return "#";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{l}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{l}\t%0";
+ else
+ return "sal{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "1")
+ (const_string "lea")
+ (and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "SI")])
+
+;; Convert lea to the lea pattern to avoid flags dependency.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (ashift (match_operand 1 "index_register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 4"
+ [(const_int 0)]
+{
+ rtx pat;
+ enum machine_mode mode = GET_MODE (operands[0]);
+
+ if (GET_MODE_SIZE (mode) < 4)
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ if (mode != Pmode)
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_int_mode (1 << INTVAL (operands[2]), Pmode);
+
+ pat = gen_rtx_MULT (Pmode, operands[1], operands[2]);
+ if (Pmode != SImode)
+ pat = gen_rtx_SUBREG (SImode, pat, 0);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat));
+ DONE;
+})
+
+;; The rare case of shifting the stack pointer is handled by generating
+;; a move and a shift, since %esp/%rsp cannot be used as the index
+;; register of a lea address.
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (ashift (match_operand 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(const_int 0)]
+{
+ rtx pat, clob;
+ emit_move_insn (operands[0], operands[1]);
+ pat = gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ASHIFT (GET_MODE (operands[0]),
+ operands[0], operands[2]));
+ clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clob)));
+ DONE;
+})
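+
+;; Illustrative: shifting %rsp left by 4 into %rax therefore becomes
+;;	movq	%rsp, %rax
+;;	salq	$4, %rax
+;; rather than a lea.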
+
+(define_insn "*ashlsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (ashift:SI (match_operand:SI 1 "register_operand" "0,l")
+ (match_operand:QI 2 "nonmemory_operand" "cI,M"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, SImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{l}\t{%k0, %k0|%k0, %k0}";
+
+ case TYPE_LEA:
+ return "#";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{l}\t{%b2, %k0|%k0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{l}\t%k0";
+ else
+ return "sal{l}\t{%2, %k0|%k0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "1")
+ (const_string "lea")
+ (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "SI")])
+
+;; Convert lea to the lea pattern to avoid flags dependency.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (ashift (match_operand 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed
+ && true_regnum (operands[0]) != true_regnum (operands[1])"
+ [(set (match_dup 0) (zero_extend:DI
+ (subreg:SI (mult:SI (match_dup 1)
+ (match_dup 2)) 0)))]
+{
+ operands[1] = gen_lowpart (Pmode, operands[1]);
+ operands[2] = gen_int_mode (1 << INTVAL (operands[2]), Pmode);
+})
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashlsi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (ashift:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || (TARGET_DOUBLE_WITH_ADD && REG_P (operands[0])))))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{l}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{l}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{l}\t%0";
+ else
+ return "sal{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashlsi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || TARGET_DOUBLE_WITH_ADD)))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{l}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{l}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{l}\t%0";
+ else
+ return "sal{l}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashlsi3_cmp_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (ashift:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || TARGET_DOUBLE_WITH_ADD)))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{l}\t{%k0, %k0|%k0, %k0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{l}\t{%b2, %k0|%k0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{l}\t%k0";
+ else
+ return "sal{l}\t{%2, %k0|%k0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "SI")])
+
+(define_expand "ashlhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (ASHIFT, HImode, operands); DONE;")
+
+(define_insn "*ashlhi3_1_lea"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "0,l")
+ (match_operand:QI 2 "nonmemory_operand" "cI,M")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (ASHIFT, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ return "#";
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{w}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{w}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{w}\t%0";
+ else
+ return "sal{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "1")
+ (const_string "lea")
+ (and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "HI,SI")])
+
+(define_insn "*ashlhi3_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "cI")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (ASHIFT, HImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{w}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{w}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{w}\t%0";
+ else
+ return "sal{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "HI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashlhi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashift:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || (TARGET_DOUBLE_WITH_ADD && REG_P (operands[0])))))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{w}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{w}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{w}\t%0";
+ else
+ return "sal{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "HI")])
+
+(define_insn "*ashlhi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || TARGET_DOUBLE_WITH_ADD)))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{w}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{w}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{w}\t%0";
+ else
+ return "sal{w}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "HI")])
+
+(define_expand "ashlqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (ASHIFT, QImode, operands); DONE;")
+
+;; %%% Potential partial reg stall on alternative 2. What to do?
+
+(define_insn "*ashlqi3_1_lea"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,r,r")
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "0,0,l")
+ (match_operand:QI 2 "nonmemory_operand" "cI,cI,M")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (ASHIFT, QImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_LEA:
+ return "#";
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ if (REG_P (operands[1]) && !ANY_QI_REG_P (operands[1]))
+ return "add{l}\t{%k0, %k0|%k0, %k0}";
+ else
+ return "add{b}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t{%b2, %k0|%k0, %b2}";
+ else
+ return "sal{b}\t{%b2, %0|%0, %b2}";
+ }
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t%0";
+ else
+ return "sal{b}\t%0";
+ }
+ else
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t{%2, %k0|%k0, %2}";
+ else
+ return "sal{b}\t{%2, %0|%0, %2}";
+ }
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "lea")
+ (and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "QI,SI,SI")])
+
+(define_insn "*ashlqi3_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,r")
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "cI,cI")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_PARTIAL_REG_STALL
+ && ix86_binary_operator_ok (ASHIFT, QImode, operands)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ if (REG_P (operands[1]) && !ANY_QI_REG_P (operands[1]))
+ return "add{l}\t{%k0, %k0|%k0, %k0}";
+ else
+ return "add{b}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t{%b2, %k0|%k0, %b2}";
+ else
+ return "sal{b}\t{%b2, %0|%0, %b2}";
+ }
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t%0";
+ else
+ return "sal{b}\t%0";
+ }
+ else
+ {
+ if (get_attr_mode (insn) == MODE_SI)
+ return "sal{l}\t{%2, %k0|%k0, %2}";
+ else
+ return "sal{b}\t{%2, %0|%0, %2}";
+ }
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "QI,SI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashlqi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashift:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || (TARGET_DOUBLE_WITH_ADD && REG_P (operands[0])))))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{b}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{b}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{b}\t%0";
+ else
+ return "sal{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "QI")])
+
+(define_insn "*ashlqi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL
+ || (operands[2] == const1_rtx
+ && (TARGET_SHIFT1
+ || TARGET_DOUBLE_WITH_ADD)))"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ gcc_assert (operands[2] == const1_rtx);
+ return "add{b}\t{%0, %0|%0, %0}";
+
+ default:
+ if (REG_P (operands[2]))
+ return "sal{b}\t{%b2, %0|%0, %b2}";
+ else if (operands[2] == const1_rtx
+ && (TARGET_SHIFT1 || optimize_size))
+ return "sal{b}\t%0";
+ else
+ return "sal{b}\t{%2, %0|%0, %2}";
+ }
+}
+ [(set (attr "type")
+ (cond [(and (and (ne (symbol_ref "TARGET_DOUBLE_WITH_ADD")
+ (const_int 0))
+ (match_operand 0 "register_operand" ""))
+ (match_operand 2 "const1_operand" ""))
+ (const_string "alu")
+ ]
+ (const_string "ishift")))
+ (set_attr "mode" "QI")])
+
+;; See comment above `ashldi3' about how this works.
+
+(define_expand "ashrti3"
+ [(parallel [(set (match_operand:TI 0 "register_operand" "")
+ (ashiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+{
+ if (! immediate_operand (operands[2], QImode))
+ {
+ emit_insn (gen_ashrti3_1 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ ix86_expand_binary_operator (ASHIFTRT, TImode, operands);
+ DONE;
+})
+
+(define_insn "ashrti3_1"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (ashiftrt:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_scratch:DI 3 "=&r"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_insn "*ashrti3_2"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (ashiftrt:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "immediate_operand" "O")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (ashiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "register_operand" "")))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_ashr (operands, operands[3], TImode); DONE;")
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (ashiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_ashr (operands, NULL_RTX, TImode); DONE;")
+
+(define_insn "x86_64_shrd"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "+r*m,r*m")
+ (ior:DI (ashiftrt:DI (match_dup 0)
+ (match_operand:QI 2 "nonmemory_operand" "J,c"))
+ (ashift:DI (match_operand:DI 1 "register_operand" "r,r")
+ (minus:QI (const_int 64) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "@
+ shrd{q}\t{%2, %1, %0|%0, %1, %2}
+ shrd{q}\t{%s2%1, %0|%0, %1, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "prefix_0f" "1")
+ (set_attr "mode" "DI")
+ (set_attr "athlon_decode" "vector")])
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "shiftdi_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "shiftdi_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "ix86_expand_binary_operator (ASHIFTRT, DImode, operands); DONE;")
+
+(define_insn "*ashrdi3_63_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=*d,rm")
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "*a,0")
+ (match_operand:DI 2 "const_int_operand" "i,i")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && INTVAL (operands[2]) == 63
+ && (TARGET_USE_CLTD || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)"
+ "@
+ {cqto|cqo}
+ sar{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imovx,ishift")
+ (set_attr "prefix_0f" "0,*")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "modrm" "0,1")
+ (set_attr "mode" "DI")])
+
+(define_insn "*ashrdi3_1_one_bit_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrdi3_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm")
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "J,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)"
+ "@
+ sar{q}\t{%2, %0|%0, %2}
+ sar{q}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrdi3_one_bit_cmp_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)"
+ "sar{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrdi3_one_bit_cconly_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)"
+ "sar{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrdi3_cmp_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+(define_insn "*ashrdi3_cconly_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+(define_insn "*ashrdi3_1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "Jc")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+;; By default we don't ask for a scratch register, because when DImode
+;; values are manipulated, registers are already at a premium. But if
+;; we have one handy, we won't turn it away.
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_dup 3)]
+ "!TARGET_64BIT && TARGET_CMOVE"
+ [(const_int 0)]
+ "ix86_split_ashr (operands, operands[3], DImode); DONE;")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && ((optimize > 0 && flag_peephole2)
+ ? flow2_completed : reload_completed)"
+ [(const_int 0)]
+ "ix86_split_ashr (operands, NULL_RTX, DImode); DONE;")
+
+(define_insn "x86_shrd_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "+r*m,r*m")
+ (ior:SI (ashiftrt:SI (match_dup 0)
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))
+ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "@
+ shrd{l}\t{%2, %1, %0|%0, %1, %2}
+ shrd{l}\t{%s2%1, %0|%0, %1, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "prefix_0f" "1")
+ (set_attr "pent_pair" "np")
+ (set_attr "mode" "SI")])
+
+(define_expand "x86_shift_adj_3"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:SI 1 "register_operand" ""))
+ (use (match_operand:QI 2 "register_operand" ""))]
+ ""
+{
+ rtx label = gen_label_rtx ();
+ rtx tmp;
+
+ emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
+
+ tmp = gen_rtx_REG (CCZmode, FLAGS_REG);
+ tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx);
+ tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
+ JUMP_LABEL (tmp) = label;
+
+ emit_move_insn (operands[0], operands[1]);
+ emit_insn (gen_ashrsi3_31 (operands[1], operands[1], GEN_INT (31)));
+
+ emit_label (label);
+ LABEL_NUSES (label) = 1;
+
+ DONE;
+})
+
+(define_insn "ashrsi3_31"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=*d,rm")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "*a,0")
+ (match_operand:SI 2 "const_int_operand" "i,i")))
+ (clobber (reg:CC FLAGS_REG))]
+ "INTVAL (operands[2]) == 31 && (TARGET_USE_CLTD || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "@
+ {cltd|cdq}
+ sar{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "imovx,ishift")
+ (set_attr "prefix_0f" "0,*")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "modrm" "0,1")
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashrsi3_31_zext"
+ [(set (match_operand:DI 0 "register_operand" "=*d,r")
+ (zero_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "*a,0")
+ (match_operand:SI 2 "const_int_operand" "i,i"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (TARGET_USE_CLTD || optimize_size)
+ && INTVAL (operands[2]) == 31
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "@
+ {cltd|cdq}
+ sar{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "imovx,ishift")
+ (set_attr "prefix_0f" "0,*")
+ (set_attr "length_immediate" "0,*")
+ (set_attr "modrm" "0,1")
+ (set_attr "mode" "SI")])
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (ASHIFTRT, SImode, operands); DONE;")
+
+(define_insn "*ashrsi3_1_one_bit"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrsi3_1_one_bit_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{l}\t%k0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+(define_insn "*ashrsi3_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "@
+ sar{l}\t{%2, %0|%0, %2}
+ sar{l}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashrsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "@
+ sar{l}\t{%2, %k0|%k0, %2}
+ sar{l}\t{%b2, %k0|%k0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
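+;; For example, with a count in %cl, "sar %cl, %eax" leaves EFLAGS
+;; unchanged whenever %cl is zero, so fusing the shift with a flags
+;; consumer would read stale flags in that case.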
+(define_insn "*ashrsi3_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "sar{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrsi3_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "sar{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+(define_insn "*ashrsi3_one_bit_cmp_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (ashiftrt:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)"
+ "sar{l}\t%k0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrsi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashrsi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashrsi3_cmp_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (ashiftrt:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_expand "ashrhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (ASHIFTRT, HImode, operands); DONE;")
+
+(define_insn "*ashrhi3_1_one_bit"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrhi3_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,rm")
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, HImode, operands)"
+ "@
+ sar{w}\t{%2, %0|%0, %2}
+ sar{w}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrhi3_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, HImode, operands)"
+ "sar{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrhi3_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, HImode, operands)"
+ "sar{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrhi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+(define_insn "*ashrhi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+(define_expand "ashrqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (ASHIFTRT, QImode, operands); DONE;")
+
+(define_insn "*ashrqi3_1_one_bit"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, QImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrqi3_1_one_bit_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
+ (ashiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, QImode, operands)
+ && (! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "sar{b}\t%0"
+ [(set_attr "type" "ishift1")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrqi3_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,qm")
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ASHIFTRT, QImode, operands)"
+ "@
+ sar{b}\t{%2, %0|%0, %2}
+ sar{b}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+(define_insn "*ashrqi3_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,qm"))
+ (ashiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ sar{b}\t{%1, %0|%0, %1}
+ sar{b}\t{%b1, %0|%0, %b1}"
+ [(set_attr "type" "ishift1")
+ (set_attr "mode" "QI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrqi3_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashiftrt:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, QImode, operands)"
+ "sar{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*ashrqi3_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (ASHIFTRT, QImode, operands)"
+ "sar{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*ashrqi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashiftrt:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+(define_insn "*ashrqi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (ASHIFTRT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "sar{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+
+;; Logical shift instructions
+
+;; See comment above `ashldi3' about how this works.
+
+(define_expand "lshrti3"
+ [(parallel [(set (match_operand:TI 0 "register_operand" "")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+{
+ if (! immediate_operand (operands[2], QImode))
+ {
+ emit_insn (gen_lshrti3_1 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ ix86_expand_binary_operator (LSHIFTRT, TImode, operands);
+ DONE;
+})
+
+(define_insn "lshrti3_1"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "c")))
+ (clobber (match_scratch:DI 3 "=&r"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+;; APPLE LOCAL begin mainline 5951842
+;; This pattern must be defined before *lshrti3_2 to prevent
+;; combine pass from converting sse2_lshrti3 to *lshrti3_2.
+
+(define_insn "sse2_lshrti3"
+ [(set (match_operand:TI 0 "register_operand" "=x")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_0_to_255_mul_8_operand" "n")))]
+ "TARGET_SSE2"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) / 8);
+ return "psrldq\t{%2, %0|%0, %2}";
+}
+ [(set_attr "type" "sseishft")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end mainline 5951842
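+
+;; psrldq counts in bytes, so the bit count (constrained to a multiple
+;; of 8 by const_0_to_255_mul_8_operand) is divided by 8 above; for
+;; example, a TImode shift right by 64 assembles to "psrldq $8, %xmm0".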
+
+(define_insn "*lshrti3_2"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "0")
+ (match_operand:QI 2 "immediate_operand" "O")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "register_operand" "")))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_lshr (operands, operands[3], TImode); DONE;")
+
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (lshiftrt:TI (match_operand:TI 1 "register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+ "ix86_split_lshr (operands, NULL_RTX, TImode); DONE;")
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "shiftdi_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "shiftdi_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))]
+ ""
+ "ix86_expand_binary_operator (LSHIFTRT, DImode, operands); DONE;")
+
+(define_insn "*lshrdi3_1_one_bit_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrdi3_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "J,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "@
+ shr{q}\t{%2, %0|%0, %2}
+ shr{q}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrdi3_cmp_one_bit_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrdi3_cconly_one_bit_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{q}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrdi3_cmp_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "e"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+   && ix86_binary_operator_ok (LSHIFTRT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+(define_insn "*lshrdi3_cconly_rex64"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_int_operand" "e"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 0 "=r"))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+   && ix86_binary_operator_ok (LSHIFTRT, DImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{q}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "DI")])
+
+(define_insn "*lshrdi3_1"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonmemory_operand" "Jc")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "#"
+ [(set_attr "type" "multi")])
+
+;; By default we don't ask for a scratch register, because when DImode
+;; values are manipulated, registers are already at a premium. But if
+;; we have one handy, we won't turn it away.
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_dup 3)]
+ "!TARGET_64BIT && TARGET_CMOVE"
+ [(const_int 0)]
+ "ix86_split_lshr (operands, operands[3], DImode); DONE;")
+
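+;; When peephole2 is enabled, the split below waits for flow2_completed
+;; rather than reload_completed, so that the scratch-register peephole
+;; above (peephole2 runs after flow2) gets first chance at the insn.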
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && ((optimize > 0 && flag_peephole2)
+ ? flow2_completed : reload_completed)"
+ [(const_int 0)]
+ "ix86_split_lshr (operands, NULL_RTX, DImode); DONE;")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (LSHIFTRT, SImode, operands); DONE;")
+
+(define_insn "*lshrsi3_1_one_bit"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrsi3_1_one_bit_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{l}\t%k0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+(define_insn "*lshrsi3_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "@
+ shr{l}\t{%2, %0|%0, %2}
+ shr{l}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lshrsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "@
+ shr{l}\t{%2, %k0|%k0, %2}
+ shr{l}\t{%b2, %k0|%k0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrsi3_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrsi3_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{l}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+(define_insn "*lshrsi3_cmp_one_bit_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (zero_extend:DI (match_dup 1)) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{l}\t%k0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrsi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+   && ix86_binary_operator_ok (LSHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lshrsi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+   && ix86_binary_operator_ok (LSHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{l}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*lshrsi3_cmp_zext"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (zero_extend:DI (match_dup 1)) (match_dup 2)))]
+ "TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode)
+   && ix86_binary_operator_ok (LSHIFTRT, SImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{l}\t{%2, %k0|%k0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "SI")])
+
+(define_expand "lshrhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (LSHIFTRT, HImode, operands); DONE;")
+
+(define_insn "*lshrhi3_1_one_bit"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrhi3_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,rm")
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "@
+ shr{w}\t{%2, %0|%0, %2}
+ shr{w}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrhi3_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrhi3_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)"
+ "shr{w}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrhi3_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:HI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+(define_insn "*lshrhi3_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 0 "=r"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{w}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "HI")])
+
+(define_expand "lshrqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (LSHIFTRT, QImode, operands); DONE;")
+
+(define_insn "*lshrqi3_1_one_bit"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, QImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrqi3_1_one_bit_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
+ (lshiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "shr{b}\t%0"
+ [(set_attr "type" "ishift1")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrqi3_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,qm")
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (LSHIFTRT, QImode, operands)"
+ "@
+ shr{b}\t{%2, %0|%0, %2}
+ shr{b}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+(define_insn "*lshrqi3_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,qm"))
+ (lshiftrt:QI (match_dup 0)
+ (match_operand:QI 1 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ shr{b}\t{%1, %0|%0, %1}
+ shr{b}\t{%b1, %0|%0, %b1}"
+ [(set_attr "type" "ishift1")
+ (set_attr "mode" "QI")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrqi2_one_bit_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (lshiftrt:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, QImode, operands)"
+ "shr{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*lshrqi2_one_bit_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && (TARGET_SHIFT1 || optimize_size)
+ && ix86_binary_operator_ok (LSHIFTRT, QImode, operands)"
+ "shr{b}\t%0"
+ [(set_attr "type" "ishift")
+ (set_attr "length" "2")])
+
+;; This pattern can't accept a variable shift count, since shifts by
+;; zero don't affect the flags. We assume that shifts by constant
+;; zero are optimized away.
+(define_insn "*lshrqi2_cmp"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (lshiftrt:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (LSHIFTRT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+(define_insn "*lshrqi2_cconly"
+ [(set (reg FLAGS_REG)
+ (compare
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 0 "=q"))]
+ "ix86_match_ccmode (insn, CCGOCmode)
+ && ix86_binary_operator_ok (LSHIFTRT, QImode, operands)
+ && (optimize_size
+ || !TARGET_PARTIAL_FLAG_REG_STALL)"
+ "shr{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ishift")
+ (set_attr "mode" "QI")])
+
+;; Rotate instructions
+
+(define_expand "rotldi3"
+ [(set (match_operand:DI 0 "shiftdi_operand" "")
+ (rotate:DI (match_operand:DI 1 "shiftdi_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+{
+ if (TARGET_64BIT)
+ {
+ ix86_expand_binary_operator (ROTATE, DImode, operands);
+ DONE;
+ }
+ if (!const_1_to_31_operand (operands[2], VOIDmode))
+ FAIL;
+ emit_insn (gen_ix86_rotldi3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Implement rotation using two double-precision shift instructions
+;; and a scratch register.
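+;;
+;; A sketch of what the split produces for a left-rotate by n (1..31),
+;; writing the value as hi:lo and the scratch as t (split_di puts the
+;; low word in operands[4] and the high word in operands[5]); each ior
+;; maps onto one shld instruction:
+;;
+;;   t  = lo;
+;;   lo = (lo << n) | (hi >> (32 - n));
+;;   hi = (hi << n) | (t  >> (32 - n));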
+(define_insn_and_split "ix86_rotldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (rotate:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "!TARGET_64BIT"
+ ""
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (parallel
+ [(set (match_dup 4)
+ (ior:SI (ashift:SI (match_dup 4) (match_dup 2))
+ (lshiftrt:SI (match_dup 5)
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 5)
+ (ior:SI (ashift:SI (match_dup 5) (match_dup 2))
+ (lshiftrt:SI (match_dup 3)
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_di (operands, 1, operands + 4, operands + 5);")
+
+(define_insn "*rotlsi3_1_one_bit_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (rotate:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{q}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotldi3_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm")
+ (rotate:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "e,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands)"
+ "@
+ rol{q}\t{%2, %0|%0, %2}
+ rol{q}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "DI")])
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (rotate:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (ROTATE, SImode, operands); DONE;")
+
+(define_insn "*rotlsi3_1_one_bit"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (rotate:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{l}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotlsi3_1_one_bit_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{l}\t%k0"
+ [(set_attr "type" "rotate")
+ (set_attr "length" "2")])
+
+(define_insn "*rotlsi3_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm")
+ (rotate:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, SImode, operands)"
+ "@
+ rol{l}\t{%2, %0|%0, %2}
+ rol{l}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rotlsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI
+ (rotate:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands)"
+ "@
+ rol{l}\t{%2, %k0|%k0, %2}
+ rol{l}\t{%b2, %k0|%k0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "SI")])
+
+(define_expand "rotlhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (rotate:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (ROTATE, HImode, operands); DONE;")
+
+(define_insn "*rotlhi3_1_one_bit"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (rotate:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{w}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotlhi3_1"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,rm")
+ (rotate:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, HImode, operands)"
+ "@
+ rol{w}\t{%2, %0|%0, %2}
+ rol{w}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "HI")])
+
+(define_expand "rotlqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (rotate:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (ROTATE, QImode, operands); DONE;")
+
+(define_insn "*rotlqi3_1_one_bit_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
+ (rotate:QI (match_dup 0)
+ (match_operand:QI 1 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{b}\t%0"
+ [(set_attr "type" "rotate1")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotlqi3_1_one_bit"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (rotate:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, QImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "rol{b}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotlqi3_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,qm"))
+ (rotate:QI (match_dup 0)
+ (match_operand:QI 1 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ rol{b}\t{%1, %0|%0, %1}
+ rol{b}\t{%b1, %0|%0, %b1}"
+ [(set_attr "type" "rotate1")
+ (set_attr "mode" "QI")])
+
+(define_insn "*rotlqi3_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,qm")
+ (rotate:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATE, QImode, operands)"
+ "@
+ rol{b}\t{%2, %0|%0, %2}
+ rol{b}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "QI")])
+
+(define_expand "rotrdi3"
+ [(set (match_operand:DI 0 "shiftdi_operand" "")
+ (rotate:DI (match_operand:DI 1 "shiftdi_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+{
+ if (TARGET_64BIT)
+ {
+ ix86_expand_binary_operator (ROTATERT, DImode, operands);
+ DONE;
+ }
+ if (!const_1_to_31_operand (operands[2], VOIDmode))
+ FAIL;
+ emit_insn (gen_ix86_rotrdi3 (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Implement rotation using two double-precision shift instructions
+;; and a scratch register.
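+;;
+;; As with ix86_rotldi3 above, a sketch of the split for a right-rotate
+;; by n (1..31), each ior mapping onto one shrd instruction:
+;;
+;;   t  = lo;
+;;   lo = (lo >> n) | (hi << (32 - n));
+;;   hi = (hi >> n) | (t  << (32 - n));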
+(define_insn_and_split "ix86_rotrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (rotatert:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:QI 2 "const_1_to_31_operand" "I")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "!TARGET_64BIT"
+ ""
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (parallel
+ [(set (match_dup 4)
+ (ior:SI (ashiftrt:SI (match_dup 4) (match_dup 2))
+ (ashift:SI (match_dup 5)
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 5)
+ (ior:SI (ashiftrt:SI (match_dup 5) (match_dup 2))
+ (ashift:SI (match_dup 3)
+ (minus:QI (const_int 32) (match_dup 2)))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "split_di (operands, 1, operands + 4, operands + 5);")
+
+(define_insn "*rotrdi3_1_one_bit_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (rotatert:DI (match_operand:DI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{q}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand:DI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrdi3_1_rex64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm")
+ (rotatert:DI (match_operand:DI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "J,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands)"
+ "@
+ ror{q}\t{%2, %0|%0, %2}
+ ror{q}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "DI")])
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (rotatert:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "ix86_expand_binary_operator (ROTATERT, SImode, operands); DONE;")
+
+(define_insn "*rotrsi3_1_one_bit"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (rotatert:SI (match_operand:SI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{l}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrsi3_1_one_bit_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:QI 2 "const1_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{l}\t%k0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand:SI 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrsi3_1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm")
+ (rotatert:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, SImode, operands)"
+ "@
+ ror{l}\t{%2, %0|%0, %2}
+ ror{l}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rotrsi3_1_zext"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands)"
+ "@
+ ror{l}\t{%2, %k0|%k0, %2}
+ ror{l}\t{%b2, %k0|%k0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "SI")])
+
+(define_expand "rotrhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (rotatert:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_HIMODE_MATH"
+ "ix86_expand_binary_operator (ROTATERT, HImode, operands); DONE;")
+
+(define_insn "*rotrhi3_one_bit"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (rotatert:HI (match_operand:HI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, HImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{w}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,rm")
+ (rotatert:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, HImode, operands)"
+ "@
+ ror{w}\t{%2, %0|%0, %2}
+ ror{w}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "HI")])
+
+(define_expand "rotrqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (rotatert:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_QIMODE_MATH"
+ "ix86_expand_binary_operator (ROTATERT, QImode, operands); DONE;")
+
+(define_insn "*rotrqi3_1_one_bit"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (rotatert:QI (match_operand:QI 1 "nonimmediate_operand" "0")
+ (match_operand:QI 2 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, QImode, operands)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{b}\t%0"
+ [(set_attr "type" "rotate")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrqi3_1_one_bit_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
+ (rotatert:QI (match_dup 0)
+ (match_operand:QI 1 "const1_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (TARGET_SHIFT1 || optimize_size)"
+ "ror{b}\t%0"
+ [(set_attr "type" "rotate1")
+ (set (attr "length")
+ (if_then_else (match_operand 0 "register_operand" "")
+ (const_string "2")
+ (const_string "*")))])
+
+(define_insn "*rotrqi3_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,qm")
+ (rotatert:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "ix86_binary_operator_ok (ROTATERT, QImode, operands)"
+ "@
+ ror{b}\t{%2, %0|%0, %2}
+ ror{b}\t{%b2, %0|%0, %b2}"
+ [(set_attr "type" "rotate")
+ (set_attr "mode" "QI")])
+
+(define_insn "*rotrqi3_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,qm"))
+ (rotatert:QI (match_dup 0)
+ (match_operand:QI 1 "nonmemory_operand" "I,c")))
+ (clobber (reg:CC FLAGS_REG))]
+ "(! TARGET_PARTIAL_REG_STALL || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ ror{b}\t{%1, %0|%0, %1}
+ ror{b}\t{%b1, %0|%0, %b1}"
+ [(set_attr "type" "rotate1")
+ (set_attr "mode" "QI")])
+
+;; Bit set / bit test instructions
+
+(define_expand "extv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const8_operand" "")
+ (match_operand:SI 3 "const8_operand" "")))]
+ ""
+{
+ /* Handle extractions from %ah et al. */
+ if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8)
+ FAIL;
+
+ /* From mips.md: extract_bit_field doesn't verify that our source
+ matches the predicate, so check it again here. */
+ if (! ext_register_operand (operands[1], VOIDmode))
+ FAIL;
+})
+
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extract:SI (match_operand 1 "ext_register_operand" "")
+ (match_operand:SI 2 "const8_operand" "")
+ (match_operand:SI 3 "const8_operand" "")))]
+ ""
+{
+ /* Handle extractions from %ah et al. */
+ if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8)
+ FAIL;
+
+ /* From mips.md: extract_bit_field doesn't verify that our source
+ matches the predicate, so check it again here. */
+ if (! ext_register_operand (operands[1], VOIDmode))
+ FAIL;
+})
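+
+;; Either way, extracting the eight bits at bit offset 8 of a value in
+;; %eax is just a use of %ah: "movsbl %ah, %edx" for extv and
+;; "movzbl %ah, %edx" for extzv.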
+
+(define_expand "insv"
+ [(set (zero_extract (match_operand 0 "ext_register_operand" "")
+ (match_operand 1 "const8_operand" "")
+ (match_operand 2 "const8_operand" ""))
+ (match_operand 3 "register_operand" ""))]
+ ""
+{
+ /* Handle insertions to %ah et al. */
+ if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) != 8)
+ FAIL;
+
+  /* From mips.md: store_bit_field doesn't verify that our destination
+     matches the predicate, so check it again here.  */
+ if (! ext_register_operand (operands[0], VOIDmode))
+ FAIL;
+
+ if (TARGET_64BIT)
+ emit_insn (gen_movdi_insv_1_rex64 (operands[0], operands[3]));
+ else
+ emit_insn (gen_movsi_insv_1 (operands[0], operands[3]));
+
+ DONE;
+})
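+
+;; The matching insertion of eight bits at offset 8 is a plain byte
+;; move into %ah, e.g. "movb %dl, %ah".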
+
+;; %%% bts, btr, btc, bt.
+;; In general these instructions are *slow* when applied to memory,
+;; since they enforce atomic operation. When applied to registers,
+;; it depends on the cpu implementation. They're never faster than
+;; the corresponding and/ior/xor operations, so with 32-bit there's
+;; no point. But in 64-bit, we can't hold the relevant immediates
+;; within the instruction itself, so operating on bits in the high
+;; 32-bits of a register becomes easier.
+;;
+;; These are slow on Nocona, but fast on Athlon64. We do require the use
+;; of btrq and btcq for corner cases of post-reload expansion of absdf and
+;; negdf respectively, so they can never be disabled entirely.
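+;;
+;; For example, "x |= 1ULL << 45": the mask does not fit in a
+;; sign-extended 32-bit immediate, so without bts it takes a movabs
+;; into a scratch register plus an or, while "bts $45, %rax" does it
+;; in a single instruction.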
+
+(define_insn "*btsq"
+ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (const_int 1))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (TARGET_USE_BT || reload_completed)"
+ "bts{q} %1,%0"
+ [(set_attr "type" "alu1")])
+
+(define_insn "*btrq"
+ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (TARGET_USE_BT || reload_completed)"
+ "btr{q} %1,%0"
+ [(set_attr "type" "alu1")])
+
+(define_insn "*btcq"
+ [(set (zero_extract:DI (match_operand:DI 0 "register_operand" "+r")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (not:DI (zero_extract:DI (match_dup 0) (const_int 1) (match_dup 1))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && (TARGET_USE_BT || reload_completed)"
+ "btc{q} %1,%0"
+ [(set_attr "type" "alu1")])
+
+;; Allow Nocona to avoid these instructions if a register is available.
+
+(define_peephole2
+ [(match_scratch:DI 2 "r")
+ (parallel [(set (zero_extract:DI
+ (match_operand:DI 0 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (const_int 1))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT && !TARGET_USE_BT"
+ [(const_int 0)]
+{
+ HOST_WIDE_INT i = INTVAL (operands[1]), hi, lo;
+ rtx op1;
+
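+  /* Build the 64-bit mask 1 << i as a (lo, hi) pair, since on a 32-bit
+     host HOST_WIDE_INT may be too narrow to hold it in one piece.  */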
+ if (HOST_BITS_PER_WIDE_INT >= 64)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else if (i < HOST_BITS_PER_WIDE_INT)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else
+ lo = 0, hi = (HOST_WIDE_INT)1 << (i - HOST_BITS_PER_WIDE_INT);
+
+ op1 = immed_double_const (lo, hi, DImode);
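+  /* Masks with bit 31 or above set are not valid sign-extended 32-bit
+     immediates for "or", so from bit 31 up go through the scratch.  */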
+ if (i >= 31)
+ {
+ emit_move_insn (operands[2], op1);
+ op1 = operands[2];
+ }
+
+ emit_insn (gen_iordi3 (operands[0], operands[0], op1));
+ DONE;
+})
+
+(define_peephole2
+ [(match_scratch:DI 2 "r")
+ (parallel [(set (zero_extract:DI
+ (match_operand:DI 0 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT && !TARGET_USE_BT"
+ [(const_int 0)]
+{
+ HOST_WIDE_INT i = INTVAL (operands[1]), hi, lo;
+ rtx op1;
+
+ if (HOST_BITS_PER_WIDE_INT >= 64)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else if (i < HOST_BITS_PER_WIDE_INT)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else
+ lo = 0, hi = (HOST_WIDE_INT)1 << (i - HOST_BITS_PER_WIDE_INT);
+
+ op1 = immed_double_const (~lo, ~hi, DImode);
+  /* As in the "bts" peephole above: for i == 31 the complemented mask
+     is not a valid sign- or zero-extended 32-bit immediate either.  */
+  if (i >= 31)
+ {
+ emit_move_insn (operands[2], op1);
+ op1 = operands[2];
+ }
+
+ emit_insn (gen_anddi3 (operands[0], operands[0], op1));
+ DONE;
+})
+
+(define_peephole2
+ [(match_scratch:DI 2 "r")
+ (parallel [(set (zero_extract:DI
+ (match_operand:DI 0 "register_operand" "")
+ (const_int 1)
+ (match_operand:DI 1 "const_0_to_63_operand" ""))
+ (not:DI (zero_extract:DI
+ (match_dup 0) (const_int 1) (match_dup 1))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT && !TARGET_USE_BT"
+ [(const_int 0)]
+{
+ HOST_WIDE_INT i = INTVAL (operands[1]), hi, lo;
+ rtx op1;
+
+ if (HOST_BITS_PER_WIDE_INT >= 64)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else if (i < HOST_BITS_PER_WIDE_INT)
+ lo = (HOST_WIDE_INT)1 << i, hi = 0;
+ else
+ lo = 0, hi = (HOST_WIDE_INT)1 << (i - HOST_BITS_PER_WIDE_INT);
+
+ op1 = immed_double_const (lo, hi, DImode);
+ if (i >= 31)
+ {
+ emit_move_insn (operands[2], op1);
+ op1 = operands[2];
+ }
+
+ emit_insn (gen_xordi3 (operands[0], operands[0], op1));
+ DONE;
+})
+
+;; Store-flag instructions.
+
+;; For all sCOND expanders, also expand the compare or test insn that
+;; generates cc0. Generate an equality comparison if `seq' or `sne'.
+
+;; %%% Do the expansion to SImode. If PII, do things the xor+setcc way
+;; to avoid partial register stalls. Otherwise do things the setcc+movzx
+;; way, which can later delete the movzx if only QImode is needed.
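+;;
+;; For instance, for "r = (a < b)" in SImode the two shapes are:
+;;
+;;   xor+setcc:   xorl %eax, %eax ; cmpl %esi, %edi ; setl %al
+;;   setcc+movzx: cmpl %esi, %edi ; setl %al ; movzbl %al, %eax
+;;
+;; (the xor must precede the compare, since it clobbers the flags).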
+
+(define_expand "seq"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (eq:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (EQ, operands[0])) DONE; else FAIL;")
+
+(define_expand "sne"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ne:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (NE, operands[0])) DONE; else FAIL;")
+
+(define_expand "sgt"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (gt:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (GT, operands[0])) DONE; else FAIL;")
+
+(define_expand "sgtu"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (gtu:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (GTU, operands[0])) DONE; else FAIL;")
+
+(define_expand "slt"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (lt:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (LT, operands[0])) DONE; else FAIL;")
+
+(define_expand "sltu"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ltu:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (LTU, operands[0])) DONE; else FAIL;")
+
+(define_expand "sge"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ge:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (GE, operands[0])) DONE; else FAIL;")
+
+(define_expand "sgeu"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (geu:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (GEU, operands[0])) DONE; else FAIL;")
+
+(define_expand "sle"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (le:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (LE, operands[0])) DONE; else FAIL;")
+
+(define_expand "sleu"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (leu:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ ""
+ "if (ix86_expand_setcc (LEU, operands[0])) DONE; else FAIL;")
+
+(define_expand "sunordered"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (unordered:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNORDERED, operands[0])) DONE; else FAIL;")
+
+(define_expand "sordered"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ordered:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387"
+ "if (ix86_expand_setcc (ORDERED, operands[0])) DONE; else FAIL;")
+
+(define_expand "suneq"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (uneq:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNEQ, operands[0])) DONE; else FAIL;")
+
+(define_expand "sunge"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (unge:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNGE, operands[0])) DONE; else FAIL;")
+
+(define_expand "sungt"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ungt:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNGT, operands[0])) DONE; else FAIL;")
+
+(define_expand "sunle"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (unle:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNLE, operands[0])) DONE; else FAIL;")
+
+(define_expand "sunlt"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (unlt:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (UNLT, operands[0])) DONE; else FAIL;")
+
+(define_expand "sltgt"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ltgt:QI (reg:CC FLAGS_REG) (const_int 0)))]
+ "TARGET_80387 || TARGET_SSE"
+ "if (ix86_expand_setcc (LTGT, operands[0])) DONE; else FAIL;")
+
+(define_insn "*setcc_1"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (match_operator:QI 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)]))]
+ ""
+ "set%C1\t%0"
+ [(set_attr "type" "setcc")
+ (set_attr "mode" "QI")])
+
+(define_insn "*setcc_2"
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm"))
+ (match_operator:QI 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)]))]
+ ""
+ "set%C1\t%0"
+ [(set_attr "type" "setcc")
+ (set_attr "mode" "QI")])
+
+;; In general it is not safe to assume too much about CCmode registers,
+;; so simplify-rtx stops when it sees a second one. Under certain
+;; conditions this is safe on x86, so help combine not create
+;;
+;; seta %al
+;; testb %al, %al
+;; sete %al
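+;;
+;; The splits below collapse that back to a single setcc, reversing the
+;; condition for the `eq' form; e.g. the sequence above becomes just
+;; "setbe %al" (LEU reversing GTU), provided the CC mode is strong
+;; enough for the reversal.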
+
+(define_split
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (ne:QI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0)))]
+ ""
+ [(set (match_dup 0) (match_dup 1))]
+{
+ PUT_MODE (operands[1], QImode);
+})
+
+(define_split
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" ""))
+ (ne:QI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0)))]
+ ""
+ [(set (match_dup 0) (match_dup 1))]
+{
+ PUT_MODE (operands[1], QImode);
+})
+
+(define_split
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (eq:QI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0)))]
+ ""
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx new_op1 = copy_rtx (operands[1]);
+ operands[1] = new_op1;
+ PUT_MODE (new_op1, QImode);
+ PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1),
+ GET_MODE (XEXP (new_op1, 0))));
+
+ /* Make sure that (a) the CCmode we have for the flags is strong
+ enough for the reversed compare or (b) we have a valid FP compare. */
+ if (! ix86_comparison_operator (new_op1, VOIDmode))
+ FAIL;
+})
+
+(define_split
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" ""))
+ (eq:QI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0)))]
+ ""
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx new_op1 = copy_rtx (operands[1]);
+ operands[1] = new_op1;
+ PUT_MODE (new_op1, QImode);
+ PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1),
+ GET_MODE (XEXP (new_op1, 0))));
+
+ /* Make sure that (a) the CCmode we have for the flags is strong
+ enough for the reversed compare or (b) we have a valid FP compare. */
+ if (! ix86_comparison_operator (new_op1, VOIDmode))
+ FAIL;
+})
+
+;; The SSE store-flag instructions save 0 or 0xffffffff to the result.
+;; Subsequent logical operations are used to imitate conditional moves.
+;; 0xffffffff is NaN, but not in normalized form, so we can't represent
+;; it directly.
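+;;
+;; E.g. "r = a < b ? c : d" can be emulated as (illustrative registers)
+;;
+;; cmpltss %xmm1, %xmm0 # mask = a < b ? ~0 : 0
+;; movaps %xmm0, %xmm4
+;; andps %xmm2, %xmm0 # mask & c
+;; andnps %xmm3, %xmm4 # ~mask & d
+;; orps %xmm4, %xmm0 # r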
+
+(define_insn "*sse_setccsf"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (match_operator:SF 1 "sse_comparison_operator"
+ [(match_operand:SF 2 "register_operand" "0")
+ (match_operand:SF 3 "nonimmediate_operand" "xm")]))]
+ "TARGET_SSE"
+ "cmp%D1ss\t{%3, %0|%0, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "SF")])
+
+(define_insn "*sse_setccdf"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (match_operator:DF 1 "sse_comparison_operator"
+ [(match_operand:DF 2 "register_operand" "0")
+ (match_operand:DF 3 "nonimmediate_operand" "xm")]))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2"
+ "cmp%D1sd\t{%3, %0|%0, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "DF")])
+
+;; Basic conditional jump instructions.
+;; We ignore the overflow flag for signed branch instructions.
+
+;; For all bCOND expanders, also expand the compare or test insn that
+;; generates reg FLAGS_REG. Generate an equality comparison if `beq' or `bne'.
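+;;
+;; E.g. "beq" for "if (a == b) goto L" expands to a compare plus a
+;; conditional jump (illustrative registers):
+;;
+;; cmpl %esi, %edi
+;; je .L2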
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (EQ, operands[0]); DONE;")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (NE, operands[0]); DONE;")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (GT, operands[0]); DONE;")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (GTU, operands[0]); DONE;")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (LT, operands[0]); DONE;")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (LTU, operands[0]); DONE;")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (GE, operands[0]); DONE;")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (GEU, operands[0]); DONE;")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (LE, operands[0]); DONE;")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "ix86_expand_branch (LEU, operands[0]); DONE;")
+
+(define_expand "bunordered"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNORDERED, operands[0]); DONE;")
+
+(define_expand "bordered"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (ORDERED, operands[0]); DONE;")
+
+(define_expand "buneq"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNEQ, operands[0]); DONE;")
+
+(define_expand "bunge"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNGE, operands[0]); DONE;")
+
+(define_expand "bungt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNGT, operands[0]); DONE;")
+
+(define_expand "bunle"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNLE, operands[0]); DONE;")
+
+(define_expand "bunlt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (UNLT, operands[0]); DONE;")
+
+(define_expand "bltgt"
+ [(set (pc)
+ (if_then_else (match_dup 1)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "ix86_expand_branch (LTGT, operands[0]); DONE;")
+
+(define_insn "*jcc_1"
+ [(set (pc)
+ (if_then_else (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "%+j%C1\t%l0"
+ [(set_attr "type" "ibr")
+ (set_attr "modrm" "0")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -126))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 128)))
+ (const_int 2)
+ (const_int 6)))])
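+
+;; A note on the length attribute above: the short form is the 2-byte
+;; encoding (0x70+cc opcode plus rel8), the near form the 6-byte one
+;; (0x0F, 0x80+cc, rel32).  The asymmetric range (-126 rather than -128)
+;; leaves room for the jump's own length, since rel8 is counted from the
+;; end of the insn.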
+
+(define_insn "*jcc_2"
+ [(set (pc)
+ (if_then_else (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "%+j%c1\t%l0"
+ [(set_attr "type" "ibr")
+ (set_attr "modrm" "0")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -126))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 128)))
+ (const_int 2)
+ (const_int 6)))])
+
+;; In general it is not safe to assume too much about CCmode registers,
+;; so simplify-rtx stops when it sees a second one. Under certain
+;; conditions this is safe on x86, so help combine not create
+;;
+;; seta %al
+;; testb %al, %al
+;; je Lfoo
+
+(define_split
+ [(set (pc)
+ (if_then_else (ne (match_operator 0 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc)
+ (if_then_else (match_dup 0)
+ (label_ref (match_dup 1))
+ (pc)))]
+{
+ PUT_MODE (operands[0], VOIDmode);
+})
+
+(define_split
+ [(set (pc)
+ (if_then_else (eq (match_operator 0 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc)
+ (if_then_else (match_dup 0)
+ (label_ref (match_dup 1))
+ (pc)))]
+{
+ rtx new_op0 = copy_rtx (operands[0]);
+ operands[0] = new_op0;
+ PUT_MODE (new_op0, VOIDmode);
+ PUT_CODE (new_op0, ix86_reverse_condition (GET_CODE (new_op0),
+ GET_MODE (XEXP (new_op0, 0))));
+
+ /* Make sure that (a) the CCmode we have for the flags is strong
+ enough for the reversed compare or (b) we have a valid FP compare. */
+ if (! ix86_comparison_operator (new_op0, VOIDmode))
+ FAIL;
+})
+
+;; Define combination compare-and-branch fp compare instructions to use
+;; during early optimization. Splitting the operation apart early makes
+;; for bad code when we want to reverse the operation.
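+;;
+;; E.g. with SSE math, "a > b" maps directly onto "ucomiss %xmm1, %xmm0"
+;; followed by "ja" (unordered sets CF, so the branch falls through on
+;; NaN), but the reversed "a <= b" would need swapped operands or an
+;; extra parity check; keeping compare and branch fused until the split
+;; lets ix86_split_fp_branch pick the cheap direction.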
+
+(define_insn "*fp_jcc_1_mixed"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f,x")
+ (match_operand 2 "nonimmediate_operand" "f,xm")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_MIX_SSE_I387
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_1_sse"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "x")
+ (match_operand 2 "nonimmediate_operand" "xm")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_SSE_MATH
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_1_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "register_operand" "f")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_CMOVE && TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_2_mixed"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f,x")
+ (match_operand 2 "nonimmediate_operand" "f,xm")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_MIX_SSE_I387
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_2_sse"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "x")
+ (match_operand 2 "nonimmediate_operand" "xm")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_SSE_MATH
+ && SSE_FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_2_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "register_operand" "f")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "TARGET_CMOVE && TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_3_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "nonimmediate_operand" "fm")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 4 "=a"))]
+ "TARGET_80387
+ && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode)
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && !ix86_use_fcomi_compare (GET_CODE (operands[0]))
+ && SELECT_CC_MODE (GET_CODE (operands[0]),
+ operands[1], operands[2]) == CCFPmode
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_4_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "nonimmediate_operand" "fm")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 4 "=a"))]
+ "TARGET_80387
+ && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode)
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && !ix86_use_fcomi_compare (GET_CODE (operands[0]))
+ && SELECT_CC_MODE (GET_CODE (operands[0]),
+ operands[1], operands[2]) == CCFPmode
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_5_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "register_operand" "f")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 4 "=a"))]
+ "TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_6_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "register_operand" "f")])
+ (pc)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 4 "=a"))]
+ "TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+(define_insn "*fp_jcc_7_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "f")
+ (match_operand 2 "const0_operand" "X")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 4 "=a"))]
+ "TARGET_80387
+ && FLOAT_MODE_P (GET_MODE (operands[1]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[2])
+ && !ix86_use_fcomi_compare (GET_CODE (operands[0]))
+ && SELECT_CC_MODE (GET_CODE (operands[0]),
+ operands[1], operands[2]) == CCFPmode
+ && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))"
+ "#")
+
+;; The order of operands in *fp_jcc_8_387 is forced by combine in the
+;; simplify_comparison () function.  A float operator is treated as RTX_OBJ,
+;; takes precedence over other operators, and is always put in the first
+;; place.  Swap the condition and operands to match the ficom instruction.
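+;;
+;; E.g. "x > (double) i" reaches combine as (gt x (float i)) and is
+;; canonicalized to (lt (float i) x); the splitters below apply
+;; swap_condition so the integer operand can be compared against %st(0).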
+
+(define_insn "*fp_jcc_8<mode>_387"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operator 1 "float_operator"
+ [(match_operand:X87MODEI12 2 "nonimmediate_operand" "m,?r")])
+ (match_operand 3 "register_operand" "f,f")])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 5 "=a,a"))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP
+ && FLOAT_MODE_P (GET_MODE (operands[3]))
+ && GET_MODE (operands[1]) == GET_MODE (operands[3])
+ && !ix86_use_fcomi_compare (swap_condition (GET_CODE (operands[0])))
+ && ix86_fp_compare_mode (swap_condition (GET_CODE (operands[0]))) == CCFPmode
+ && ix86_fp_jump_nontrivial_p (swap_condition (GET_CODE (operands[0])))"
+ "#")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operand 2 "nonimmediate_operand" "")])
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_fp_branch (GET_CODE (operands[0]), operands[1], operands[2],
+ operands[3], operands[4], NULL_RTX, NULL_RTX);
+ DONE;
+})
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operand 2 "general_operand" "")])
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 5 "=a"))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ ix86_split_fp_branch (GET_CODE (operands[0]), operands[1], operands[2],
+ operands[3], operands[4], operands[5], NULL_RTX);
+ DONE;
+})
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operator 1 "float_operator"
+ [(match_operand:X87MODEI12 2 "memory_operand" "")])
+ (match_operand 3 "register_operand" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 6 "=a"))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ operands[7] = gen_rtx_FLOAT (GET_MODE (operands[1]), operands[2]);
+ ix86_split_fp_branch (swap_condition (GET_CODE (operands[0])),
+ operands[3], operands[7],
+ operands[4], operands[5], operands[6], NULL_RTX);
+ DONE;
+})
+
+;; %%% Kill this when reload knows how to do it.
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operator 1 "float_operator"
+ [(match_operand:X87MODEI12 2 "register_operand" "")])
+ (match_operand 3 "register_operand" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CCFP FPSR_REG))
+ (clobber (reg:CCFP FLAGS_REG))
+ (clobber (match_scratch:HI 6 "=a"))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ operands[7] = ix86_force_to_memory (GET_MODE (operands[2]), operands[2]);
+ operands[7] = gen_rtx_FLOAT (GET_MODE (operands[1]), operands[7]);
+ ix86_split_fp_branch (swap_condition (GET_CODE (operands[0])),
+ operands[3], operands[7],
+ operands[4], operands[5], operands[6], operands[2]);
+ DONE;
+})
+
+;; Unconditional and other jump instructions
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp\t%l0"
+ [(set_attr "type" "ibr")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -126))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 128)))
+ (const_int 2)
+ (const_int 5)))
+ (set_attr "modrm" "0")])
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "nonimmediate_operand" "rm"))]
+ ""
+ "")
+
+(define_insn "*indirect_jump"
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))]
+ "!TARGET_64BIT"
+ "jmp\t%A0"
+ [(set_attr "type" "ibr")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*indirect_jump_rtx64"
+ [(set (pc) (match_operand:DI 0 "nonimmediate_operand" "rm"))]
+ "TARGET_64BIT"
+ "jmp\t%A0"
+ [(set_attr "type" "ibr")
+ (set_attr "length_immediate" "0")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "nonimmediate_operand" "rm"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+{
+ /* In PIC mode, the table entries are stored GOT-relative (32-bit) or
+ PC-relative (64-bit). Convert the relative address to an absolute one. */
+ if (flag_pic)
+ {
+ rtx op0, op1;
+ enum rtx_code code;
+
+ if (TARGET_64BIT)
+ {
+ code = PLUS;
+ op0 = operands[0];
+ op1 = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ }
+ else if (TARGET_MACHO || HAVE_AS_GOTOFF_IN_DATA)
+ {
+ code = PLUS;
+ op0 = operands[0];
+ op1 = pic_offset_table_rtx;
+ }
+ else
+ {
+ code = MINUS;
+ op0 = pic_offset_table_rtx;
+ op1 = operands[0];
+ }
+
+ operands[0] = expand_simple_binop (Pmode, code, op0, op1, NULL_RTX, 0,
+ OPTAB_DIRECT);
+ }
+})
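+
+;; E.g. on 32-bit PIC with GOTOFF, the jump table holds ".long .L5@GOTOFF"
+;; entries, so the expansion above yields roughly (illustrative registers)
+;;
+;; movl .L4@GOTOFF(%ebx,%eax,4), %eax
+;; addl %ebx, %eax
+;; jmp *%eax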
+
+(define_insn "*tablejump_1"
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "!TARGET_64BIT"
+ "jmp\t%A0"
+ [(set_attr "type" "ibr")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*tablejump_1_rtx64"
+ [(set (pc) (match_operand:DI 0 "nonimmediate_operand" "rm"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "TARGET_64BIT"
+ "jmp\t%A0"
+ [(set_attr "type" "ibr")
+ (set_attr "length_immediate" "0")])
+
+;; Convert setcc + movzbl to xor + setcc if operands don't overlap.
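+;; E.g. (illustrative registers) "cmpl %esi,%edi; sete %al; movzbl %al,%eax"
+;; becomes "xorl %eax,%eax; cmpl %esi,%edi; sete %al"; the clear must be
+;; emitted ahead of the compare because xor clobbers the flags.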
+
+(define_peephole2
+ [(set (reg FLAGS_REG) (match_operand 0 "" ""))
+ (set (match_operand:QI 1 "register_operand" "")
+ (match_operator:QI 2 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)]))
+ (set (match_operand 3 "q_regs_operand" "")
+ (zero_extend (match_dup 1)))]
+ "(peep2_reg_dead_p (3, operands[1])
+ || operands_match_p (operands[1], operands[3]))
+ && ! reg_overlap_mentioned_p (operands[3], operands[0])"
+ [(set (match_dup 4) (match_dup 0))
+ (set (strict_low_part (match_dup 5))
+ (match_dup 2))]
+{
+ operands[4] = gen_rtx_REG (GET_MODE (operands[0]), FLAGS_REG);
+ operands[5] = gen_lowpart (QImode, operands[3]);
+ ix86_expand_clear (operands[3]);
+})
+
+;; Similar, but match zero_extendhisi2_and, which adds a clobber.
+
+(define_peephole2
+ [(set (reg FLAGS_REG) (match_operand 0 "" ""))
+ (set (match_operand:QI 1 "register_operand" "")
+ (match_operator:QI 2 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)]))
+ (parallel [(set (match_operand 3 "q_regs_operand" "")
+ (zero_extend (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "(peep2_reg_dead_p (3, operands[1])
+ || operands_match_p (operands[1], operands[3]))
+ && ! reg_overlap_mentioned_p (operands[3], operands[0])"
+ [(set (match_dup 4) (match_dup 0))
+ (set (strict_low_part (match_dup 5))
+ (match_dup 2))]
+{
+ operands[4] = gen_rtx_REG (GET_MODE (operands[0]), FLAGS_REG);
+ operands[5] = gen_lowpart (QImode, operands[3]);
+ ix86_expand_clear (operands[3]);
+})
+
+;; Call instructions.
+
+;; The predicates normally associated with named expanders are not properly
+;; checked for calls. This is a bug in the generic code, but it isn't that
+;; easy to fix. Ignore it for now and be prepared to fix things up.
+
+;; Call subroutine returning no value.
+
+(define_expand "call_pop"
+ [(parallel [(call (match_operand:QI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 3 "" "")))])]
+ "!TARGET_64BIT"
+{
+ ix86_expand_call (NULL, operands[0], operands[1], operands[2], operands[3], 0);
+ DONE;
+})
+
+(define_insn "*call_pop_0"
+ [(call (mem:QI (match_operand:SI 0 "constant_call_address_operand" ""))
+ (match_operand:SI 1 "" ""))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 2 "immediate_operand" "")))]
+ "!TARGET_64BIT"
+{
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P0";
+ else
+ return "call\t%P0";
+}
+ [(set_attr "type" "call")])
+
+(define_insn "*call_pop_1"
+ [(call (mem:QI (match_operand:SI 0 "call_insn_operand" "rsm"))
+ (match_operand:SI 1 "" ""))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "!TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[0], Pmode))
+ {
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P0";
+ else
+ return "call\t%P0";
+ }
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%A0";
+ else
+ return "call\t%A0";
+}
+ [(set_attr "type" "call")])
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))]
+ ""
+{
+ ix86_expand_call (NULL, operands[0], operands[1], operands[2], NULL, 0);
+ DONE;
+})
+
+(define_expand "sibcall"
+ [(call (match_operand:QI 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))]
+ ""
+{
+ ix86_expand_call (NULL, operands[0], operands[1], operands[2], NULL, 1);
+ DONE;
+})
+
+(define_insn "*call_0"
+ [(call (mem:QI (match_operand 0 "constant_call_address_operand" ""))
+ (match_operand 1 "" ""))]
+ ""
+{
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P0";
+ else
+ return "call\t%P0";
+}
+ [(set_attr "type" "call")])
+
+(define_insn "*call_1"
+ [(call (mem:QI (match_operand:SI 0 "call_insn_operand" "rsm"))
+ (match_operand 1 "" ""))]
+ "!SIBLING_CALL_P (insn) && !TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[0], Pmode))
+ return "call\t%P0";
+ return "call\t%A0";
+}
+ [(set_attr "type" "call")])
+
+(define_insn "*sibcall_1"
+ [(call (mem:QI (match_operand:SI 0 "sibcall_insn_operand" "s,c,d,a"))
+ (match_operand 1 "" ""))]
+ "SIBLING_CALL_P (insn) && !TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[0], Pmode))
+ return "jmp\t%P0";
+ return "jmp\t%A0";
+}
+ [(set_attr "type" "call")])
+
+(define_insn "*call_1_rex64"
+ [(call (mem:QI (match_operand:DI 0 "call_insn_operand" "rsm"))
+ (match_operand 1 "" ""))]
+ "!SIBLING_CALL_P (insn) && TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[0], Pmode))
+ return "call\t%P0";
+ return "call\t%A0";
+}
+ [(set_attr "type" "call")])
+
+(define_insn "*sibcall_1_rex64"
+ [(call (mem:QI (match_operand:DI 0 "constant_call_address_operand" ""))
+ (match_operand 1 "" ""))]
+ "SIBLING_CALL_P (insn) && TARGET_64BIT"
+ "jmp\t%P0"
+ [(set_attr "type" "call")])
+
+(define_insn "*sibcall_1_rex64_v"
+ [(call (mem:QI (reg:DI 40))
+ (match_operand 0 "" ""))]
+ "SIBLING_CALL_P (insn) && TARGET_64BIT"
+ "jmp\t*%%r11"
+ [(set_attr "type" "call")])
+
+
+;; Call subroutine, returning value in operand 0
+
+(define_expand "call_value_pop"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "" "")
+ (match_operand:SI 2 "" "")))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 4 "" "")))])]
+ "!TARGET_64BIT"
+{
+ ix86_expand_call (operands[0], operands[1], operands[2],
+ operands[3], operands[4], 0);
+ DONE;
+})
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "" "")
+ (match_operand:SI 2 "" "")))
+ (use (match_operand:SI 3 "" ""))]
+ ;; Operand 2 not used on the i386.
+ ""
+{
+ ix86_expand_call (operands[0], operands[1], operands[2], operands[3], NULL, 0);
+ DONE;
+})
+
+(define_expand "sibcall_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "" "")
+ (match_operand:SI 2 "" "")))
+ (use (match_operand:SI 3 "" ""))]
+ ;; Operand 2 not used on the i386.
+ ""
+{
+ ix86_expand_call (operands[0], operands[1], operands[2], operands[3], NULL, 1);
+ DONE;
+})
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+{
+ int i;
+
+ /* In order to give reg-stack an easier job in validating two
+ coprocessor registers as containing a possible return value,
+ simply pretend the untyped call returns a complex long double
+ value. */
+
+ ix86_expand_call ((TARGET_FLOAT_RETURNS_IN_80387
+ ? gen_rtx_REG (XCmode, FIRST_FLOAT_REG) : NULL),
+ operands[0], const0_rtx, GEN_INT (SSE_REGPARM_MAX - 1),
+ NULL, 0);
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage (const0_rtx));
+
+ DONE;
+})
+
+;; Prologue and epilogue instructions
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+;; Insn emitted into the body of a function to return from a function.
+;; This is only done if the function's epilogue is known to be simple.
+;; See comments for ix86_can_use_return_insn_p in i386.c.
+
+(define_expand "return"
+ [(return)]
+ "ix86_can_use_return_insn_p ()"
+{
+ if (current_function_pops_args)
+ {
+ rtx popc = GEN_INT (current_function_pops_args);
+ emit_jump_insn (gen_return_pop_internal (popc));
+ DONE;
+ }
+})
+
+(define_insn "return_internal"
+ [(return)]
+ "reload_completed"
+ "ret"
+ [(set_attr "length" "1")
+ (set_attr "length_immediate" "0")
+ (set_attr "modrm" "0")])
+
+;; Used by x86_machine_dependent_reorg to avoid the penalty that Athlon and
+;; K8 incur on a single-byte RET instruction.
+
+(define_insn "return_internal_long"
+ [(return)
+ (unspec [(const_int 0)] UNSPEC_REP)]
+ "reload_completed"
+ "rep {;} ret"
+ [(set_attr "length" "1")
+ (set_attr "length_immediate" "0")
+ (set_attr "prefix_rep" "1")
+ (set_attr "modrm" "0")])
+
+(define_insn "return_pop_internal"
+ [(return)
+ (use (match_operand:SI 0 "const_int_operand" ""))]
+ "reload_completed"
+ "ret\t%0"
+ [(set_attr "length" "3")
+ (set_attr "length_immediate" "2")
+ (set_attr "modrm" "0")])
+
+(define_insn "return_indirect_internal"
+ [(return)
+ (use (match_operand:SI 0 "register_operand" "r"))]
+ "reload_completed"
+ "jmp\t%A0"
+ [(set_attr "type" "ibr")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "length" "1")
+ (set_attr "length_immediate" "0")
+ (set_attr "modrm" "0")])
+
+;; Align to a 16-byte boundary, max skip in op0.  Used to avoid the
+;; branch-prediction penalty for the third jump in a 16-byte block on K8.
+
+(define_insn "align"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_ALIGN)]
+ ""
+{
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file, 4, (int)INTVAL (operands[0]));
+#else
+ /* It is tempting to use ASM_OUTPUT_ALIGN here, but we don't want to do that.
+ The align insn is used to avoid three jump instructions in a row, which
+ improves branch prediction; that benefit hardly outweighs the cost of the
+ eight nops (on average) that the full alignment pseudo operation inserts. */
+#endif
+ return "";
+}
+ [(set_attr "length" "16")])
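+
+;; With a gas-style assembler, ASM_OUTPUT_MAX_SKIP_ALIGN above emits e.g.
+;; ".p2align 4,,7" for (unspec_volatile [(const_int 7)] UNSPECV_ALIGN):
+;; align to 16 bytes, but only if at most 7 padding bytes are needed.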
+
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "ix86_expand_prologue (); DONE;")
+
+(define_insn "set_got"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_SET_GOT))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ { return output_set_got (operands[0], NULL_RTX); }
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
+(define_insn "set_got_labelled"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(label_ref (match_operand 1 "" ""))]
+ UNSPEC_SET_GOT))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ { return output_set_got (operands[0], operands[1]); }
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
+(define_insn "set_got_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_SET_GOT))]
+ "TARGET_64BIT"
+ "lea{q}\t_GLOBAL_OFFSET_TABLE_(%%rip), %0"
+ [(set_attr "type" "lea")
+ (set_attr "length" "6")])
+
+(define_expand "epilogue"
+ [(const_int 1)]
+ ""
+ "ix86_expand_epilogue (1); DONE;")
+
+(define_expand "sibcall_epilogue"
+ [(const_int 1)]
+ ""
+ "ix86_expand_epilogue (0); DONE;")
+
+(define_expand "eh_return"
+ [(use (match_operand 0 "register_operand" ""))]
+ ""
+{
+ rtx tmp, sa = EH_RETURN_STACKADJ_RTX, ra = operands[0];
+
+ /* Tricky bit: we write the address of the handler to which we will
+ be returning into someone else's stack frame, one word below the
+ stack address we wish to restore. */
+ tmp = gen_rtx_PLUS (Pmode, arg_pointer_rtx, sa);
+ tmp = plus_constant (tmp, -UNITS_PER_WORD);
+ tmp = gen_rtx_MEM (Pmode, tmp);
+ emit_move_insn (tmp, ra);
+
+ if (Pmode == SImode)
+ emit_jump_insn (gen_eh_return_si (sa));
+ else
+ emit_jump_insn (gen_eh_return_di (sa));
+ emit_barrier ();
+ DONE;
+})
+
+(define_insn_and_split "eh_return_si"
+ [(set (pc)
+ (unspec [(match_operand:SI 0 "register_operand" "c")]
+ UNSPEC_EH_RETURN))]
+ "!TARGET_64BIT"
+ "#"
+ "reload_completed"
+ [(const_int 1)]
+ "ix86_expand_epilogue (2); DONE;")
+
+(define_insn_and_split "eh_return_di"
+ [(set (pc)
+ (unspec [(match_operand:DI 0 "register_operand" "c")]
+ UNSPEC_EH_RETURN))]
+ "TARGET_64BIT"
+ "#"
+ "reload_completed"
+ [(const_int 1)]
+ "ix86_expand_epilogue (2); DONE;")
+
+(define_insn "leave"
+ [(set (reg:SI SP_REG) (plus:SI (reg:SI BP_REG) (const_int 4)))
+ (set (reg:SI BP_REG) (mem:SI (reg:SI BP_REG)))
+ (clobber (mem:BLK (scratch)))]
+ "!TARGET_64BIT"
+ "leave"
+ [(set_attr "type" "leave")])
+
+(define_insn "leave_rex64"
+ [(set (reg:DI SP_REG) (plus:DI (reg:DI BP_REG) (const_int 8)))
+ (set (reg:DI BP_REG) (mem:DI (reg:DI BP_REG)))
+ (clobber (mem:BLK (scratch)))]
+ "TARGET_64BIT"
+ "leave"
+ [(set_attr "type" "leave")])
+
+(define_expand "ffssi2"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ffs:SI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn_and_split "*ffs_cmove"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ffs:SI (match_operand:SI 1 "nonimmediate_operand" "rm")))
+ (clobber (match_scratch:SI 2 "=&r"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_CMOVE"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (const_int -1))
+ (parallel [(set (reg:CCZ FLAGS_REG) (compare:CCZ (match_dup 1) (const_int 0)))
+ (set (match_dup 0) (ctz:SI (match_dup 1)))])
+ (set (match_dup 0) (if_then_else:SI
+ (eq (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_dup 2)
+ (match_dup 0)))
+ (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_insn_and_split "*ffs_no_cmove"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
+ (ffs:SI (match_operand:SI 1 "nonimmediate_operand" "rm")))
+ (clobber (match_scratch:SI 2 "=&q"))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "#"
+ "reload_completed"
+ [(parallel [(set (reg:CCZ FLAGS_REG) (compare:CCZ (match_dup 1) (const_int 0)))
+ (set (match_dup 0) (ctz:SI (match_dup 1)))])
+ (set (strict_low_part (match_dup 3))
+ (eq:QI (reg:CCZ FLAGS_REG) (const_int 0)))
+ (parallel [(set (match_dup 2) (neg:SI (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_dup 0) (ior:SI (match_dup 0) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (const_int 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ operands[3] = gen_lowpart (QImode, operands[2]);
+ ix86_expand_clear (operands[2]);
+})
+
+(define_insn "*ffssi_1"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (match_operand:SI 1 "nonimmediate_operand" "rm")
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (ctz:SI (match_dup 1)))]
+ ""
+ "bsf{l}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+
+(define_expand "ffsdi2"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ffs:DI (match_operand:DI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:DI 2 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT && TARGET_CMOVE"
+ "")
+
+(define_insn_and_split "*ffs_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ffs:DI (match_operand:DI 1 "nonimmediate_operand" "rm")))
+ (clobber (match_scratch:DI 2 "=&r"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && TARGET_CMOVE"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (const_int -1))
+ (parallel [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (match_dup 1) (const_int 0)))
+ (set (match_dup 0) (ctz:DI (match_dup 1)))])
+ (set (match_dup 0) (if_then_else:DI
+ (eq (reg:CCZ FLAGS_REG) (const_int 0))
+ (match_dup 2)
+ (match_dup 0)))
+ (parallel [(set (match_dup 0) (plus:DI (match_dup 0) (const_int 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_insn "*ffsdi_1"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (match_operand:DI 1 "nonimmediate_operand" "rm")
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (ctz:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "bsf{q}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+
+(define_insn "ctzsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ctz:SI (match_operand:SI 1 "nonimmediate_operand" "rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "bsf{l}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+
+(define_insn "ctzdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ctz:DI (match_operand:DI 1 "nonimmediate_operand" "rm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "bsf{q}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+
+(define_expand "clzsi2"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (const_int 31)
+ (clz:SI (match_operand:SI 1 "nonimmediate_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 0) (xor:SI (match_dup 0) (const_int 31)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*bsr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (const_int 31)
+ (clz:SI (match_operand:SI 1 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "bsr{l}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+;; APPLE LOCAL begin mainline bswap
+(define_insn "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (bswap:SI (match_operand:SI 1 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_BSWAP"
+ "bswap\t%k0"
+ [(set_attr "prefix_0f" "1")
+ (set_attr "length" "2")])
+
+(define_insn "bswapdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (bswap:DI (match_operand:DI 1 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && TARGET_BSWAP"
+ "bswap\t%0"
+ [(set_attr "prefix_0f" "1")
+ (set_attr "length" "3")])
+;; APPLE LOCAL end mainline bswap
+
+(define_expand "clzdi2"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (const_int 63)
+ (clz:DI (match_operand:DI 1 "nonimmediate_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel
+ [(set (match_dup 0) (xor:DI (match_dup 0) (const_int 63)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*bsr_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (const_int 63)
+ (clz:DI (match_operand:DI 1 "nonimmediate_operand" "rm"))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "bsr{q}\t{%1, %0|%0, %1}"
+ [(set_attr "prefix_0f" "1")])
+
+;; Thread-local storage patterns for ELF.
+;;
+;; Note that these code sequences must appear exactly as shown
+;; in order to allow linker relaxation.
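+;;
+;; E.g. the 32-bit global-dynamic sequence below,
+;;
+;; leal x@TLSGD(,%ebx,1), %eax
+;; call ___tls_get_addr
+;;
+;; is the byte pattern the linker matches when relaxing general dynamic
+;; down to, roughly, the local-exec form
+;; "movl %gs:0, %eax; subl $x@tpoff, %eax".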
+
+(define_insn "*tls_global_dynamic_32_gnu"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "tls_symbolic_operand" "")
+ (match_operand:SI 3 "call_insn_operand" "")]
+ UNSPEC_TLS_GD))
+ (clobber (match_scratch:SI 4 "=d"))
+ (clobber (match_scratch:SI 5 "=c"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_GNU_TLS"
+ "lea{l}\t{%a2@TLSGD(,%1,1), %0|%0, %a2@TLSGD[%1*1]}\;call\t%P3"
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
+(define_insn "*tls_global_dynamic_32_sun"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "tls_symbolic_operand" "")
+ (match_operand:SI 3 "call_insn_operand" "")]
+ UNSPEC_TLS_GD))
+ (clobber (match_scratch:SI 4 "=d"))
+ (clobber (match_scratch:SI 5 "=c"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_SUN_TLS"
+ "lea{l}\t{%a2@DTLNDX(%1), %4|%4, %a2@DTLNDX[%1]}
+ push{l}\t%4\;call\t%a2@TLSPLT\;pop{l}\t%4\;nop"
+ [(set_attr "type" "multi")
+ (set_attr "length" "14")])
+
+(define_expand "tls_global_dynamic_32"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI
+ [(match_dup 2)
+ (match_operand:SI 1 "tls_symbolic_operand" "")
+ (match_dup 3)]
+ UNSPEC_TLS_GD))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_scratch:SI 5 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+{
+ if (flag_pic)
+ operands[2] = pic_offset_table_rtx;
+ else
+ {
+ operands[2] = gen_reg_rtx (Pmode);
+ emit_insn (gen_set_got (operands[2]));
+ }
+ if (TARGET_GNU2_TLS)
+ {
+ emit_insn (gen_tls_dynamic_gnu2_32
+ (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ operands[3] = ix86_tls_get_addr ();
+})
+
+(define_insn "*tls_global_dynamic_64"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (call:DI (mem:QI (match_operand:DI 2 "call_insn_operand" ""))
+ (match_operand:DI 3 "" "")))
+ (unspec:DI [(match_operand:DI 1 "tls_symbolic_operand" "")]
+ UNSPEC_TLS_GD)]
+ "TARGET_64BIT"
+ ".byte\t0x66\;lea{q}\t{%a1@TLSGD(%%rip), %%rdi|%%rdi, %a1@TLSGD[%%rip]}\;.word\t0x6666\;rex64\;call\t%P2"
+ [(set_attr "type" "multi")
+ (set_attr "length" "16")])
+
+(define_expand "tls_global_dynamic_64"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (call:DI (mem:QI (match_dup 2)) (const_int 0)))
+ (unspec:DI [(match_operand:DI 1 "tls_symbolic_operand" "")]
+ UNSPEC_TLS_GD)])]
+ ""
+{
+ if (TARGET_GNU2_TLS)
+ {
+ emit_insn (gen_tls_dynamic_gnu2_64
+ (operands[0], operands[1]));
+ DONE;
+ }
+ operands[2] = ix86_tls_get_addr ();
+})
+
+(define_insn "*tls_local_dynamic_base_32_gnu"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "call_insn_operand" "")]
+ UNSPEC_TLS_LD_BASE))
+ (clobber (match_scratch:SI 3 "=d"))
+ (clobber (match_scratch:SI 4 "=c"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_GNU_TLS"
+ "lea{l}\t{%&@TLSLDM(%1), %0|%0, %&@TLSLDM[%1]}\;call\t%P2"
+ [(set_attr "type" "multi")
+ (set_attr "length" "11")])
+
+(define_insn "*tls_local_dynamic_base_32_sun"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "call_insn_operand" "")]
+ UNSPEC_TLS_LD_BASE))
+ (clobber (match_scratch:SI 3 "=d"))
+ (clobber (match_scratch:SI 4 "=c"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_SUN_TLS"
+ "lea{l}\t{%&@TMDNX(%1), %3|%3, %&@TMDNX[%1]}
+ push{l}\t%3\;call\t%&@TLSPLT\;pop{l}\t%3"
+ [(set_attr "type" "multi")
+ (set_attr "length" "13")])
+
+(define_expand "tls_local_dynamic_base_32"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(match_dup 1) (match_dup 2)]
+ UNSPEC_TLS_LD_BASE))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+{
+ if (flag_pic)
+ operands[1] = pic_offset_table_rtx;
+ else
+ {
+ operands[1] = gen_reg_rtx (Pmode);
+ emit_insn (gen_set_got (operands[1]));
+ }
+ if (TARGET_GNU2_TLS)
+ {
+ emit_insn (gen_tls_dynamic_gnu2_32
+ (operands[0], ix86_tls_module_base (), operands[1]));
+ DONE;
+ }
+ operands[2] = ix86_tls_get_addr ();
+})
+
+(define_insn "*tls_local_dynamic_base_64"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (call:DI (mem:QI (match_operand:DI 1 "call_insn_operand" ""))
+ (match_operand:DI 2 "" "")))
+ (unspec:DI [(const_int 0)] UNSPEC_TLS_LD_BASE)]
+ "TARGET_64BIT"
+ "lea{q}\t{%&@TLSLD(%%rip), %%rdi|%%rdi, %&@TLSLD[%%rip]}\;call\t%P1"
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
+(define_expand "tls_local_dynamic_base_64"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (call:DI (mem:QI (match_dup 1)) (const_int 0)))
+ (unspec:DI [(const_int 0)] UNSPEC_TLS_LD_BASE)])]
+ ""
+{
+ if (TARGET_GNU2_TLS)
+ {
+ emit_insn (gen_tls_dynamic_gnu2_64
+ (operands[0], ix86_tls_module_base ()));
+ DONE;
+ }
+ operands[1] = ix86_tls_get_addr ();
+})
+
+;; Local dynamic of a single variable is a loss.  Show combine how
+;; to convert that back to global dynamic.
+
+(define_insn_and_split "*tls_local_dynamic_32_once"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (unspec:SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "call_insn_operand" "")]
+ UNSPEC_TLS_LD_BASE)
+ (const:SI (unspec:SI
+ [(match_operand:SI 3 "tls_symbolic_operand" "")]
+ UNSPEC_DTPOFF))))
+ (clobber (match_scratch:SI 4 "=d"))
+ (clobber (match_scratch:SI 5 "=c"))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "#"
+ ""
+ [(parallel [(set (match_dup 0)
+ (unspec:SI [(match_dup 1) (match_dup 3) (match_dup 2)]
+ UNSPEC_TLS_GD))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+;; Load and add the thread base pointer from %gs:0.
+
+(define_insn "*load_tp_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_TP))]
+ "!TARGET_64BIT"
+ "mov{l}\t{%%gs:0, %0|%0, DWORD PTR %%gs:0}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0")
+ (set_attr "length" "7")
+ (set_attr "memory" "load")
+ (set_attr "imm_disp" "false")])
+
+(define_insn "*add_tp_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (unspec:SI [(const_int 0)] UNSPEC_TP)
+ (match_operand:SI 1 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "add{l}\t{%%gs:0, %0|%0, DWORD PTR %%gs:0}"
+ [(set_attr "type" "alu")
+ (set_attr "modrm" "0")
+ (set_attr "length" "7")
+ (set_attr "memory" "load")
+ (set_attr "imm_disp" "false")])
+
+(define_insn "*load_tp_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_TP))]
+ "TARGET_64BIT"
+ "mov{q}\t{%%fs:0, %0|%0, QWORD PTR %%fs:0}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0")
+ (set_attr "length" "7")
+ (set_attr "memory" "load")
+ (set_attr "imm_disp" "false")])
+
+(define_insn "*add_tp_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (unspec:DI [(const_int 0)] UNSPEC_TP)
+ (match_operand:DI 1 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "add{q}\t{%%fs:0, %0|%0, QWORD PTR %%fs:0}"
+ [(set_attr "type" "alu")
+ (set_attr "modrm" "0")
+ (set_attr "length" "7")
+ (set_attr "memory" "load")
+ (set_attr "imm_disp" "false")])
+
+;; GNU2 TLS patterns can be split.
+
+(define_expand "tls_dynamic_gnu2_32"
+ [(set (match_dup 3)
+ (plus:SI (match_operand:SI 2 "register_operand" "")
+ (const:SI
+ (unspec:SI [(match_operand:SI 1 "tls_symbolic_operand" "")]
+ UNSPEC_TLSDESC))))
+ (parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(match_dup 1) (match_dup 3)
+ (match_dup 2) (reg:SI SP_REG)]
+ UNSPEC_TLSDESC))
+ (clobber (reg:CC FLAGS_REG))])]
+ "!TARGET_64BIT && TARGET_GNU2_TLS"
+{
+ operands[3] = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ ix86_tls_descriptor_calls_expanded_in_cfun = true;
+})
+
+(define_insn "*tls_dynamic_lea_32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "register_operand" "b")
+ (const:SI
+ (unspec:SI [(match_operand:SI 2 "tls_symbolic_operand" "")]
+ UNSPEC_TLSDESC))))]
+ "!TARGET_64BIT && TARGET_GNU2_TLS"
+ "lea{l}\t{%a2@TLSDESC(%1), %0|%0, %a2@TLSDESC[%1]}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "SI")
+ (set_attr "length" "6")
+ (set_attr "length_address" "4")])
+
+(define_insn "*tls_dynamic_call_32"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "tls_symbolic_operand" "")
+ (match_operand:SI 2 "register_operand" "0")
+ ;; we have to make sure %ebx still points to the GOT
+ (match_operand:SI 3 "register_operand" "b")
+ (reg:SI SP_REG)]
+ UNSPEC_TLSDESC))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_GNU2_TLS"
+ "call\t{*%a1@TLSCALL(%2)|[DWORD PTR [%2+%a1@TLSCALL]]}"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")
+ (set_attr "length_address" "0")])
+
+(define_insn_and_split "*tls_dynamic_gnu2_combine_32"
+ [(set (match_operand:SI 0 "register_operand" "=&a")
+ (plus:SI
+ (unspec:SI [(match_operand:SI 3 "tls_modbase_operand" "")
+ (match_operand:SI 4 "" "")
+ (match_operand:SI 2 "register_operand" "b")
+ (reg:SI SP_REG)]
+ UNSPEC_TLSDESC)
+ (const:SI (unspec:SI
+ [(match_operand:SI 1 "tls_symbolic_operand" "")]
+ UNSPEC_DTPOFF))))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_GNU2_TLS"
+ "#"
+ ""
+ [(set (match_dup 0) (match_dup 5))]
+{
+ operands[5] = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ emit_insn (gen_tls_dynamic_gnu2_32 (operands[5], operands[1], operands[2]));
+})
+
+(define_expand "tls_dynamic_gnu2_64"
+ [(set (match_dup 2)
+ (unspec:DI [(match_operand:DI 1 "tls_symbolic_operand" "")]
+ UNSPEC_TLSDESC))
+ (parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unspec:DI [(match_dup 1) (match_dup 2) (reg:DI SP_REG)]
+ UNSPEC_TLSDESC))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT && TARGET_GNU2_TLS"
+{
+ operands[2] = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ ix86_tls_descriptor_calls_expanded_in_cfun = true;
+})
+
+(define_insn "*tls_dynamic_lea_64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "tls_symbolic_operand" "")]
+ UNSPEC_TLSDESC))]
+ "TARGET_64BIT && TARGET_GNU2_TLS"
+ "lea{q}\t{%a1@TLSDESC(%%rip), %0|%0, %a1@TLSDESC[%%rip]}"
+ [(set_attr "type" "lea")
+ (set_attr "mode" "DI")
+ (set_attr "length" "7")
+ (set_attr "length_address" "4")])
+
+(define_insn "*tls_dynamic_call_64"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "tls_symbolic_operand" "")
+ (match_operand:DI 2 "register_operand" "0")
+ (reg:DI SP_REG)]
+ UNSPEC_TLSDESC))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && TARGET_GNU2_TLS"
+ "call\t{*%a1@TLSCALL(%2)|[QWORD PTR [%2+%a1@TLSCALL]]}"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")
+ (set_attr "length_address" "0")])
+
+(define_insn_and_split "*tls_dynamic_gnu2_combine_64"
+ [(set (match_operand:DI 0 "register_operand" "=&a")
+ (plus:DI
+ (unspec:DI [(match_operand:DI 2 "tls_modbase_operand" "")
+ (match_operand:DI 3 "" "")
+ (reg:DI SP_REG)]
+ UNSPEC_TLSDESC)
+ (const:DI (unspec:DI
+ [(match_operand:DI 1 "tls_symbolic_operand" "")]
+ UNSPEC_DTPOFF))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && TARGET_GNU2_TLS"
+ "#"
+ ""
+ [(set (match_dup 0) (match_dup 4))]
+{
+ operands[4] = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
+ emit_insn (gen_tls_dynamic_gnu2_64 (operands[4], operands[1]));
+})
+
+;;
+
+;; These patterns match the binary 387 instructions for addM3, subM3,
+;; mulM3 and divM3. There are three patterns for each of DFmode and
+;; SFmode. The first is the normal insn, the second the same insn but
+;; with one operand a conversion, and the third the same insn but with
+;; the other operand a conversion. The conversion may be SFmode or
+;; SImode if the target mode is DFmode, but only SImode if the target mode
+;; is SFmode.
+
+;; GCC is slightly smarter about handling normal two-address instructions,
+;; so use special patterns for add and mul.
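+;;
+;; E.g. "x + i" with DFmode x and SImode i in memory can use the fiop
+;; form directly, "fiaddl i" against %st(0), instead of a separate
+;; fild + fadd, when TARGET_USE_SIMODE_FIOP holds.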
+
+(define_insn "*fop_sf_comm_mixed"
+ [(set (match_operand:SF 0 "register_operand" "=f,x")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SF 2 "nonimmediate_operand" "fm,xm")]))]
+ "TARGET_MIX_SSE_I387
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (eq_attr "alternative" "1")
+ (if_then_else (match_operand:SF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (const_string "sseadd"))
+ (if_then_else (match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (const_string "fop"))))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_sf_comm_sse"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "%0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")]))]
+ "TARGET_SSE_MATH
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (match_operand:SF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (const_string "sseadd")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_sf_comm_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "%0")
+ (match_operand:SF 2 "nonimmediate_operand" "fm")]))]
+ "TARGET_80387
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_sf_1_mixed"
+ [(set (match_operand:SF 0 "register_operand" "=f,f,x")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "0,fm,0")
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0,xm")]))]
+ "TARGET_MIX_SSE_I387
+ && !COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(and (eq_attr "alternative" "2")
+ (match_operand:SF 3 "mult_operator" ""))
+ (const_string "ssemul")
+ (and (eq_attr "alternative" "2")
+ (match_operand:SF 3 "div_operator" ""))
+ (const_string "ssediv")
+ (eq_attr "alternative" "2")
+ (const_string "sseadd")
+ (match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:SF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_sf_1_sse"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")]))]
+ "TARGET_SSE_MATH
+ && !COMMUTATIVE_ARITH_P (operands[3])"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:SF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (match_operand:SF 3 "div_operator" "")
+ (const_string "ssediv")
+ ]
+ (const_string "sseadd")))
+ (set_attr "mode" "SF")])
+
+;; This pattern is not fully shadowed by the pattern above.
+(define_insn "*fop_sf_1_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "nonimmediate_operand" "0,fm")
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0")]))]
+ "TARGET_80387 && !TARGET_SSE_MATH
+ && !COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:SF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+;; ??? Add SSE splitters for these!
+(define_insn "*fop_sf_2<mode>_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(float:SF (match_operand:X87MODEI12 1 "nonimmediate_operand" "m,?r"))
+ (match_operand:SF 2 "register_operand" "0,0")]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP && !TARGET_SSE_MATH"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:SF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_sf_3<mode>_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (match_operator:SF 3 "binary_fp_operator"
+ [(match_operand:SF 1 "register_operand" "0,0")
+ (float:SF (match_operand:X87MODEI12 2 "nonimmediate_operand" "m,?r"))]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP && !TARGET_SSE_MATH"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:SF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:SF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_df_comm_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=f,x")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "%0,0")
+ (match_operand:DF 2 "nonimmediate_operand" "fm,Ym")]))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_MIX_SSE_I387
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (eq_attr "alternative" "1")
+ (if_then_else (match_operand:DF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (const_string "sseadd"))
+ (if_then_else (match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (const_string "fop"))))
+ (set_attr "mode" "DF")])
+
+(define_insn "*fop_df_comm_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "%0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")]))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (match_operand:DF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (const_string "sseadd")))
+ (set_attr "mode" "DF")])
+
+(define_insn "*fop_df_comm_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "%0")
+ (match_operand:DF 2 "nonimmediate_operand" "fm")]))]
+ "TARGET_80387
+ && COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (const_string "fop")))
+ (set_attr "mode" "DF")])
+
+(define_insn "*fop_df_1_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=f,f,x")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "0,fm,0")
+ (match_operand:DF 2 "nonimmediate_operand" "fm,0,xm")]))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387
+ && !COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(and (eq_attr "alternative" "2")
+ (match_operand:DF 3 "mult_operator" ""))
+ (const_string "ssemul")
+ (and (eq_attr "alternative" "2")
+ (match_operand:DF 3 "div_operator" ""))
+ (const_string "ssediv")
+ (eq_attr "alternative" "2")
+ (const_string "sseadd")
+ (match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "DF")])
+
+(define_insn "*fop_df_1_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")]))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH
+ && !COMMUTATIVE_ARITH_P (operands[3])"
+ "* return output_387_binary_op (insn, operands);"
+ [(set_attr "mode" "DF")
+ (set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "ssemul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "ssediv")
+ ]
+ (const_string "sseadd")))])
+
+;; This pattern is not fully shadowed by the pattern above.
+(define_insn "*fop_df_1_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "nonimmediate_operand" "0,fm")
+ (match_operand:DF 2 "nonimmediate_operand" "fm,0")]))]
+ "TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH)
+ && !COMMUTATIVE_ARITH_P (operands[3])
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "DF")])
+
+;; ??? Add SSE splitters for these!
+(define_insn "*fop_df_2<mode>_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(float:DF (match_operand:X87MODEI12 1 "nonimmediate_operand" "m,?r"))
+ (match_operand:DF 2 "register_operand" "0,0")]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP
+ && !(TARGET_SSE2 && TARGET_SSE_MATH)"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_df_3<mode>_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "register_operand" "0,0")
+ (float:DF (match_operand:X87MODEI12 2 "nonimmediate_operand" "m,?r"))]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP
+ && !(TARGET_SSE2 && TARGET_SSE_MATH)"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_df_4_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fm,0"))
+ (match_operand:DF 2 "register_operand" "0,f")]))]
+ "TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH)
+ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_df_5_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(match_operand:DF 1 "register_operand" "0,f")
+ (float_extend:DF
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0"))]))]
+ "TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_df_6_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (match_operator:DF 3 "binary_fp_operator"
+ [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "0,f"))
+ (float_extend:DF
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0"))]))]
+ "TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH)"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:DF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_xf_comm_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(match_operand:XF 1 "register_operand" "%0")
+ (match_operand:XF 2 "register_operand" "f")]))]
+ "TARGET_80387
+ && COMMUTATIVE_ARITH_P (operands[3])"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (if_then_else (match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (const_string "fop")))
+ (set_attr "mode" "XF")])
+
+(define_insn "*fop_xf_1_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(match_operand:XF 1 "register_operand" "0,f")
+ (match_operand:XF 2 "register_operand" "f,0")]))]
+ "TARGET_80387
+ && !COMMUTATIVE_ARITH_P (operands[3])"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "XF")])
+
+(define_insn "*fop_xf_2<mode>_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(float:XF (match_operand:X87MODEI12 1 "nonimmediate_operand" "m,?r"))
+ (match_operand:XF 2 "register_operand" "0,0")]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_xf_3<mode>_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(match_operand:XF 1 "register_operand" "0,0")
+ (float:XF (match_operand:X87MODEI12 2 "nonimmediate_operand" "m,?r"))]))]
+ "TARGET_80387 && TARGET_USE_<MODE>MODE_FIOP"
+ "* return which_alternative ? \"#\" : output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "fp_int_src" "true")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*fop_xf_4_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(float_extend:XF (match_operand 1 "nonimmediate_operand" "fm,0"))
+ (match_operand:XF 2 "register_operand" "0,f")]))]
+ "TARGET_80387"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_xf_5_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(match_operand:XF 1 "register_operand" "0,f")
+ (float_extend:XF
+ (match_operand 2 "nonimmediate_operand" "fm,0"))]))]
+ "TARGET_80387"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
+(define_insn "*fop_xf_6_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (match_operator:XF 3 "binary_fp_operator"
+ [(float_extend:XF
+ (match_operand 1 "register_operand" "0,f"))
+ (float_extend:XF
+ (match_operand 2 "nonimmediate_operand" "fm,0"))]))]
+ "TARGET_80387"
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:XF 3 "mult_operator" "")
+ (const_string "fmul")
+ (match_operand:XF 3 "div_operator" "")
+ (const_string "fdiv")
+ ]
+ (const_string "fop")))
+ (set_attr "mode" "SF")])
+
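+;; A note on the two splitters below (inferred from the calls they make):
+;; after reload they push the integer operand of a mixed int/FP operation
+;; into a stack slot with ix86_force_to_memory, so the 387 can use its
+;; memory-operand (fild-style) form, and release the slot again with
+;; ix86_free_from_memory.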
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 3 "binary_fp_operator"
+ [(float (match_operand:X87MODEI12 1 "register_operand" ""))
+ (match_operand 2 "register_operand" "")]))]
+ "TARGET_80387 && reload_completed
+ && FLOAT_MODE_P (GET_MODE (operands[0]))"
+ [(const_int 0)]
+{
+ operands[4] = ix86_force_to_memory (GET_MODE (operands[1]), operands[1]);
+ operands[4] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[4]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (GET_CODE (operands[3]),
+ GET_MODE (operands[3]),
+ operands[4],
+ operands[2])));
+ ix86_free_from_memory (GET_MODE (operands[1]));
+ DONE;
+})
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 3 "binary_fp_operator"
+ [(match_operand 1 "register_operand" "")
+ (float (match_operand:X87MODEI12 2 "register_operand" ""))]))]
+ "TARGET_80387 && reload_completed
+ && FLOAT_MODE_P (GET_MODE (operands[0]))"
+ [(const_int 0)]
+{
+ operands[4] = ix86_force_to_memory (GET_MODE (operands[2]), operands[2]);
+ operands[4] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[4]);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_fmt_ee (GET_CODE (operands[3]),
+ GET_MODE (operands[3]),
+ operands[1],
+ operands[4])));
+ ix86_free_from_memory (GET_MODE (operands[2]));
+ DONE;
+})
+
+;; FPU special functions.
+
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (sqrt:SF (match_operand:SF 1 "nonimmediate_operand" "")))]
+ "TARGET_USE_FANCY_MATH_387 || TARGET_SSE_MATH"
+{
+ if (!TARGET_SSE_MATH)
+ operands[1] = force_reg (SFmode, operands[1]);
+})
+
+(define_insn "*sqrtsf2_mixed"
+ [(set (match_operand:SF 0 "register_operand" "=f,x")
+ (sqrt:SF (match_operand:SF 1 "nonimmediate_operand" "0,xm")))]
+ "TARGET_USE_FANCY_MATH_387 && TARGET_MIX_SSE_I387"
+ "@
+ fsqrt
+ sqrtss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fpspc,sse")
+ (set_attr "mode" "SF,SF")
+ (set_attr "athlon_decode" "direct,*")])
+
+(define_insn "*sqrtsf2_sse"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (sqrt:SF (match_operand:SF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE_MATH"
+ "sqrtss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "*")])
+
+(define_insn "*sqrtsf2_i387"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "0")))]
+ "TARGET_USE_FANCY_MATH_387"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")
+ (set_attr "athlon_decode" "direct")])
+
+(define_expand "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (sqrt:DF (match_operand:DF 1 "nonimmediate_operand" "")))]
+ "TARGET_USE_FANCY_MATH_387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+{
+ if (!(TARGET_SSE2 && TARGET_SSE_MATH))
+ operands[1] = force_reg (DFmode, operands[1]);
+})
+
+(define_insn "*sqrtdf2_mixed"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=f,x")
+ (sqrt:DF (match_operand:DF 1 "nonimmediate_operand" "0,xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_USE_FANCY_MATH_387 && TARGET_SSE2 && TARGET_MIX_SSE_I387"
+ "@
+ fsqrt
+ sqrtsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "fpspc,sse")
+ (set_attr "mode" "DF,DF")
+ (set_attr "athlon_decode" "direct,*")])
+
+(define_insn "*sqrtdf2_sse"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (sqrt:DF (match_operand:DF 1 "nonimmediate_operand" "xm")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "sqrtsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "*")])
+
+(define_insn "*sqrtdf2_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "0")))]
+ "TARGET_USE_FANCY_MATH_387"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "direct")])
+
+(define_insn "*sqrtextendsfdf2_i387"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "direct")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "register_operand" "0")))]
+ "TARGET_USE_FANCY_MATH_387"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")
+ (set_attr "athlon_decode" "direct")])
+
+(define_insn "*sqrtextendsfxf2_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (float_extend:XF
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "TARGET_USE_FANCY_MATH_387"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")
+ (set_attr "athlon_decode" "direct")])
+
+(define_insn "*sqrtextenddfxf2_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (sqrt:XF (float_extend:XF
+ (match_operand:DF 1 "register_operand" "0"))))]
+ "TARGET_USE_FANCY_MATH_387"
+ "fsqrt"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")
+ (set_attr "athlon_decode" "direct")])
+
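+;; fprem computes only a partial remainder, so it must be re-issued
+;; until the FPU status word reports the reduction complete; the
+;; fmod/drem expanders below therefore loop back through
+;; ix86_emit_fp_unordered_jump.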
+(define_insn "fpremxf4"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 3 "register_operand" "1")]
+ UNSPEC_FPREM_F))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2) (match_dup 3)]
+ UNSPEC_FPREM_U))
+ (set (reg:CCFP FPSR_REG)
+ (unspec:CCFP [(const_int 0)] UNSPEC_NOP))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fprem"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_expand "fmodsf3"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))
+ (use (match_operand:SF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ rtx op1 = gen_reg_rtx (XFmode);
+ rtx op2 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_extendsfxf2 (op2, operands[2]));
+
+ emit_label (label);
+
+ emit_insn (gen_fpremxf4 (op1, op2, op1, op2));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op1));
+ DONE;
+})
+
+(define_expand "fmoddf3"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))
+ (use (match_operand:DF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ rtx op1 = gen_reg_rtx (XFmode);
+ rtx op2 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_extenddfxf2 (op2, operands[2]));
+
+ emit_label (label);
+
+ emit_insn (gen_fpremxf4 (op1, op2, op1, op2));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op1));
+ DONE;
+})
+
+(define_expand "fmodxf3"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))
+ (use (match_operand:XF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ emit_label (label);
+
+ emit_insn (gen_fpremxf4 (operands[1], operands[2],
+ operands[1], operands[2]));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "fprem1xf4"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 3 "register_operand" "1")]
+ UNSPEC_FPREM1_F))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2) (match_dup 3)]
+ UNSPEC_FPREM1_U))
+ (set (reg:CCFP FPSR_REG)
+ (unspec:CCFP [(const_int 0)] UNSPEC_NOP))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fprem1"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_expand "dremsf3"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))
+ (use (match_operand:SF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ rtx op1 = gen_reg_rtx (XFmode);
+ rtx op2 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_extendsfxf2 (op2, operands[2]));
+
+ emit_label (label);
+
+ emit_insn (gen_fprem1xf4 (op1, op2, op1, op2));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op1));
+ DONE;
+})
+
+(define_expand "dremdf3"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))
+ (use (match_operand:DF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ rtx op1 = gen_reg_rtx (XFmode);
+ rtx op2 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_extenddfxf2 (op2, operands[2]));
+
+ emit_label (label);
+
+ emit_insn (gen_fprem1xf4 (op1, op2, op1, op2));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op1));
+ DONE;
+})
+
+(define_expand "dremxf3"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))
+ (use (match_operand:XF 2 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx label = gen_label_rtx ();
+
+ emit_label (label);
+
+ emit_insn (gen_fprem1xf4 (operands[1], operands[2],
+ operands[1], operands[2]));
+ ix86_emit_fp_unordered_jump (label);
+
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "*sindf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] UNSPEC_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsin"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_insn "*sinsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] UNSPEC_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsin"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")])
+
+(define_insn "*sinextendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))]
+ UNSPEC_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsin"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_insn "*sinxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")] UNSPEC_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fsin"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_insn "*cosdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] UNSPEC_COS))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fcos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_insn "*cossf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] UNSPEC_COS))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fcos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")])
+
+(define_insn "*cosextendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 1 "register_operand" "0"))]
+ UNSPEC_COS))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fcos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_insn "*cosxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")] UNSPEC_COS))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fcos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+;; With the sincos pattern defined, the sin and cos builtin functions
+;; are expanded to the sincos pattern with one of the two outputs left
+;; unused.  The CSE pass detects whether two sincos patterns can be
+;; combined; otherwise the sincos pattern is split back into a sin or
+;; cos pattern, depending on which output is unused.
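+;;
+;; For illustration only: given a fragment such as
+;;   double s = sin (x), c = cos (x);
+;; each call expands to a sincos pattern with the complementary output
+;; unused, and CSE merges the two into a single fsincos.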
+
+(define_insn "sincosdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "0")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "=u")
+ (unspec:DF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsincos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "")
+ (unspec:DF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[0]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 1) (unspec:DF [(match_dup 2)] UNSPEC_SIN))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "")
+ (unspec:DF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[1]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 0) (unspec:DF [(match_dup 2)] UNSPEC_COS))]
+ "")
+
+(define_insn "sincossf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "0")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:SF 1 "register_operand" "=u")
+ (unspec:SF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsincos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:SF 1 "register_operand" "")
+ (unspec:SF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[0]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 1) (unspec:SF [(match_dup 2)] UNSPEC_SIN))]
+ "")
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:SF 1 "register_operand" "")
+ (unspec:SF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[1]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 0) (unspec:SF [(match_dup 2)] UNSPEC_COS))]
+ "")
+
+(define_insn "*sincosextendsfdf3"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 2 "register_operand" "0"))]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "=u")
+ (unspec:DF [(float_extend:DF
+ (match_dup 2))] UNSPEC_SINCOS_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fsincos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 2 "register_operand" ""))]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "")
+ (unspec:DF [(float_extend:DF
+ (match_dup 2))] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[0]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 1) (unspec:DF [(float_extend:DF
+ (match_dup 2))] UNSPEC_SIN))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(float_extend:DF
+ (match_operand:SF 2 "register_operand" ""))]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:DF 1 "register_operand" "")
+ (unspec:DF [(float_extend:DF
+ (match_dup 2))] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[1]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 0) (unspec:DF [(float_extend:DF
+ (match_dup 2))] UNSPEC_COS))]
+ "")
+
+(define_insn "sincosxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fsincos"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_split
+ [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:XF 1 "register_operand" "")
+ (unspec:XF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[0]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 1) (unspec:XF [(match_dup 2)] UNSPEC_SIN))]
+ "")
+
+(define_split
+ [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "")]
+ UNSPEC_SINCOS_COS))
+ (set (match_operand:XF 1 "register_operand" "")
+ (unspec:XF [(match_dup 2)] UNSPEC_SINCOS_SIN))]
+ "find_regno_note (insn, REG_UNUSED, REGNO (operands[1]))
+ && !reload_completed && !reload_in_progress"
+ [(set (match_dup 0) (unspec:XF [(match_dup 2)] UNSPEC_COS))]
+ "")
+
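+;; fptan replaces st(0) with tan (st(0)) and then pushes 1.0, so the
+;; patterns below have two outputs: UNSPEC_TAN_TAN is the tangent in
+;; st(1) and UNSPEC_TAN_ONE is the pushed constant in st(0).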
+(define_insn "*tandf3_1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "0")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:DF 1 "register_operand" "=u")
+ (unspec:DF [(match_dup 2)] UNSPEC_TAN_TAN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fptan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+;; Optimize the sequence:  fptan
+;;                         fstp %st(0)
+;;                         fld1
+;; into a single fptan insn.
+
+(define_peephole2
+ [(parallel[(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:DF 1 "register_operand" "")
+ (unspec:DF [(match_dup 2)] UNSPEC_TAN_TAN))])
+ (set (match_dup 0)
+ (match_operand:DF 3 "immediate_operand" ""))]
+ "standard_80387_constant_p (operands[3]) == 2"
+ [(parallel[(set (match_dup 0) (unspec:DF [(match_dup 2)] UNSPEC_TAN_ONE))
+ (set (match_dup 1) (unspec:DF [(match_dup 2)] UNSPEC_TAN_TAN))])]
+ "")
+
+(define_expand "tandf2"
+ [(parallel [(set (match_dup 2)
+ (unspec:DF [(match_operand:DF 1 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(match_dup 1)] UNSPEC_TAN_TAN))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (DFmode);
+})
+
+(define_insn "*tansf3_1"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "0")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:SF 1 "register_operand" "=u")
+ (unspec:SF [(match_dup 2)] UNSPEC_TAN_TAN))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fptan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")])
+
+;; Optimize the sequence:  fptan
+;;                         fstp %st(0)
+;;                         fld1
+;; into a single fptan insn.
+
+(define_peephole2
+ [(parallel[(set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:SF 1 "register_operand" "")
+ (unspec:SF [(match_dup 2)] UNSPEC_TAN_TAN))])
+ (set (match_dup 0)
+ (match_operand:SF 3 "immediate_operand" ""))]
+ "standard_80387_constant_p (operands[3]) == 2"
+ [(parallel[(set (match_dup 0) (unspec:SF [(match_dup 2)] UNSPEC_TAN_ONE))
+ (set (match_dup 1) (unspec:SF [(match_dup 2)] UNSPEC_TAN_TAN))])]
+ "")
+
+(define_expand "tansf2"
+ [(parallel [(set (match_dup 2)
+ (unspec:SF [(match_operand:SF 1 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF [(match_dup 1)] UNSPEC_TAN_TAN))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (SFmode);
+})
+
+(define_insn "*tanxf3_1"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2)] UNSPEC_TAN_TAN))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fptan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+;; Optimize the sequence:  fptan
+;;                         fstp %st(0)
+;;                         fld1
+;; into a single fptan insn.
+
+(define_peephole2
+ [(parallel[(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:XF 1 "register_operand" "")
+ (unspec:XF [(match_dup 2)] UNSPEC_TAN_TAN))])
+ (set (match_dup 0)
+ (match_operand:XF 3 "immediate_operand" ""))]
+ "standard_80387_constant_p (operands[3]) == 2"
+ [(parallel[(set (match_dup 0) (unspec:XF [(match_dup 2)] UNSPEC_TAN_ONE))
+ (set (match_dup 1) (unspec:XF [(match_dup 2)] UNSPEC_TAN_TAN))])]
+ "")
+
+(define_expand "tanxf2"
+ [(parallel [(set (match_dup 2)
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_TAN_ONE))
+ (set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 1)] UNSPEC_TAN_TAN))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+})
+
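+;; fpatan computes atan (st(1) / st(0)) with full quadrant handling and
+;; pops the stack, clobbering the input in st(1); the atan2 expanders
+;; below therefore copy operand 1 into a fresh register first.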
+(define_insn "atan2df3_1"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unspec:DF [(match_operand:DF 2 "register_operand" "0")
+ (match_operand:DF 1 "register_operand" "u")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:DF 3 "=1"))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fpatan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DF")])
+
+(define_expand "atan2df3"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 2 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx copy = gen_reg_rtx (DFmode);
+ emit_move_insn (copy, operands[1]);
+ emit_insn (gen_atan2df3_1 (operands[0], copy, operands[2]));
+ DONE;
+})
+
+(define_expand "atandf2"
+ [(parallel [(set (match_operand:DF 0 "register_operand" "")
+ (unspec:DF [(match_dup 2)
+ (match_operand:DF 1 "register_operand" "")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:DF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (DFmode);
+ emit_move_insn (operands[2], CONST1_RTX (DFmode)); /* fld1 */
+})
+
+(define_insn "atan2sf3_1"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 2 "register_operand" "0")
+ (match_operand:SF 1 "register_operand" "u")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:SF 3 "=1"))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "fpatan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "SF")])
+
+(define_expand "atan2sf3"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 2 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx copy = gen_reg_rtx (SFmode);
+ emit_move_insn (copy, operands[1]);
+ emit_insn (gen_atan2sf3_1 (operands[0], copy, operands[2]));
+ DONE;
+})
+
+(define_expand "atansf2"
+ [(parallel [(set (match_operand:SF 0 "register_operand" "")
+ (unspec:SF [(match_dup 2)
+ (match_operand:SF 1 "register_operand" "")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:SF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (SFmode);
+ emit_move_insn (operands[2], CONST1_RTX (SFmode)); /* fld1 */
+})
+
+(define_insn "atan2xf3_1"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 1 "register_operand" "u")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 3 "=1"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fpatan"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_expand "atan2xf3"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 2 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx copy = gen_reg_rtx (XFmode);
+ emit_move_insn (copy, operands[1]);
+ emit_insn (gen_atan2xf3_1 (operands[0], copy, operands[2]));
+ DONE;
+})
+
+(define_expand "atanxf2"
+ [(parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 2)
+ (match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ emit_move_insn (operands[2], CONST1_RTX (XFmode)); /* fld1 */
+})
+
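+;; asin (x) is expanded as atan (x / sqrt (1 - x*x)); the intermediate
+;; steps are carried out in XFmode and only the final result is
+;; truncated back to the narrower mode.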
+(define_expand "asindf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 3) (mult:XF (match_dup 2) (match_dup 2)))
+ (set (match_dup 5) (minus:XF (match_dup 4) (match_dup 3)))
+ (set (match_dup 6) (sqrt:XF (match_dup 5)))
+ (parallel [(set (match_dup 7)
+ (unspec:XF [(match_dup 6) (match_dup 2)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 8 ""))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 7)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<8; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "asinsf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 3) (mult:XF (match_dup 2) (match_dup 2)))
+ (set (match_dup 5) (minus:XF (match_dup 4) (match_dup 3)))
+ (set (match_dup 6) (sqrt:XF (match_dup 5)))
+ (parallel [(set (match_dup 7)
+ (unspec:XF [(match_dup 6) (match_dup 2)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 8 ""))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 7)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<8; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "asinxf2"
+ [(set (match_dup 2)
+ (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_dup 1)))
+ (set (match_dup 4) (minus:XF (match_dup 3) (match_dup 2)))
+ (set (match_dup 5) (sqrt:XF (match_dup 4)))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 5) (match_dup 1)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 6 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<6; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */
+})
+
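+;; acos (x) is the mirror image, atan (sqrt (1 - x*x) / x): the two
+;; fpatan arguments are simply swapped relative to asin.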
+(define_expand "acosdf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 3) (mult:XF (match_dup 2) (match_dup 2)))
+ (set (match_dup 5) (minus:XF (match_dup 4) (match_dup 3)))
+ (set (match_dup 6) (sqrt:XF (match_dup 5)))
+ (parallel [(set (match_dup 7)
+ (unspec:XF [(match_dup 2) (match_dup 6)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 8 ""))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 7)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<8; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "acossf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 3) (mult:XF (match_dup 2) (match_dup 2)))
+ (set (match_dup 5) (minus:XF (match_dup 4) (match_dup 3)))
+ (set (match_dup 6) (sqrt:XF (match_dup 5)))
+ (parallel [(set (match_dup 7)
+ (unspec:XF [(match_dup 2) (match_dup 6)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 8 ""))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 7)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<8; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "acosxf2"
+ [(set (match_dup 2)
+ (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_dup 1)))
+ (set (match_dup 4) (minus:XF (match_dup 3) (match_dup 2)))
+ (set (match_dup 5) (sqrt:XF (match_dup 4)))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 1) (match_dup 5)]
+ UNSPEC_FPATAN))
+ (clobber (match_scratch:XF 6 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i=2; i<6; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_insn "fyl2x_xf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 1 "register_operand" "u")]
+ UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 3 "=1"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fyl2x"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
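+;; fyl2x computes y * log2 (x).  The expanders below obtain the natural
+;; log by loading y = ln (2) (fldln2), the base-10 log with
+;; y = log10 (2) (fldlg2), and the base-2 log with y = 1.0.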
+(define_expand "logsf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ temp = standard_80387_constant_rtx (4); /* fldln2 */
+ emit_move_insn (operands[3], temp);
+})
+
+(define_expand "logdf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ temp = standard_80387_constant_rtx (4); /* fldln2 */
+ emit_move_insn (operands[3], temp);
+})
+
+(define_expand "logxf2"
+ [(parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")
+ (match_dup 2)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (4); /* fldln2 */
+ emit_move_insn (operands[2], temp);
+})
+
+(define_expand "log10sf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ temp = standard_80387_constant_rtx (3); /* fldlg2 */
+ emit_move_insn (operands[3], temp);
+})
+
+(define_expand "log10df2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ temp = standard_80387_constant_rtx (3); /* fldlg2 */
+ emit_move_insn (operands[3], temp);
+})
+
+(define_expand "log10xf2"
+ [(parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")
+ (match_dup 2)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+
+ operands[2] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (3); /* fldlg2 */
+ emit_move_insn (operands[2], temp);
+})
+
+(define_expand "log2sf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "log2df2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (parallel [(set (match_dup 4)
+ (unspec:XF [(match_dup 2)
+ (match_dup 3)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 5 ""))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+
+ emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "log2xf2"
+ [(parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")
+ (match_dup 2)] UNSPEC_FYL2X))
+ (clobber (match_scratch:XF 3 ""))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ emit_move_insn (operands[2], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_insn "fyl2xp1_xf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 1 "register_operand" "u")]
+ UNSPEC_FYL2XP1))
+ (clobber (match_scratch:XF 3 "=1"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fyl2xp1"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
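+;; fyl2xp1 computes y * log2 (x + 1) without the cancellation that
+;; forming 1 + x explicitly would cause for small x;
+;; ix86_emit_i387_log1p selects between the fyl2xp1 and fyl2x code
+;; paths based on the magnitude of the argument.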
+(define_expand "log1psf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ ix86_emit_i387_log1p (op0, op1);
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "log1pdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ ix86_emit_i387_log1p (op0, op1);
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "log1pxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ ix86_emit_i387_log1p (operands[0], operands[1]);
+ DONE;
+})
+
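+;; fxtract splits st(0) into significand and exponent.  The logb
+;; expanders keep only the exponent half, and ilogbsi2 additionally
+;; converts it to an integer.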
+(define_insn "*fxtractxf3"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")]
+ UNSPEC_XTRACT_FRACT))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2)] UNSPEC_XTRACT_EXP))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fxtract"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_expand "logbsf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (parallel [(set (match_dup 3)
+ (unspec:XF [(match_dup 2)] UNSPEC_XTRACT_FRACT))
+ (set (match_dup 4)
+ (unspec:XF [(match_dup 2)] UNSPEC_XTRACT_EXP))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+})
+
+(define_expand "logbdf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (parallel [(set (match_dup 3)
+ (unspec:XF [(match_dup 2)] UNSPEC_XTRACT_FRACT))
+ (set (match_dup 4)
+ (unspec:XF [(match_dup 2)] UNSPEC_XTRACT_EXP))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 4)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+ operands[4] = gen_reg_rtx (XFmode);
+})
+
+(define_expand "logbxf2"
+ [(parallel [(set (match_dup 2)
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_XTRACT_FRACT))
+ (set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 1)] UNSPEC_XTRACT_EXP))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+})
+
+(define_expand "ilogbsi2"
+ [(parallel [(set (match_dup 2)
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_XTRACT_FRACT))
+ (set (match_operand:XF 3 "register_operand" "")
+ (unspec:XF [(match_dup 1)] UNSPEC_XTRACT_EXP))])
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ operands[2] = gen_reg_rtx (XFmode);
+ operands[3] = gen_reg_rtx (XFmode);
+})
+
+(define_insn "*f2xm1xf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_F2XM1))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "f2xm1"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_insn "*fscalexf4"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 2 "register_operand" "0")
+ (match_operand:XF 3 "register_operand" "1")]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_operand:XF 1 "register_operand" "=u")
+ (unspec:XF [(match_dup 2) (match_dup 3)]
+ UNSPEC_FSCALE_EXP))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fscale"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
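+;; exp (x) is expanded as 2**(x * log2 (e)): frndint splits the scaled
+;; argument into integer and fractional parts, f2xm1 computes
+;; 2**frac - 1 (its domain, |frac| <= 1, is guaranteed by frndint),
+;; 1.0 is added back, and fscale applies the integer power of two.
+;; The exp10 expanders use the same scheme with log2 (10) (fldl2t) in
+;; place of log2 (e) (fldl2e).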
+(define_expand "expsf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (set (match_dup 9) (plus:XF (match_dup 7) (match_dup 8)))
+ (parallel [(set (match_dup 10)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 11)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 10)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i=2; i<12; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "expdf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (set (match_dup 9) (plus:XF (match_dup 7) (match_dup 8)))
+ (parallel [(set (match_dup 10)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 11)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 10)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i=2; i<12; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "expxf2"
+ [(set (match_dup 3) (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_dup 2)))
+ (set (match_dup 4) (unspec:XF [(match_dup 3)] UNSPEC_FRNDINT))
+ (set (match_dup 5) (minus:XF (match_dup 3) (match_dup 4)))
+ (set (match_dup 6) (unspec:XF [(match_dup 5)] UNSPEC_F2XM1))
+ (set (match_dup 8) (plus:XF (match_dup 6) (match_dup 7)))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 8) (match_dup 4)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 8) (match_dup 4)]
+ UNSPEC_FSCALE_EXP))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 10; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[2], temp);
+ emit_move_insn (operands[7], CONST1_RTX (XFmode)); /* fld1 */
+})
+
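+;; 10^x uses the same recipe with c = log2(10), loaded by fldl2t.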
+(define_expand "exp10sf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (set (match_dup 9) (plus:XF (match_dup 7) (match_dup 8)))
+ (parallel [(set (match_dup 10)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 11)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 10)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 12; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (6); /* fldl2t */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "exp10df2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (set (match_dup 9) (plus:XF (match_dup 7) (match_dup 8)))
+ (parallel [(set (match_dup 10)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 11)
+ (unspec:XF [(match_dup 9) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 10)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 12; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (6); /* fldl2t */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "exp10xf2"
+ [(set (match_dup 3) (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_dup 2)))
+ (set (match_dup 4) (unspec:XF [(match_dup 3)] UNSPEC_FRNDINT))
+ (set (match_dup 5) (minus:XF (match_dup 3) (match_dup 4)))
+ (set (match_dup 6) (unspec:XF [(match_dup 5)] UNSPEC_F2XM1))
+ (set (match_dup 8) (plus:XF (match_dup 6) (match_dup 7)))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 8) (match_dup 4)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 8) (match_dup 4)]
+ UNSPEC_FSCALE_EXP))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 10; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (6); /* fldl2t */
+ emit_move_insn (operands[2], temp);
+ emit_move_insn (operands[7], CONST1_RTX (XFmode)); /* fld1 */
+})
+
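+;; 2^x needs no constant multiply; the argument is already the base-2
+;; exponent, so only the integer/fraction split remains.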
+(define_expand "exp2sf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 3) (unspec:XF [(match_dup 2)] UNSPEC_FRNDINT))
+ (set (match_dup 4) (minus:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_F2XM1))
+ (set (match_dup 7) (plus:XF (match_dup 5) (match_dup 6)))
+ (parallel [(set (match_dup 8)
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 8)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 2; i < 10; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "exp2df2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 3) (unspec:XF [(match_dup 2)] UNSPEC_FRNDINT))
+ (set (match_dup 4) (minus:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_F2XM1))
+ (set (match_dup 7) (plus:XF (match_dup 5) (match_dup 6)))
+ (parallel [(set (match_dup 8)
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 8)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 2; i < 10; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "exp2xf2"
+ [(set (match_dup 2) (match_operand:XF 1 "register_operand" ""))
+ (set (match_dup 3) (unspec:XF [(match_dup 2)] UNSPEC_FRNDINT))
+ (set (match_dup 4) (minus:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_F2XM1))
+ (set (match_dup 7) (plus:XF (match_dup 5) (match_dup 6)))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 8)
+ (unspec:XF [(match_dup 7) (match_dup 3)]
+ UNSPEC_FSCALE_EXP))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 2; i < 9; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */
+})
+
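+;; expm1 rearranges the usual recipe to avoid cancellation near zero:
+;; with t = x*log2(e), i = rndint(t), f = t - i,
+;;   e^x - 1 = 2^i*(2^f - 1) + (2^i - 1),
+;; so for small x (i == 0) the result comes straight from f2xm1
+;; instead of computing 2^t and subtracting 1.  The second fscale
+;; forms 2^i by scaling 1.0.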
+(define_expand "expm1df2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (parallel [(set (match_dup 8)
+ (unspec:XF [(match_dup 7) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 7) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (parallel [(set (match_dup 11)
+ (unspec:XF [(match_dup 10) (match_dup 9)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 12)
+ (unspec:XF [(match_dup 10) (match_dup 9)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_dup 13) (minus:XF (match_dup 11) (match_dup 10)))
+ (set (match_dup 14) (plus:XF (match_dup 13) (match_dup 8)))
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 14)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 15; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[10], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "expm1sf2"
+ [(set (match_dup 2)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 4) (mult:XF (match_dup 2) (match_dup 3)))
+ (set (match_dup 5) (unspec:XF [(match_dup 4)] UNSPEC_FRNDINT))
+ (set (match_dup 6) (minus:XF (match_dup 4) (match_dup 5)))
+ (set (match_dup 7) (unspec:XF [(match_dup 6)] UNSPEC_F2XM1))
+ (parallel [(set (match_dup 8)
+ (unspec:XF [(match_dup 7) (match_dup 5)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 9)
+ (unspec:XF [(match_dup 7) (match_dup 5)]
+ UNSPEC_FSCALE_EXP))])
+ (parallel [(set (match_dup 11)
+ (unspec:XF [(match_dup 10) (match_dup 9)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 12)
+ (unspec:XF [(match_dup 10) (match_dup 9)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_dup 13) (minus:XF (match_dup 11) (match_dup 10)))
+ (set (match_dup 14) (plus:XF (match_dup 13) (match_dup 8)))
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 14)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 15; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[3], temp);
+ emit_move_insn (operands[10], CONST1_RTX (XFmode)); /* fld1 */
+})
+
+(define_expand "expm1xf2"
+ [(set (match_dup 3) (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_dup 2)))
+ (set (match_dup 4) (unspec:XF [(match_dup 3)] UNSPEC_FRNDINT))
+ (set (match_dup 5) (minus:XF (match_dup 3) (match_dup 4)))
+ (set (match_dup 6) (unspec:XF [(match_dup 5)] UNSPEC_F2XM1))
+ (parallel [(set (match_dup 7)
+ (unspec:XF [(match_dup 6) (match_dup 4)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 8)
+ (unspec:XF [(match_dup 6) (match_dup 4)]
+ UNSPEC_FSCALE_EXP))])
+ (parallel [(set (match_dup 10)
+ (unspec:XF [(match_dup 9) (match_dup 8)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 11)
+ (unspec:XF [(match_dup 9) (match_dup 8)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_dup 12) (minus:XF (match_dup 10) (match_dup 9)))
+ (set (match_operand:XF 0 "register_operand" "")
+ (plus:XF (match_dup 12) (match_dup 7)))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ rtx temp;
+ int i;
+
+ for (i = 2; i < 13; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+ temp = standard_80387_constant_rtx (5); /* fldl2e */
+ emit_move_insn (operands[2], temp);
+ emit_move_insn (operands[9], CONST1_RTX (XFmode)); /* fld1 */
+})
+
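+;; ldexp (x, n) is a single fscale, x * 2^n; n is converted to XFmode
+;; first because fscale reads its scale factor from the FP stack.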
+(define_expand "ldexpdf3"
+ [(set (match_dup 3)
+ (float_extend:XF (match_operand:DF 1 "register_operand" "")))
+ (set (match_dup 4)
+ (float:XF (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_dup 5)
+ (unspec:XF [(match_dup 3) (match_dup 4)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 6)
+ (unspec:XF [(match_dup 3) (match_dup 4)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:DF 0 "register_operand" "")
+ (float_truncate:DF (match_dup 5)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 3; i < 7; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+})
+
+(define_expand "ldexpsf3"
+ [(set (match_dup 3)
+ (float_extend:XF (match_operand:SF 1 "register_operand" "")))
+ (set (match_dup 4)
+ (float:XF (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_dup 5)
+ (unspec:XF [(match_dup 3) (match_dup 4)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 6)
+ (unspec:XF [(match_dup 3) (match_dup 4)]
+ UNSPEC_FSCALE_EXP))])
+ (set (match_operand:SF 0 "register_operand" "")
+ (float_truncate:SF (match_dup 5)))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 3; i < 7; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+})
+
+(define_expand "ldexpxf3"
+ [(set (match_dup 3)
+ (float:XF (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:XF 0 "register_operand" "")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "")
+ (match_dup 3)]
+ UNSPEC_FSCALE_FRACT))
+ (set (match_dup 4)
+ (unspec:XF [(match_dup 1) (match_dup 3)]
+ UNSPEC_FSCALE_EXP))])]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ int i;
+
+ for (i = 3; i < 5; i++)
+ operands[i] = gen_reg_rtx (XFmode);
+})
+
+
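+;; frndint rounds st(0) to an integral value in the rounding mode
+;; currently set in the FP control word, which is exactly the
+;; semantics of rint, so the rint expanders below use it unmodified.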
+(define_insn "frndintxf2"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "frndint"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "XF")])
+
+(define_expand "rintdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2 (op0, op1));
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "rintsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2 (op0, op1));
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "rintxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ emit_insn (gen_frndintxf2 (operands[0], operands[1]));
+ DONE;
+})
+
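+;; fist/fistp store st(0) as an integer, again in the current
+;; rounding mode.  Only the popping form (fistp) can store 64 bits,
+;; which is presumably why the DImode patterns clobber a scratch
+;; matched to the input ("=&1f"): the popped stack register is dead
+;; afterwards.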
+(define_insn_and_split "*fistdi2_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ if (memory_operand (operands[0], VOIDmode))
+ emit_insn (gen_fistdi2 (operands[0], operands[1]));
+ else
+ {
+ operands[2] = assign_386_stack_local (DImode, SLOT_TEMP);
+ emit_insn (gen_fistdi2_with_temp (operands[0], operands[1],
+ operands[2]));
+ }
+ DONE;
+}
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DI")])
+
+(define_insn "fistdi2"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST))
+ (clobber (match_scratch:XF 2 "=&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DI")])
+
+(define_insn "fistdi2_with_temp"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST))
+ (clobber (match_operand:DI 2 "memory_operand" "=m,m"))
+ (clobber (match_scratch:XF 3 "=&1f,&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST))
+ (clobber (match_operand:DI 2 "memory_operand" ""))
+ (clobber (match_scratch 3 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 2) (unspec:DI [(match_dup 1)] UNSPEC_FIST))
+ (clobber (match_dup 3))])
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST))
+ (clobber (match_operand:DI 2 "memory_operand" ""))
+ (clobber (match_scratch 3 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_FIST))
+ (clobber (match_dup 3))])]
+ "")
+
+(define_insn_and_split "*fist<mode>2_1"
+ [(set (match_operand:X87MODEI12 0 "register_operand" "=r")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ operands[2] = assign_386_stack_local (<MODE>mode, SLOT_TEMP);
+ emit_insn (gen_fist<mode>2_with_temp (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fist<mode>2"
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "=m")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fist<mode>2_with_temp"
+ [(set (match_operand:X87MODEI12 0 "register_operand" "=r")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST))
+ (clobber (match_operand:X87MODEI12 2 "memory_operand" "=m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fpspc")
+ (set_attr "mode" "<MODE>")])
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "register_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST))
+ (clobber (match_operand:X87MODEI12 2 "memory_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST))
+ (clobber (match_operand:X87MODEI12 2 "memory_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 0) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST))]
+ "")
+
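+;; lrint is a plain fist: round in the current mode, then store.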
+(define_expand "lrint<mode>2"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "")
+ (unspec:X87MODEI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "")
+
+;; Rounding mode control word calculation could clobber FLAGS_REG.
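+;; The scheme used here and for ceil/trunc below: %3 is a stack slot
+;; holding a control word with the rounding-control bits forced to
+;; the wanted mode (round down in this case), %2 holds the saved
+;; original word, and the *_i387 insn wraps frndint between the two
+;; fldcw's.  Setting ix86_optimize_mode_switching lets the
+;; mode-switching pass minimize the control-word reloads.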
+(define_insn_and_split "frndintxf2_floor"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_FLOOR))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_FLOOR] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_FLOOR);
+
+ emit_insn (gen_frndintxf2_floor_i387 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "XF")])
+
+(define_insn "frndintxf2_floor_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fldcw\t%3\n\tfrndint\n\tfldcw\t%2"
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "XF")])
+
+(define_expand "floorxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ emit_insn (gen_frndintxf2_floor (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "floordf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_floor (op0, op1));
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "floorsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_floor (op0, op1));
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_insn_and_split "*fist<mode>2_floor_1"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:X87MODEI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_FLOOR))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_FLOOR] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_FLOOR);
+ if (memory_operand (operands[0], VOIDmode))
+ emit_insn (gen_fist<mode>2_floor (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ {
+ operands[4] = assign_386_stack_local (<MODE>mode, SLOT_TEMP);
+ emit_insn (gen_fist<mode>2_floor_with_temp (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ }
+ DONE;
+}
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fistdi2_floor"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))
+ (clobber (match_scratch:XF 4 "=&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "DI")])
+
+(define_insn "fistdi2_floor_with_temp"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:DI 4 "memory_operand" "=m,m"))
+ (clobber (match_scratch:XF 5 "=&1f,&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (unspec:DI [(match_dup 1)] UNSPEC_FIST_FLOOR))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_FIST_FLOOR))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])]
+ "")
+
+(define_insn "fist<mode>2_floor"
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "=m")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fist<mode>2_floor_with_temp"
+ [(set (match_operand:X87MODEI12 0 "nonimmediate_operand" "=m,?r")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" "=m,m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "floor")
+ (set_attr "mode" "<MODE>")])
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "register_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST_FLOOR))
+ (use (match_dup 2))
+ (use (match_dup 3))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_FLOOR))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST_FLOOR))
+ (use (match_dup 2))
+ (use (match_dup 3))])]
+ "")
+
+(define_expand "lfloor<mode>2"
+ [(parallel [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "")
+ (unspec:X87MODEI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_FLOOR))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "")
+
+;; Rounding mode control word calculation could clobber FLAGS_REG.
+(define_insn_and_split "frndintxf2_ceil"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_CEIL))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_CEIL] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_CEIL);
+
+ emit_insn (gen_frndintxf2_ceil_i387 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "XF")])
+
+(define_insn "frndintxf2_ceil_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_CEIL))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fldcw\t%3\n\tfrndint\n\tfldcw\t%2"
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "XF")])
+
+(define_expand "ceilxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ emit_insn (gen_frndintxf2_ceil (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "ceildf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_ceil (op0, op1));
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "ceilsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_ceil (op0, op1));
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_insn_and_split "*fist<mode>2_ceil_1"
+ [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:X87MODEI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_CEIL))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_CEIL] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_CEIL);
+ if (memory_operand (operands[0], VOIDmode))
+ emit_insn (gen_fist<mode>2_ceil (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ {
+ operands[4] = assign_386_stack_local (<MODE>mode, SLOT_TEMP);
+ emit_insn (gen_fist<mode>2_ceil_with_temp (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ }
+ DONE;
+}
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fistdi2_ceil"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))
+ (clobber (match_scratch:XF 4 "=&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "DI")])
+
+(define_insn "fistdi2_ceil_with_temp"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,?r")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:DI 4 "memory_operand" "=m,m"))
+ (clobber (match_scratch:XF 5 "=&1f,&1f"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "DI")])
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (unspec:DI [(match_dup 1)] UNSPEC_FIST_CEIL))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "")
+ (unspec:DI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:DI 4 "memory_operand" ""))
+ (clobber (match_scratch 5 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_FIST_CEIL))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 5))])]
+ "")
+
+(define_insn "fist<mode>2_ceil"
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "=m")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "* return output_fix_trunc (insn, operands, 0);"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "fist<mode>2_ceil_with_temp"
+ [(set (match_operand:X87MODEI12 0 "nonimmediate_operand" "=m,?r")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "f,f")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" "m,m"))
+ (use (match_operand:HI 3 "memory_operand" "m,m"))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" "=m,m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "#"
+ [(set_attr "type" "fistp")
+ (set_attr "i387_cw" "ceil")
+ (set_attr "mode" "<MODE>")])
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "register_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 4) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST_CEIL))
+ (use (match_dup 2))
+ (use (match_dup 3))])
+ (set (match_dup 0) (match_dup 4))]
+ "")
+
+(define_split
+ [(set (match_operand:X87MODEI12 0 "memory_operand" "")
+ (unspec:X87MODEI12 [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_CEIL))
+ (use (match_operand:HI 2 "memory_operand" ""))
+ (use (match_operand:HI 3 "memory_operand" ""))
+ (clobber (match_operand:X87MODEI12 4 "memory_operand" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0) (unspec:X87MODEI12 [(match_dup 1)]
+ UNSPEC_FIST_CEIL))
+ (use (match_dup 2))
+ (use (match_dup 3))])]
+ "")
+
+(define_expand "lceil<mode>2"
+ [(parallel [(set (match_operand:X87MODEI 0 "nonimmediate_operand" "")
+ (unspec:X87MODEI [(match_operand:XF 1 "register_operand" "")]
+ UNSPEC_FIST_CEIL))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+ "")
+
+;; Rounding mode control word calculation could clobber FLAGS_REG.
+(define_insn_and_split "frndintxf2_trunc"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_TRUNC))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_TRUNC] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_TRUNC);
+
+ emit_insn (gen_frndintxf2_trunc_i387 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "XF")])
+
+(define_insn "frndintxf2_trunc_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_TRUNC))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fldcw\t%3\n\tfrndint\n\tfldcw\t%2"
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "trunc")
+ (set_attr "mode" "XF")])
+
+(define_expand "btruncxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ emit_insn (gen_frndintxf2_trunc (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "btruncdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_trunc (op0, op1));
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "btruncsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_trunc (op0, op1));
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+;; Rounding mode control word calculation could clobber FLAGS_REG.
+(define_insn_and_split "frndintxf2_mask_pm"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_MASK_PM))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ ix86_optimize_mode_switching[I387_MASK_PM] = 1;
+
+ operands[2] = assign_386_stack_local (HImode, SLOT_CW_STORED);
+ operands[3] = assign_386_stack_local (HImode, SLOT_CW_MASK_PM);
+
+ emit_insn (gen_frndintxf2_mask_pm_i387 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "mask_pm")
+ (set_attr "mode" "XF")])
+
+(define_insn "frndintxf2_mask_pm_i387"
+ [(set (match_operand:XF 0 "register_operand" "=f")
+ (unspec:XF [(match_operand:XF 1 "register_operand" "0")]
+ UNSPEC_FRNDINT_MASK_PM))
+ (use (match_operand:HI 2 "memory_operand" "m"))
+ (use (match_operand:HI 3 "memory_operand" "m"))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+ "fldcw\t%3\n\tfrndint\n\tfclex\n\tfldcw\t%2"
+ [(set_attr "type" "frndint")
+ (set_attr "i387_cw" "mask_pm")
+ (set_attr "mode" "XF")])
+
+(define_expand "nearbyintxf2"
+ [(use (match_operand:XF 0 "register_operand" ""))
+ (use (match_operand:XF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && flag_unsafe_math_optimizations"
+{
+ emit_insn (gen_frndintxf2_mask_pm (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_expand "nearbyintdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:DF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!(TARGET_SSE2 && TARGET_SSE_MATH) || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extenddfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_mask_pm (op0, op1));
+
+ emit_insn (gen_truncxfdf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+(define_expand "nearbyintsf2"
+ [(use (match_operand:SF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "register_operand" ""))]
+ "TARGET_USE_FANCY_MATH_387
+ && (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
+ && flag_unsafe_math_optimizations"
+{
+ rtx op0 = gen_reg_rtx (XFmode);
+ rtx op1 = gen_reg_rtx (XFmode);
+
+ emit_insn (gen_extendsfxf2 (op1, operands[1]));
+ emit_insn (gen_frndintxf2_mask_pm (op0, op1));
+
+ emit_insn (gen_truncxfsf2_i387_noop (operands[0], op0));
+ DONE;
+})
+
+
+;; Block operation instructions
+
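+;; cld clears the direction flag, so the movs/stos/cmps patterns
+;; below walk upward through memory; they all (use ...) DIRFLAG_REG
+;; to stay ordered after it.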
+(define_insn "cld"
+ [(set (reg:SI DIRFLAG_REG) (const_int 0))]
+ ""
+ "cld"
+ [(set_attr "type" "cld")])
+
+(define_expand "movmemsi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:SI 2 "nonmemory_operand" ""))
+ (use (match_operand:SI 3 "const_int_operand" ""))]
+ "! optimize_size || TARGET_INLINE_ALL_STRINGOPS"
+{
+ if (ix86_expand_movmem (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "movmemdi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:DI 2 "nonmemory_operand" ""))
+ (use (match_operand:DI 3 "const_int_operand" ""))]
+ "TARGET_64BIT"
+{
+ if (ix86_expand_movmem (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+;; Most CPUs don't like single string operations.
+;; Handle this case here to simplify the preceding expanders.
+
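+;; strmov copies one element and bumps both pointers: a single
+;; movs insn when that is acceptable (TARGET_SINGLE_STRINGOP or
+;; optimizing for size), otherwise a separate load/store pair with
+;; explicit pointer adds.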
+(define_expand "strmov"
+ [(set (match_dup 4) (match_operand 3 "memory_operand" ""))
+ (set (match_operand 1 "memory_operand" "") (match_dup 4))
+ (parallel [(set (match_operand 0 "register_operand" "") (match_dup 5))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_operand 2 "register_operand" "") (match_dup 6))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+{
+ rtx adjust = GEN_INT (GET_MODE_SIZE (GET_MODE (operands[1])));
+
+ /* If .md ever supports :P for Pmode, these can be directly
+ in the pattern above. */
+ operands[5] = gen_rtx_PLUS (Pmode, operands[0], adjust);
+ operands[6] = gen_rtx_PLUS (Pmode, operands[2], adjust);
+
+ if (TARGET_SINGLE_STRINGOP || optimize_size)
+ {
+ emit_insn (gen_strmov_singleop (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[5], operands[6]));
+ DONE;
+ }
+
+ operands[4] = gen_reg_rtx (GET_MODE (operands[1]));
+})
+
+(define_expand "strmov_singleop"
+ [(parallel [(set (match_operand 1 "memory_operand" "")
+ (match_operand 3 "memory_operand" ""))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 4 "" ""))
+ (set (match_operand 2 "register_operand" "")
+ (match_operand 5 "" ""))
+ (use (reg:SI DIRFLAG_REG))])]
+ "TARGET_SINGLE_STRINGOP || optimize_size"
+ "")
+
+(define_insn "*strmovdi_rex_1"
+ [(set (mem:DI (match_operand:DI 2 "register_operand" "0"))
+ (mem:DI (match_operand:DI 3 "register_operand" "1")))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 2)
+ (const_int 8)))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (match_dup 3)
+ (const_int 8)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "movsq"
+ [(set_attr "type" "str")
+ (set_attr "mode" "DI")
+ (set_attr "memory" "both")])
+
+(define_insn "*strmovsi_1"
+ [(set (mem:SI (match_operand:SI 2 "register_operand" "0"))
+ (mem:SI (match_operand:SI 3 "register_operand" "1")))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 2)
+ (const_int 4)))
+ (set (match_operand:SI 1 "register_operand" "=S")
+ (plus:SI (match_dup 3)
+ (const_int 4)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "{movsl|movsd}"
+ [(set_attr "type" "str")
+ (set_attr "mode" "SI")
+ (set_attr "memory" "both")])
+
+(define_insn "*strmovsi_rex_1"
+ [(set (mem:SI (match_operand:DI 2 "register_operand" "0"))
+ (mem:SI (match_operand:DI 3 "register_operand" "1")))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 2)
+ (const_int 4)))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (match_dup 3)
+ (const_int 4)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "{movsl|movsd}"
+ [(set_attr "type" "str")
+ (set_attr "mode" "SI")
+ (set_attr "memory" "both")])
+
+(define_insn "*strmovhi_1"
+ [(set (mem:HI (match_operand:SI 2 "register_operand" "0"))
+ (mem:HI (match_operand:SI 3 "register_operand" "1")))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 2)
+ (const_int 2)))
+ (set (match_operand:SI 1 "register_operand" "=S")
+ (plus:SI (match_dup 3)
+ (const_int 2)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "movsw"
+ [(set_attr "type" "str")
+ (set_attr "memory" "both")
+ (set_attr "mode" "HI")])
+
+(define_insn "*strmovhi_rex_1"
+ [(set (mem:HI (match_operand:DI 2 "register_operand" "0"))
+ (mem:HI (match_operand:DI 3 "register_operand" "1")))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 2)
+ (const_int 2)))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (match_dup 3)
+ (const_int 2)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "movsw"
+ [(set_attr "type" "str")
+ (set_attr "memory" "both")
+ (set_attr "mode" "HI")])
+
+(define_insn "*strmovqi_1"
+ [(set (mem:QI (match_operand:SI 2 "register_operand" "0"))
+ (mem:QI (match_operand:SI 3 "register_operand" "1")))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 2)
+ (const_int 1)))
+ (set (match_operand:SI 1 "register_operand" "=S")
+ (plus:SI (match_dup 3)
+ (const_int 1)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "movsb"
+ [(set_attr "type" "str")
+ (set_attr "memory" "both")
+ (set_attr "mode" "QI")])
+
+(define_insn "*strmovqi_rex_1"
+ [(set (mem:QI (match_operand:DI 2 "register_operand" "0"))
+ (mem:QI (match_operand:DI 3 "register_operand" "1")))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 2)
+ (const_int 1)))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (match_dup 3)
+ (const_int 1)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "movsb"
+ [(set_attr "type" "str")
+ (set_attr "memory" "both")
+ (set_attr "mode" "QI")])
+
+(define_expand "rep_mov"
+ [(parallel [(set (match_operand 4 "register_operand" "") (const_int 0))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 5 "" ""))
+ (set (match_operand 2 "register_operand" "")
+ (match_operand 6 "" ""))
+ (set (match_operand 1 "memory_operand" "")
+ (match_operand 3 "memory_operand" ""))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))])]
+ ""
+ "")
+
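+;; rep movs copies %ecx elements from (%esi) to (%edi) and leaves
+;; %ecx at zero; the (plus (ashift ...)) sets describe the final
+;; pointer values, shifted left by log2 of the element size.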
+(define_insn "*rep_movdi_rex64"
+ [(set (match_operand:DI 2 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (ashift:DI (match_operand:DI 5 "register_operand" "2")
+ (const_int 3))
+ (match_operand:DI 3 "register_operand" "0")))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (ashift:DI (match_dup 5) (const_int 3))
+ (match_operand:DI 4 "register_operand" "1")))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 5))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;movsq|rep movsq}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "both")
+ (set_attr "mode" "DI")])
+
+(define_insn "*rep_movsi"
+ [(set (match_operand:SI 2 "register_operand" "=c") (const_int 0))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (ashift:SI (match_operand:SI 5 "register_operand" "2")
+ (const_int 2))
+ (match_operand:SI 3 "register_operand" "0")))
+ (set (match_operand:SI 1 "register_operand" "=S")
+ (plus:SI (ashift:SI (match_dup 5) (const_int 2))
+ (match_operand:SI 4 "register_operand" "1")))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 5))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT"
+ "{rep\;movsl|rep movsd}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "both")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rep_movsi_rex64"
+ [(set (match_operand:DI 2 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (ashift:DI (match_operand:DI 5 "register_operand" "2")
+ (const_int 2))
+ (match_operand:DI 3 "register_operand" "0")))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (ashift:DI (match_dup 5) (const_int 2))
+ (match_operand:DI 4 "register_operand" "1")))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 5))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;movsl|rep movsd}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "both")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rep_movqi"
+ [(set (match_operand:SI 2 "register_operand" "=c") (const_int 0))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_operand:SI 3 "register_operand" "0")
+ (match_operand:SI 5 "register_operand" "2")))
+ (set (match_operand:SI 1 "register_operand" "=S")
+ (plus:SI (match_operand:SI 4 "register_operand" "1") (match_dup 5)))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 5))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT"
+ "{rep\;movsb|rep movsb}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "both")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rep_movqi_rex64"
+ [(set (match_operand:DI 2 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_operand:DI 3 "register_operand" "0")
+ (match_operand:DI 5 "register_operand" "2")))
+ (set (match_operand:DI 1 "register_operand" "=S")
+ (plus:DI (match_operand:DI 4 "register_operand" "1") (match_dup 5)))
+ (set (mem:BLK (match_dup 3))
+ (mem:BLK (match_dup 4)))
+ (use (match_dup 5))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;movsb|rep movsb}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "both")
+ (set_attr "mode" "SI")])
+
+(define_expand "setmemsi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:SI 1 "nonmemory_operand" ""))
+ (use (match_operand 2 "const_int_operand" ""))
+ (use (match_operand 3 "const_int_operand" ""))]
+ ""
+{
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (ix86_expand_clrmem (operands[0], operands[1], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "setmemdi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:DI 1 "nonmemory_operand" ""))
+ (use (match_operand 2 "const_int_operand" ""))
+ (use (match_operand 3 "const_int_operand" ""))]
+ "TARGET_64BIT"
+{
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (ix86_expand_clrmem (operands[0], operands[1], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+;; Most CPUs don't like single string operations.
+;; Handle this case here to simplify the preceding expanders.
+
+(define_expand "strset"
+ [(set (match_operand 1 "memory_operand" "")
+ (match_operand 2 "register_operand" ""))
+ (parallel [(set (match_operand 0 "register_operand" "")
+ (match_dup 3))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+{
+ if (GET_MODE (operands[1]) != GET_MODE (operands[2]))
+ operands[1] = adjust_address_nv (operands[1], GET_MODE (operands[2]), 0);
+
+ /* If .md ever supports :P for Pmode, this can be directly
+ in the pattern above. */
+ operands[3] = gen_rtx_PLUS (Pmode, operands[0],
+ GEN_INT (GET_MODE_SIZE (GET_MODE
+ (operands[2]))));
+ if (TARGET_SINGLE_STRINGOP || optimize_size)
+ {
+ emit_insn (gen_strset_singleop (operands[0], operands[1], operands[2],
+ operands[3]));
+ DONE;
+ }
+})
+
+(define_expand "strset_singleop"
+ [(parallel [(set (match_operand 1 "memory_operand" "")
+ (match_operand 2 "register_operand" ""))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 3 "" ""))
+ (use (reg:SI DIRFLAG_REG))])]
+ "TARGET_SINGLE_STRINGOP || optimize_size"
+ "")
+
+(define_insn "*strsetdi_rex_1"
+ [(set (mem:DI (match_operand:DI 1 "register_operand" "0"))
+ (match_operand:DI 2 "register_operand" "a"))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 1)
+ (const_int 8)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "stosq"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "DI")])
+
+(define_insn "*strsetsi_1"
+ [(set (mem:SI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:SI 2 "register_operand" "a"))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 1)
+ (const_int 4)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "{stosl|stosd}"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "SI")])
+
+(define_insn "*strsetsi_rex_1"
+ [(set (mem:SI (match_operand:DI 1 "register_operand" "0"))
+ (match_operand:SI 2 "register_operand" "a"))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 1)
+ (const_int 4)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "{stosl|stosd}"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "SI")])
+
+(define_insn "*strsethi_1"
+ [(set (mem:HI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:HI 2 "register_operand" "a"))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 1)
+ (const_int 2)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "stosw"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "HI")])
+
+(define_insn "*strsethi_rex_1"
+ [(set (mem:HI (match_operand:DI 1 "register_operand" "0"))
+ (match_operand:HI 2 "register_operand" "a"))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 1)
+ (const_int 2)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "stosw"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "HI")])
+
+(define_insn "*strsetqi_1"
+ [(set (mem:QI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:QI 2 "register_operand" "a"))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_dup 1)
+ (const_int 1)))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "stosb"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "QI")])
+
+(define_insn "*strsetqi_rex_1"
+ [(set (mem:QI (match_operand:DI 1 "register_operand" "0"))
+ (match_operand:QI 2 "register_operand" "a"))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_dup 1)
+ (const_int 1)))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size)"
+ "stosb"
+ [(set_attr "type" "str")
+ (set_attr "memory" "store")
+ (set_attr "mode" "QI")])
+
+(define_expand "rep_stos"
+ [(parallel [(set (match_operand 1 "register_operand" "") (const_int 0))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 4 "" ""))
+ (set (match_operand 2 "memory_operand" "") (const_int 0))
+ (use (match_operand 3 "register_operand" ""))
+ (use (match_dup 1))
+ (use (reg:SI DIRFLAG_REG))])]
+ ""
+ "")
+
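+;; rep stos stores %al/%ax/%eax/%rax %ecx times at (%edi); as with
+;; rep movs, the extra sets describe the final register state.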
+(define_insn "*rep_stosdi_rex64"
+ [(set (match_operand:DI 1 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (ashift:DI (match_operand:DI 4 "register_operand" "1")
+ (const_int 3))
+ (match_operand:DI 3 "register_operand" "0")))
+ (set (mem:BLK (match_dup 3))
+ (const_int 0))
+ (use (match_operand:DI 2 "register_operand" "a"))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;stosq|rep stosq}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "store")
+ (set_attr "mode" "DI")])
+
+(define_insn "*rep_stossi"
+ [(set (match_operand:SI 1 "register_operand" "=c") (const_int 0))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (ashift:SI (match_operand:SI 4 "register_operand" "1")
+ (const_int 2))
+ (match_operand:SI 3 "register_operand" "0")))
+ (set (mem:BLK (match_dup 3))
+ (const_int 0))
+ (use (match_operand:SI 2 "register_operand" "a"))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT"
+ "{rep\;stosl|rep stosd}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "store")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rep_stossi_rex64"
+ [(set (match_operand:DI 1 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (ashift:DI (match_operand:DI 4 "register_operand" "1")
+ (const_int 2))
+ (match_operand:DI 3 "register_operand" "0")))
+ (set (mem:BLK (match_dup 3))
+ (const_int 0))
+ (use (match_operand:SI 2 "register_operand" "a"))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;stosl|rep stosd}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "store")
+ (set_attr "mode" "SI")])
+
+(define_insn "*rep_stosqi"
+ [(set (match_operand:SI 1 "register_operand" "=c") (const_int 0))
+ (set (match_operand:SI 0 "register_operand" "=D")
+ (plus:SI (match_operand:SI 3 "register_operand" "0")
+ (match_operand:SI 4 "register_operand" "1")))
+ (set (mem:BLK (match_dup 3))
+ (const_int 0))
+ (use (match_operand:QI 2 "register_operand" "a"))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))]
+ "!TARGET_64BIT"
+ "{rep\;stosb|rep stosb}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "store")
+ (set_attr "mode" "QI")])
+
+(define_insn "*rep_stosqi_rex64"
+ [(set (match_operand:DI 1 "register_operand" "=c") (const_int 0))
+ (set (match_operand:DI 0 "register_operand" "=D")
+ (plus:DI (match_operand:DI 3 "register_operand" "0")
+ (match_operand:DI 4 "register_operand" "1")))
+ (set (mem:BLK (match_dup 3))
+ (const_int 0))
+ (use (match_operand:QI 2 "register_operand" "a"))
+ (use (match_dup 4))
+ (use (reg:SI DIRFLAG_REG))]
+ "TARGET_64BIT"
+ "{rep\;stosb|rep stosb}"
+ [(set_attr "type" "str")
+ (set_attr "prefix_rep" "1")
+ (set_attr "memory" "store")
+ (set_attr "mode" "QI")])
+
+(define_expand "cmpstrnsi"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (compare:SI (match_operand:BLK 1 "general_operand" "")
+ (match_operand:BLK 2 "general_operand" "")))
+ (use (match_operand 3 "general_operand" ""))
+ (use (match_operand 4 "immediate_operand" ""))]
+ /* APPLE LOCAL x86_64 disable inline expansion for memcmp until 4436760 is fixed */
+ "(! optimize_size || TARGET_INLINE_ALL_STRINGOPS) && !TARGET_64BIT"
+{
+ /* APPLE LOCAL begin 4134111 */
+ rtx addr1, addr2, out, outlow, count, countreg, align, scratch;
+
+ /* Can't use this if the user has appropriated esi or edi. */
+ if (global_regs[4] || global_regs[5])
+ FAIL;
+
+ /* The Darwin expansion is unsafe on volatile objects. */
+ if (TARGET_MACHO
+ && (MEM_VOLATILE_P (operands[1]) || MEM_VOLATILE_P (operands[2])))
+ FAIL;
+
+ /* APPLE LOCAL begin 4134510 */
+ if (TARGET_MACHO)
+ {
+ count = operands[3];
+ if (GET_CODE (count) != CONST_INT || INTVAL (count) > 30)
+ FAIL;
+ }
+ /* APPLE LOCAL end 4134510 */
+
+ if (TARGET_MACHO)
+ scratch = gen_reg_rtx (SImode);
+
+ out = operands[0];
+ if (GET_CODE (out) != REG)
+ out = gen_reg_rtx (SImode);
+
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+ addr2 = copy_to_mode_reg (Pmode, XEXP (operands[2], 0));
+ if (addr1 != XEXP (operands[1], 0))
+ operands[1] = replace_equiv_address_nv (operands[1], addr1);
+ if (addr2 != XEXP (operands[2], 0))
+ operands[2] = replace_equiv_address_nv (operands[2], addr2);
+
+ count = operands[3];
+ countreg = ix86_zero_extend_to_Pmode (count);
+
+ /* %%% Iff we are testing strict equality, we can use known alignment
+ to good advantage. This may be possible with combine, particularly
+ once cc0 is dead. */
+ align = operands[4];
+
+ emit_insn (gen_cld ());
+ if (GET_CODE (count) == CONST_INT)
+ {
+ if (INTVAL (count) == 0)
+ {
+ emit_move_insn (operands[0], const0_rtx);
+ DONE;
+ }
+ if (!TARGET_MACHO)
+ emit_insn (gen_cmpstrnqi_nz_1 (addr1, addr2, countreg, align,
+ operands[1], operands[2]));
+ else
+ emit_insn (gen_darwin_cmpstrnqi_nz_1 (out, addr1, addr2, countreg, align,
+ operands[1], operands[2], scratch));
+ }
+ else
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_cmpdi_1_rex64 (countreg, countreg));
+ else
+ emit_insn (gen_cmpsi_1 (countreg, countreg));
+ if (!TARGET_MACHO)
+ emit_insn (gen_cmpstrnqi_1 (addr1, addr2, countreg, align,
+ operands[1], operands[2]));
+ else
+ emit_insn (gen_darwin_cmpstrqi_1 (out, addr1, addr2, countreg, align,
+ operands[1], operands[2], scratch));
+ }
+
+ if (!TARGET_MACHO)
+ {
+ outlow = gen_lowpart (QImode, out);
+ emit_insn (gen_cmpintqi (outlow));
+ emit_move_insn (out, gen_rtx_SIGN_EXTEND (SImode, outlow));
+ }
+ /* APPLE LOCAL end 4134111 */
+
+ if (operands[0] != out)
+ emit_move_insn (operands[0], out);
+
+ DONE;
+})
+
+;; Produce a tri-state integer (-1, 0, 1) from condition codes.
+
+(define_expand "cmpintqi"
+ [(set (match_dup 1)
+ (gtu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (set (match_dup 2)
+ (ltu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (parallel [(set (match_operand:QI 0 "register_operand" "")
+ (minus:QI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "operands[1] = gen_reg_rtx (QImode);
+ operands[2] = gen_reg_rtx (QImode);")
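+
+;; For illustration, with the comparison result already in the flags this
+;; typically expands to a sequence along these lines (the register choices
+;; here are only an example; the allocator picks them):
+;;
+;;	seta	%al		; %al = 1 if unsigned greater
+;;	setb	%dl		; %dl = 1 if unsigned less
+;;	subb	%dl, %al	; %al = 1, 0 or -1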
+
+;; memcmp recognizers. The `cmpsb' opcode does nothing if the count is
+;; zero. Emit extra code to make sure that a zero-length compare is EQ.
+
+(define_expand "cmpstrnqi_nz_1"
+ [(parallel [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand 4 "memory_operand" "")
+ (match_operand 5 "memory_operand" "")))
+ (use (match_operand 2 "register_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 0 "register_operand" ""))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_dup 2))])]
+ ""
+ "")
+
+(define_insn "*cmpstrnqi_nz_1"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (mem:BLK (match_operand:SI 4 "register_operand" "0"))
+ (mem:BLK (match_operand:SI 5 "register_operand" "1"))))
+ (use (match_operand:SI 6 "register_operand" "2"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:SI 0 "register_operand" "=S"))
+ (clobber (match_operand:SI 1 "register_operand" "=D"))
+ (clobber (match_operand:SI 2 "register_operand" "=c"))]
+ "!TARGET_64BIT"
+ "repz{\;| }cmpsb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+(define_insn "*cmpstrnqi_nz_rex_1"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (mem:BLK (match_operand:DI 4 "register_operand" "0"))
+ (mem:BLK (match_operand:DI 5 "register_operand" "1"))))
+ (use (match_operand:DI 6 "register_operand" "2"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:DI 0 "register_operand" "=S"))
+ (clobber (match_operand:DI 1 "register_operand" "=D"))
+ (clobber (match_operand:DI 2 "register_operand" "=c"))]
+ "TARGET_64BIT"
+ "repz{\;| }cmpsb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+;; The same, but the count is not known to be nonzero (it may be zero).
+
+(define_expand "cmpstrnqi_1"
+ [(parallel [(set (reg:CC FLAGS_REG)
+ (if_then_else:CC (ne (match_operand 2 "register_operand" "")
+ (const_int 0))
+ (compare:CC (match_operand 4 "memory_operand" "")
+ (match_operand 5 "memory_operand" ""))
+ (const_int 0)))
+ (use (match_operand:SI 3 "immediate_operand" ""))
+ (use (reg:CC FLAGS_REG))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 0 "register_operand" ""))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_dup 2))])]
+ ""
+ "")
+
+(define_insn "*cmpstrnqi_1"
+ [(set (reg:CC FLAGS_REG)
+ (if_then_else:CC (ne (match_operand:SI 6 "register_operand" "2")
+ (const_int 0))
+ (compare:CC (mem:BLK (match_operand:SI 4 "register_operand" "0"))
+ (mem:BLK (match_operand:SI 5 "register_operand" "1")))
+ (const_int 0)))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (use (reg:CC FLAGS_REG))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:SI 0 "register_operand" "=S"))
+ (clobber (match_operand:SI 1 "register_operand" "=D"))
+ (clobber (match_operand:SI 2 "register_operand" "=c"))]
+ "!TARGET_64BIT"
+ "repz{\;| }cmpsb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+(define_insn "*cmpstrnqi_rex_1"
+ [(set (reg:CC FLAGS_REG)
+ (if_then_else:CC (ne (match_operand:DI 6 "register_operand" "2")
+ (const_int 0))
+ (compare:CC (mem:BLK (match_operand:DI 4 "register_operand" "0"))
+ (mem:BLK (match_operand:DI 5 "register_operand" "1")))
+ (const_int 0)))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (use (reg:CC FLAGS_REG))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:DI 0 "register_operand" "=S"))
+ (clobber (match_operand:DI 1 "register_operand" "=D"))
+ (clobber (match_operand:DI 2 "register_operand" "=c"))]
+ "TARGET_64BIT"
+ "repz{\;| }cmpsb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+;; APPLE LOCAL begin 4134111
+;; Darwin's memcmp returns the difference of the last 2 bytes compared,
+;; not -1/0/1. Unfortunately we must reload those bytes to get the
+;; result, as they aren't sitting around anywhere. This is still
+;; faster than calling libc though.
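+;;
+;; For illustration only, the semantics being implemented are roughly
+;; those of this hypothetical byte-wise memcmp returning the raw byte
+;; difference:
+;;
+;;	int darwin_style_memcmp (const unsigned char *a,
+;;				 const unsigned char *b, size_t n)
+;;	{
+;;	  size_t i;
+;;	  for (i = 0; i < n; i++)
+;;	    if (a[i] != b[i])
+;;	      return a[i] - b[i];
+;;	  return 0;
+;;	}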
+
+(define_expand "darwin_cmpstrnqi_nz_1"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (subreg:SI (compare:CC (match_operand 5 "memory_operand" "")
+ (match_operand 6 "memory_operand" "")) 0))
+ (use (match_operand 3 "register_operand" ""))
+ (use (match_operand:SI 4 "immediate_operand" ""))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_operand 2 "register_operand" ""))
+ (clobber (match_operand 7 "register_operand" ""))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_dup 3))])]
+ ""
+ "")
+
+(define_insn "*darwin_cmpstrnqi_nz_1"
+;; APPLE LOCAL 5379188
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (subreg:SI (compare:CC (mem:BLK (match_operand:SI 5 "register_operand" "1"))
+ (mem:BLK (match_operand:SI 6 "register_operand" "2"))) 0))
+ (use (match_operand:SI 8 "register_operand" "3"))
+ (use (match_operand:SI 4 "immediate_operand" "i"))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:SI 1 "register_operand" "=S"))
+ (clobber (match_operand:SI 2 "register_operand" "=D"))
+ (clobber (match_operand:SI 7 "register_operand" "=r"))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_operand:SI 3 "register_operand" "=c"))]
+ "!TARGET_64BIT"
+ "repz{\;| }cmpsb\n\tmov\t$0, %0\n\tje\t0f\n\tmovzbl\t-1(%5), %0\n\tmovzbl\t-1(%6), %8\n\tsubl\t%8,%0\n0:"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+(define_expand "darwin_cmpstrqi_1"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (subreg:SI (if_then_else:CC (ne (match_operand 3 "register_operand" "")
+ (const_int 0))
+ (compare:CC (match_operand 5 "memory_operand" "")
+ (match_operand 6 "memory_operand" ""))
+ (const_int 0)) 0))
+ (use (match_operand:SI 4 "immediate_operand" ""))
+ (use (reg:SI DIRFLAG_REG))
+ (use (reg:CC FLAGS_REG))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_operand 2 "register_operand" ""))
+ (clobber (match_operand 7 "register_operand" ""))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_dup 3))])]
+ ""
+ "")
+
+(define_insn "*darwin_cmpstrqi_1"
+;; APPLE LOCAL 5379188
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (subreg:SI (if_then_else:CC (ne (match_operand:SI 8 "register_operand" "3")
+ (const_int 0))
+ (compare:CC (mem:BLK (match_operand:SI 5 "register_operand" "1"))
+ (mem:BLK (match_operand:SI 6 "register_operand" "2")))
+ (const_int 0)) 0))
+ (use (match_operand:SI 4 "immediate_operand" "i"))
+ (use (reg:SI DIRFLAG_REG))
+ (use (reg:CC FLAGS_REG))
+ (clobber (match_operand:SI 1 "register_operand" "=S"))
+ (clobber (match_operand:SI 2 "register_operand" "=D"))
+ (clobber (match_operand:SI 7 "register_operand" "=r"))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (match_operand:SI 3 "register_operand" "=c"))]
+ "!TARGET_64BIT"
+ "repz{\;| }cmpsb\n\tmov\t$0, %0\n\tje\t0f\n\tmovzbl\t-1(%5), %0\n\tmovzbl\t-1(%6), %8\n\tsubl\t%8,%0\n0:"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+;; APPLE LOCAL end 4134111
+
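+;; strlen expanders.  The underlying repnz scasb scans bytes starting at
+;; %edi/%rdi for the value in %al (normally the NUL terminator), counting
+;; %ecx/%rcx down as it goes; ix86_expand_strlen then recovers the length
+;; from the resulting count register.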
+(define_expand "strlensi"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(match_operand:BLK 1 "general_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")
+ (match_operand 3 "immediate_operand" "")] UNSPEC_SCAS))]
+ ""
+{
+ if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "strlendi"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (unspec:DI [(match_operand:BLK 1 "general_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")
+ (match_operand 3 "immediate_operand" "")] UNSPEC_SCAS))]
+ ""
+{
+ if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "strlenqi_1"
+ [(parallel [(set (match_operand 0 "register_operand" "") (match_operand 2 "" ""))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "*strlenqi_1"
+ [(set (match_operand:SI 0 "register_operand" "=&c")
+ (unspec:SI [(mem:BLK (match_operand:SI 5 "register_operand" "1"))
+ (match_operand:QI 2 "register_operand" "a")
+ (match_operand:SI 3 "immediate_operand" "i")
+ (match_operand:SI 4 "register_operand" "0")] UNSPEC_SCAS))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:SI 1 "register_operand" "=D"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT"
+ "repnz{\;| }scasb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+(define_insn "*strlenqi_rex_1"
+ [(set (match_operand:DI 0 "register_operand" "=&c")
+ (unspec:DI [(mem:BLK (match_operand:DI 5 "register_operand" "1"))
+ (match_operand:QI 2 "register_operand" "a")
+ (match_operand:DI 3 "immediate_operand" "i")
+ (match_operand:DI 4 "register_operand" "0")] UNSPEC_SCAS))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand:DI 1 "register_operand" "=D"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "repnz{\;| }scasb"
+ [(set_attr "type" "str")
+ (set_attr "mode" "QI")
+ (set_attr "prefix_rep" "1")])
+
+;; Peephole optimizations to clean up after cmpstrn*. This should be
+;; handled in combine, but it is not currently up to the task.
+;; When used for their truth value, the cmpstrn* expanders generate
+;; code like this:
+;;
+;; repz cmpsb
+;; seta %al
+;; setb %dl
+;; cmpb %al, %dl
+;; jcc label
+;;
+;; The intermediate three instructions are unnecessary.
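+;; After the peepholes fire, what remains is just:
+;;
+;; repz cmpsb
+;; jcc label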
+
+;; This one handles cmpstrn*_nz_1...
+(define_peephole2
+ [(parallel[
+ (set (reg:CC FLAGS_REG)
+ (compare:CC (mem:BLK (match_operand 4 "register_operand" ""))
+ (mem:BLK (match_operand 5 "register_operand" ""))))
+ (use (match_operand 6 "register_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 0 "register_operand" ""))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_operand 2 "register_operand" ""))])
+ (set (match_operand:QI 7 "register_operand" "")
+ (gtu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (set (match_operand:QI 8 "register_operand" "")
+ (ltu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (set (reg FLAGS_REG)
+ (compare (match_dup 7) (match_dup 8)))
+ ]
+ "peep2_reg_dead_p (4, operands[7]) && peep2_reg_dead_p (4, operands[8])"
+ [(parallel[
+ (set (reg:CC FLAGS_REG)
+ (compare:CC (mem:BLK (match_dup 4))
+ (mem:BLK (match_dup 5))))
+ (use (match_dup 6))
+ (use (match_dup 3))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_dup 0))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))])]
+ "")
+
+;; ...and this one handles cmpstrn*_1.
+(define_peephole2
+ [(parallel[
+ (set (reg:CC FLAGS_REG)
+ (if_then_else:CC (ne (match_operand 6 "register_operand" "")
+ (const_int 0))
+ (compare:CC (mem:BLK (match_operand 4 "register_operand" ""))
+ (mem:BLK (match_operand 5 "register_operand" "")))
+ (const_int 0)))
+ (use (match_operand:SI 3 "immediate_operand" ""))
+ (use (reg:CC FLAGS_REG))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_operand 0 "register_operand" ""))
+ (clobber (match_operand 1 "register_operand" ""))
+ (clobber (match_operand 2 "register_operand" ""))])
+ (set (match_operand:QI 7 "register_operand" "")
+ (gtu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (set (match_operand:QI 8 "register_operand" "")
+ (ltu:QI (reg:CC FLAGS_REG) (const_int 0)))
+ (set (reg FLAGS_REG)
+ (compare (match_dup 7) (match_dup 8)))
+ ]
+ "peep2_reg_dead_p (4, operands[7]) && peep2_reg_dead_p (4, operands[8])"
+ [(parallel[
+ (set (reg:CC FLAGS_REG)
+ (if_then_else:CC (ne (match_dup 6)
+ (const_int 0))
+ (compare:CC (mem:BLK (match_dup 4))
+ (mem:BLK (match_dup 5)))
+ (const_int 0)))
+ (use (match_dup 3))
+ (use (reg:CC FLAGS_REG))
+ (use (reg:SI DIRFLAG_REG))
+ (clobber (match_dup 0))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))])]
+ "")
+
+
+
+;; Conditional move instructions.
+
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "general_operand" "")
+ (match_operand:DI 3 "general_operand" "")))]
+ "TARGET_64BIT"
+ "if (!ix86_expand_int_movcc (operands)) FAIL; DONE;")
+
+(define_insn "x86_movdicc_0_m1_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (if_then_else:DI (match_operand 1 "ix86_carry_flag_operator" "")
+ (const_int -1)
+ (const_int 0)))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "sbb{q}\t%0, %0"
+ ; Since we don't have the proper number of operands for an alu insn,
+ ; fill in all the blanks.
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "memory" "none")
+ (set_attr "imm_disp" "false")
+ (set_attr "mode" "DI")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*movdicc_c_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:DI 2 "nonimmediate_operand" "rm,0")
+ (match_operand:DI 3 "nonimmediate_operand" "0,rm")))]
+ "TARGET_64BIT && TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ cmov%O2%C1\t{%2, %0|%0, %2}
+ cmov%O2%c1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "icmov")
+ (set_attr "mode" "DI")])
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "general_operand" "")))]
+ ""
+ "if (!ix86_expand_int_movcc (operands)) FAIL; DONE;")
+
+;; Data flow gets confused by our desire for `sbbl reg,reg', and clearing
+;; the register first winds up with `sbbl $0,reg', which is also weird.
+;; So just document what we're doing explicitly.
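+;;
+;; For reference: sbb{l} %eax, %eax computes %eax - %eax - CF, which is
+;; -1 when the carry flag is set and 0 when it is clear, regardless of
+;; the register's previous contents.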
+
+(define_insn "x86_movsicc_0_m1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI (match_operand 1 "ix86_carry_flag_operator" "")
+ (const_int -1)
+ (const_int 0)))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "sbb{l}\t%0, %0"
+ ; Since we don't have the proper number of operands for an alu insn,
+ ; fill in all the blanks.
+ [(set_attr "type" "alu")
+ (set_attr "pent_pair" "pu")
+ (set_attr "memory" "none")
+ (set_attr "imm_disp" "false")
+ (set_attr "mode" "SI")
+ (set_attr "length_immediate" "0")])
+
+(define_insn "*movsicc_noc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:SI 2 "nonimmediate_operand" "rm,0")
+ (match_operand:SI 3 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ cmov%O2%C1\t{%2, %0|%0, %2}
+ cmov%O2%c1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "icmov")
+ (set_attr "mode" "SI")])
+
+(define_expand "movhicc"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (if_then_else:HI (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "general_operand" "")
+ (match_operand:HI 3 "general_operand" "")))]
+ "TARGET_HIMODE_MATH"
+ "if (!ix86_expand_int_movcc (operands)) FAIL; DONE;")
+
+(define_insn "*movhicc_noc"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "ix86_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:HI 2 "nonimmediate_operand" "rm,0")
+ (match_operand:HI 3 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ cmov%O2%C1\t{%2, %0|%0, %2}
+ cmov%O2%c1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "icmov")
+ (set_attr "mode" "HI")])
+
+(define_expand "movqicc"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (if_then_else:QI (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "general_operand" "")
+ (match_operand:QI 3 "general_operand" "")))]
+ "TARGET_QIMODE_MATH"
+ "if (!ix86_expand_int_movcc (operands)) FAIL; DONE;")
+
+(define_insn_and_split "*movqicc_noc"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI (match_operator 1 "ix86_comparison_operator"
+ [(match_operand 4 "flags_reg_operand" "")
+ (const_int 0)])
+ (match_operand:QI 2 "register_operand" "r,0")
+ (match_operand:QI 3 "register_operand" "0,r")))]
+ "TARGET_CMOVE && !TARGET_PARTIAL_REG_STALL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 4) (const_int 0)])
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = gen_lowpart (SImode, operands[3]);"
+ [(set_attr "type" "icmov")
+ (set_attr "mode" "SI")])
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))]
+ "(TARGET_80387 && TARGET_CMOVE) || TARGET_SSE_MATH"
+ "if (! ix86_expand_fp_movcc (operands)) FAIL; DONE;")
+
+(define_insn "*movsfcc_1_387"
+ [(set (match_operand:SF 0 "register_operand" "=f,f,r,r")
+ (if_then_else:SF (match_operator 1 "fcmov_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:SF 2 "nonimmediate_operand" "f,0,rm,0")
+ (match_operand:SF 3 "nonimmediate_operand" "0,f,0,rm")))]
+ "TARGET_80387 && TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ fcmov%F1\t{%2, %0|%0, %2}
+ fcmov%f1\t{%3, %0|%0, %3}
+ cmov%O2%C1\t{%2, %0|%0, %2}
+ cmov%O2%c1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "fcmov,fcmov,icmov,icmov")
+ (set_attr "mode" "SF,SF,SI,SI")])
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))]
+ "(TARGET_80387 && TARGET_CMOVE) || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "if (! ix86_expand_fp_movcc (operands)) FAIL; DONE;")
+
+(define_insn "*movdfcc_1"
+ [(set (match_operand:DF 0 "register_operand" "=f,f,&r,&r")
+ (if_then_else:DF (match_operator 1 "fcmov_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:DF 2 "nonimmediate_operand" "f,0,rm,0")
+ (match_operand:DF 3 "nonimmediate_operand" "0,f,0,rm")))]
+ "!TARGET_64BIT && TARGET_80387 && TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ fcmov%F1\t{%2, %0|%0, %2}
+ fcmov%f1\t{%3, %0|%0, %3}
+ #
+ #"
+ [(set_attr "type" "fcmov,fcmov,multi,multi")
+ (set_attr "mode" "DF")])
+
+(define_insn "*movdfcc_1_rex64"
+ [(set (match_operand:DF 0 "register_operand" "=f,f,r,r")
+ (if_then_else:DF (match_operator 1 "fcmov_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:DF 2 "nonimmediate_operand" "f,0,rm,0")
+ (match_operand:DF 3 "nonimmediate_operand" "0,f,0,rm")))]
+ "TARGET_64BIT && TARGET_80387 && TARGET_CMOVE
+ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)"
+ "@
+ fcmov%F1\t{%2, %0|%0, %2}
+ fcmov%f1\t{%3, %0|%0, %3}
+ cmov%O2%C1\t{%2, %0|%0, %2}
+ cmov%O2%c1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "fcmov,fcmov,icmov,icmov")
+ (set_attr "mode" "DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_and_not_any_fp_reg_operand" "")
+ (if_then_else:DF (match_operator 1 "fcmov_comparison_operator"
+ [(match_operand 4 "flags_reg_operand" "")
+ (const_int 0)])
+ (match_operand:DF 2 "nonimmediate_operand" "")
+ (match_operand:DF 3 "nonimmediate_operand" "")))]
+ "!TARGET_64BIT && reload_completed"
+ [(set (match_dup 2)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 4) (const_int 0)])
+ (match_dup 5)
+ (match_dup 7)))
+ (set (match_dup 3)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 4) (const_int 0)])
+ (match_dup 6)
+ (match_dup 8)))]
+ "split_di (operands+2, 1, operands+5, operands+6);
+ split_di (operands+3, 1, operands+7, operands+8);
+ split_di (operands, 1, operands+2, operands+3);")
+
+(define_expand "movxfcc"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (if_then_else:XF (match_operand 1 "comparison_operator" "")
+ (match_operand:XF 2 "register_operand" "")
+ (match_operand:XF 3 "register_operand" "")))]
+ "TARGET_80387 && TARGET_CMOVE"
+ "if (! ix86_expand_fp_movcc (operands)) FAIL; DONE;")
+
+(define_insn "*movxfcc_1"
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (if_then_else:XF (match_operator 1 "fcmov_comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand:XF 2 "register_operand" "f,0")
+ (match_operand:XF 3 "register_operand" "0,f")))]
+ "TARGET_80387 && TARGET_CMOVE"
+ "@
+ fcmov%F1\t{%2, %0|%0, %2}
+ fcmov%f1\t{%3, %0|%0, %3}"
+ [(set_attr "type" "fcmov")
+ (set_attr "mode" "XF")])
+
+;; These versions of the min/max patterns are intentionally ignorant of
+;; their behavior wrt -0.0 and NaN (via the commutative operand mark).
+;; Since both the tree-level MAX_EXPR and the rtl-level SMAX operator
+;; are undefined in this condition, we're certain this is correct.
+
+(define_insn "sminsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (smin:SF (match_operand:SF 1 "nonimmediate_operand" "%0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE_MATH"
+ "minss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_insn "smaxsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (smax:SF (match_operand:SF 1 "nonimmediate_operand" "%0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE_MATH"
+ "maxss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_insn "smindf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (smin:DF (match_operand:DF 1 "nonimmediate_operand" "%0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "minsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_insn "smaxdf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (smax:DF (match_operand:DF 1 "nonimmediate_operand" "%0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "maxsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+;; These versions of the min/max patterns implement exactly the operations
+;; min = (op1 < op2 ? op1 : op2)
+;; max = (!(op1 < op2) ? op1 : op2)
+;; Their operands are not commutative, and thus they may be used in the
+;; presence of -0.0 and NaN.
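+;;
+;; A worked example of why the operand order matters: with op1 = -0.0
+;; and op2 = +0.0 the test op1 < op2 is false (the two compare equal),
+;; so min yields op2, i.e. +0.0; and since every comparison involving a
+;; NaN is false, min (NaN, x) yields x while min (x, NaN) yields NaN.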
+
+(define_insn "*ieee_sminsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MIN))]
+ "TARGET_SSE_MATH"
+ "minss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_insn "*ieee_smaxsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MAX))]
+ "TARGET_SSE_MATH"
+ "maxss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_insn "*ieee_smindf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MIN))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "minsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_insn "*ieee_smaxdf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MAX))]
+ "TARGET_SSE2 && TARGET_SSE_MATH"
+ "maxsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+;; Make two stack loads independent:
+;; fld aa fld aa
+;; fld %st(0) -> fld bb
+;; fmul bb fmul %st(1), %st
+;;
+;; Actually we only match the last two instructions for simplicity.
+(define_peephole2
+ [(set (match_operand 0 "fp_register_operand" "")
+ (match_operand 1 "fp_register_operand" ""))
+ (set (match_dup 0)
+ (match_operator 2 "binary_fp_operator"
+ [(match_dup 0)
+ (match_operand 3 "memory_operand" "")]))]
+ "REGNO (operands[0]) != REGNO (operands[1])"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (match_dup 4))]
+
+ ;; The % commutativity modifier no longer has any effect in peephole2
+ ;; patterns, so we have to swap the operands manually in the case of
+ ;; addition and multiplication.
+ "if (COMMUTATIVE_ARITH_P (operands[2]))
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[2]), GET_MODE (operands[2]),
+ operands[0], operands[1]);
+ else
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[2]), GET_MODE (operands[2]),
+ operands[1], operands[0]);")
+
+;; Conditional addition patterns
+(define_expand "addqicc"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "const_int_operand" "")]
+ ""
+ "if (!ix86_expand_int_addcc (operands)) FAIL; DONE;")
+
+(define_expand "addhicc"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "register_operand" "")
+ (match_operand:HI 3 "const_int_operand" "")]
+ ""
+ "if (!ix86_expand_int_addcc (operands)) FAIL; DONE;")
+
+(define_expand "addsicc"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "if (!ix86_expand_int_addcc (operands)) FAIL; DONE;")
+
+(define_expand "adddicc"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 3 "const_int_operand" "")]
+ "TARGET_64BIT"
+ "if (!ix86_expand_int_addcc (operands)) FAIL; DONE;")
+
+
+;; Misc patterns (?)
+
+;; This pattern exists to put a dependency on all ebp-based memory accesses.
+;; Otherwise there will be nothing to keep
+;;
+;; [(set (reg ebp) (reg esp))]
+;; [(set (reg esp) (plus (reg esp) (const_int -160000)))
+;; (clobber (eflags)]
+;; [(set (mem (plus (reg ebp) (const_int -160000))) (const_int 0))]
+;;
+;; in proper program order.
+(define_insn "pro_epilogue_adjust_stack_1"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "0,r")
+ (match_operand:SI 2 "immediate_operand" "i,i")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))]
+ "!TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOV:
+ return "mov{l}\t{%1, %0|%0, %1}";
+
+ case TYPE_ALU:
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{l}\t{%2, %0|%0, %2}";
+ }
+ return "add{l}\t{%2, %0|%0, %2}";
+
+ case TYPE_LEA:
+ operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+ return "lea{l}\t{%a2, %0|%0, %a2}";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "0")
+ (const_string "alu")
+ (match_operand:SI 2 "const0_operand" "")
+ (const_string "imov")
+ ]
+ (const_string "lea")))
+ (set_attr "mode" "SI")])
+
+(define_insn "pro_epilogue_adjust_stack_rex64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "0,r")
+ (match_operand:DI 2 "x86_64_immediate_operand" "e,e")))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_IMOV:
+ return "mov{q}\t{%1, %0|%0, %1}";
+
+ case TYPE_ALU:
+ if (GET_CODE (operands[2]) == CONST_INT
+ /* Avoid overflows. */
+ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))
+ && (INTVAL (operands[2]) == 128
+ || (INTVAL (operands[2]) < 0
+ && INTVAL (operands[2]) != -128)))
+ {
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ return "sub{q}\t{%2, %0|%0, %2}";
+ }
+ return "add{q}\t{%2, %0|%0, %2}";
+
+ case TYPE_LEA:
+ operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+ return "lea{q}\t{%a2, %0|%0, %a2}";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "0")
+ (const_string "alu")
+ (match_operand:DI 2 "const0_operand" "")
+ (const_string "imov")
+ ]
+ (const_string "lea")))
+ (set_attr "mode" "DI")])
+
+(define_insn "pro_epilogue_adjust_stack_rex64_2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "0,r")
+ (match_operand:DI 3 "immediate_operand" "i,i")))
+ (use (match_operand:DI 2 "register_operand" "r,r"))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))]
+ "TARGET_64BIT"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALU:
+ return "add{q}\t{%2, %0|%0, %2}";
+
+ case TYPE_LEA:
+ operands[2] = gen_rtx_PLUS (DImode, operands[1], operands[2]);
+ return "lea{q}\t{%a2, %0|%0, %a2}";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "alu,lea")
+ (set_attr "mode" "DI")])
+
+(define_expand "allocate_stack_worker"
+ [(match_operand:SI 0 "register_operand" "")]
+ "TARGET_STACK_PROBE"
+{
+ if (reload_completed)
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_allocate_stack_worker_rex64_postreload (operands[0]));
+ else
+ emit_insn (gen_allocate_stack_worker_postreload (operands[0]));
+ }
+ else
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_allocate_stack_worker_rex64 (operands[0]));
+ else
+ emit_insn (gen_allocate_stack_worker_1 (operands[0]));
+ }
+ DONE;
+})
+
+(define_insn "allocate_stack_worker_1"
+ [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "a")]
+ UNSPECV_STACK_PROBE)
+ (set (reg:SI SP_REG) (minus:SI (reg:SI SP_REG) (match_dup 0)))
+ (clobber (match_scratch:SI 1 "=0"))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_STACK_PROBE"
+ "call\t__alloca"
+ [(set_attr "type" "multi")
+ (set_attr "length" "5")])
+
+(define_expand "allocate_stack_worker_postreload"
+ [(parallel [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "a")]
+ UNSPECV_STACK_PROBE)
+ (set (reg:SI SP_REG) (minus:SI (reg:SI SP_REG) (match_dup 0)))
+ (clobber (match_dup 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_insn "allocate_stack_worker_rex64"
+ [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "a")]
+ UNSPECV_STACK_PROBE)
+ (set (reg:DI SP_REG) (minus:DI (reg:DI SP_REG) (match_dup 0)))
+ (clobber (match_scratch:DI 1 "=0"))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT && TARGET_STACK_PROBE"
+ "call\t__alloca"
+ [(set_attr "type" "multi")
+ (set_attr "length" "5")])
+
+(define_expand "allocate_stack_worker_rex64_postreload"
+ [(parallel [(unspec_volatile:DI [(match_operand:DI 0 "register_operand" "a")]
+ UNSPECV_STACK_PROBE)
+ (set (reg:DI SP_REG) (minus:DI (reg:DI SP_REG) (match_dup 0)))
+ (clobber (match_dup 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ "")
+
+(define_expand "allocate_stack"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (reg:SI SP_REG)
+ (match_operand:SI 1 "general_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (reg:SI SP_REG)
+ (minus:SI (reg:SI SP_REG) (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_STACK_PROBE"
+{
+#ifdef CHECK_STACK_LIMIT
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < CHECK_STACK_LIMIT)
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ operands[1]));
+ else
+#endif
+ emit_insn (gen_allocate_stack_worker (copy_to_mode_reg (SImode,
+ operands[1])));
+
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+})
+
+(define_expand "builtin_setjmp_receiver"
+ [(label_ref (match_operand 0 "" ""))]
+ "!TARGET_64BIT && flag_pic"
+{
+ if (TARGET_MACHO)
+ {
+ rtx xops[3];
+ rtx picreg = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
+ rtx label_rtx = gen_label_rtx ();
+ emit_insn (gen_set_got_labelled (pic_offset_table_rtx, label_rtx));
+ xops[0] = xops[1] = picreg;
+ xops[2] = gen_rtx_CONST (SImode,
+ gen_rtx_MINUS (SImode,
+ gen_rtx_LABEL_REF (SImode, label_rtx),
+ gen_rtx_SYMBOL_REF (SImode, GOT_SYMBOL_NAME)));
+ ix86_expand_binary_operator (MINUS, SImode, xops);
+ }
+ else
+ emit_insn (gen_set_got (pic_offset_table_rtx));
+ DONE;
+})
+
+;; Avoid redundant prefixes by splitting HImode arithmetic to SImode.
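+;; (In 32-bit code the HImode form, e.g. addw $1, %ax, carries a 0x66
+;; operand-size prefix that the SImode form addl $1, %eax avoids.)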
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 3 "promotable_binary_operator"
+ [(match_operand 1 "register_operand" "")
+ (match_operand 2 "aligned_operand" "")]))
+ (clobber (reg:CC FLAGS_REG))]
+ "! TARGET_PARTIAL_REG_STALL && reload_completed
+ && ((GET_MODE (operands[0]) == HImode
+ && ((!optimize_size && !TARGET_FAST_PREFIX)
+ /* ??? next two lines just !satisfies_constraint_K (...) */
+ || GET_CODE (operands[2]) != CONST_INT
+ || satisfies_constraint_K (operands[2])))
+ || (GET_MODE (operands[0]) == QImode
+ && (TARGET_PROMOTE_QImode || optimize_size)))"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ if (GET_CODE (operands[3]) != ASHIFT)
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ PUT_MODE (operands[3], SImode);")
+
+; Promote the QImode tests, as the i386 has an encoding of the AND
+; instruction with a 32-bit sign-extended immediate, so the instruction
+; size is unchanged; the exception is the %eax case, where the promoted
+; form is one byte longer, hence the ! optimize_size.
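+;
+; For example (with an arbitrary immediate): and $0x20, %bl encodes as
+; 80 E3 20 and the promoted andl $0x20, %ebx as 83 E3 20, both 3 bytes,
+; while for the accumulator and $0x20, %al is 24 20 (2 bytes) but
+; andl $0x20, %eax is 83 E0 20 (3 bytes).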
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(and (match_operand 3 "aligned_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))
+ (set (match_operand 1 "register_operand" "")
+ (and (match_dup 3) (match_dup 4)))]
+ "! TARGET_PARTIAL_REG_STALL && reload_completed
+ /* Ensure that the operand will remain sign-extended immediate. */
+ && ix86_match_ccmode (insn, INTVAL (operands[4]) >= 0 ? CCNOmode : CCZmode)
+ && ! optimize_size
+ && ((GET_MODE (operands[1]) == HImode && ! TARGET_FAST_PREFIX)
+ || (GET_MODE (operands[1]) == QImode && TARGET_PROMOTE_QImode))"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(and:SI (match_dup 3) (match_dup 4))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (and:SI (match_dup 3) (match_dup 4)))])]
+{
+ operands[4]
+ = gen_int_mode (INTVAL (operands[4])
+ & GET_MODE_MASK (GET_MODE (operands[1])), SImode);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[3] = gen_lowpart (SImode, operands[3]);
+})
+
+; Don't promote the QImode tests, as the i386 has no encoding of the
+; TEST instruction with a 32-bit sign-extended immediate, so the
+; instruction size would at least double, which is not what we want
+; even with ! optimize_size.
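+;
+; For example: test $0x20, %bl is F6 C3 20 (3 bytes), while the promoted
+; testl $0x20, %ebx needs a full 32-bit immediate: F7 C3 20 00 00 00
+; (6 bytes).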
+(define_split
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand:HI 2 "aligned_operand" "")
+ (match_operand:HI 3 "const_int_operand" ""))
+ (const_int 0)]))]
+ "! TARGET_PARTIAL_REG_STALL && reload_completed
+ /* Ensure that the operand will remain sign-extended immediate. */
+ && ix86_match_ccmode (insn, INTVAL (operands[3]) >= 0 ? CCNOmode : CCZmode)
+ && ! TARGET_FAST_PREFIX
+ && ! optimize_size"
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+{
+ operands[3]
+ = gen_int_mode (INTVAL (operands[3])
+ & GET_MODE_MASK (GET_MODE (operands[2])), SImode);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+})
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (neg (match_operand 1 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))]
+ "! TARGET_PARTIAL_REG_STALL && reload_completed
+ && (GET_MODE (operands[0]) == HImode
+ || (GET_MODE (operands[0]) == QImode
+ && (TARGET_PROMOTE_QImode || optimize_size)))"
+ [(parallel [(set (match_dup 0)
+ (neg:SI (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (not (match_operand 1 "register_operand" "")))]
+ "! TARGET_PARTIAL_REG_STALL && reload_completed
+ && (GET_MODE (operands[0]) == HImode
+ || (GET_MODE (operands[0]) == QImode
+ && (TARGET_PROMOTE_QImode || optimize_size)))"
+ [(set (match_dup 0)
+ (not:SI (match_dup 1)))]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(reg FLAGS_REG) (const_int 0)])
+ (match_operand 2 "register_operand" "")
+ (match_operand 3 "register_operand" "")))]
+ "! TARGET_PARTIAL_REG_STALL && TARGET_CMOVE
+ && (GET_MODE (operands[0]) == HImode
+ || (GET_MODE (operands[0]) == QImode
+ && (TARGET_PROMOTE_QImode || optimize_size)))"
+ [(set (match_dup 0)
+ (if_then_else:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = gen_lowpart (SImode, operands[3]);")
+
+
+;; RTL Peephole optimizations, run before sched2. These primarily look to
+;; transform a complex memory operation into two memory to register operations.
+
+;; Don't push memory operands
+(define_peephole2
+ [(set (match_operand:SI 0 "push_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (match_scratch:SI 2 "r")]
+ "!optimize_size && !TARGET_PUSH_MEMORY
+ && !RTX_FRAME_RELATED_P (peep2_next_insn (0))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(set (match_operand:DI 0 "push_operand" "")
+ (match_operand:DI 1 "memory_operand" ""))
+ (match_scratch:DI 2 "r")]
+ "!optimize_size && !TARGET_PUSH_MEMORY
+ && !RTX_FRAME_RELATED_P (peep2_next_insn (0))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; We need to handle SFmode only, because DFmode and XFmode are split
+;; into SImode pushes.
+(define_peephole2
+ [(set (match_operand:SF 0 "push_operand" "")
+ (match_operand:SF 1 "memory_operand" ""))
+ (match_scratch:SF 2 "r")]
+ "!optimize_size && !TARGET_PUSH_MEMORY
+ && !RTX_FRAME_RELATED_P (peep2_next_insn (0))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(set (match_operand:HI 0 "push_operand" "")
+ (match_operand:HI 1 "memory_operand" ""))
+ (match_scratch:HI 2 "r")]
+ "!optimize_size && !TARGET_PUSH_MEMORY
+ && !RTX_FRAME_RELATED_P (peep2_next_insn (0))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(set (match_operand:QI 0 "push_operand" "")
+ (match_operand:QI 1 "memory_operand" ""))
+ (match_scratch:QI 2 "q")]
+ "!optimize_size && !TARGET_PUSH_MEMORY
+ && !RTX_FRAME_RELATED_P (peep2_next_insn (0))"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; Don't move an immediate directly to memory when the instruction
+;; gets too big.
+(define_peephole2
+ [(match_scratch:SI 1 "r")
+ (set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))]
+ "! optimize_size
+ && ! TARGET_USE_MOV0
+ && TARGET_SPLIT_LONG_MOVES
+ && get_attr_length (insn) >= ix86_cost->large_insn
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 1) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0) (match_dup 1))]
+ "")
+
+(define_peephole2
+ [(match_scratch:HI 1 "r")
+ (set (match_operand:HI 0 "memory_operand" "")
+ (const_int 0))]
+ "! optimize_size
+ && ! TARGET_USE_MOV0
+ && TARGET_SPLIT_LONG_MOVES
+ && get_attr_length (insn) >= ix86_cost->large_insn
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 2) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0) (match_dup 1))]
+ "operands[2] = gen_lowpart (SImode, operands[1]);")
+
+(define_peephole2
+ [(match_scratch:QI 1 "q")
+ (set (match_operand:QI 0 "memory_operand" "")
+ (const_int 0))]
+ "! optimize_size
+ && ! TARGET_USE_MOV0
+ && TARGET_SPLIT_LONG_MOVES
+ && get_attr_length (insn) >= ix86_cost->large_insn
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 2) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0) (match_dup 1))]
+ "operands[2] = gen_lowpart (SImode, operands[1]);")
+
+(define_peephole2
+ [(match_scratch:SI 2 "r")
+ (set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))]
+ "! optimize_size
+ && get_attr_length (insn) >= ix86_cost->large_insn
+ && TARGET_SPLIT_LONG_MOVES"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(match_scratch:HI 2 "r")
+ (set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "immediate_operand" ""))]
+ "! optimize_size && get_attr_length (insn) >= ix86_cost->large_insn
+ && TARGET_SPLIT_LONG_MOVES"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(match_scratch:QI 2 "q")
+ (set (match_operand:QI 0 "memory_operand" "")
+ (match_operand:QI 1 "immediate_operand" ""))]
+ "! optimize_size && get_attr_length (insn) >= ix86_cost->large_insn
+ && TARGET_SPLIT_LONG_MOVES"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; Don't compare memory with zero; load it and use a test instead.
+(define_peephole2
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand:SI 2 "memory_operand" "")
+ (const_int 0)]))
+ (match_scratch:SI 3 "r")]
+ "ix86_match_ccmode (insn, CCNOmode) && ! optimize_size"
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (match_op_dup 1 [(match_dup 3) (const_int 0)]))]
+ "")
+
+;; NOT is not pairable on Pentium, while XOR is, though one byte longer.
+;; Don't split NOTs with a displacement operand, because the resulting
+;; XOR will not be pairable anyway.
+;;
+;; On AMD K6, a NOT with a memory operand that cannot be represented
+;; using just a modRM byte is vector decoded.  The XOR replacement is
+;; long decoded, so this split helps there as well.
+;;
+;; Note: we can't do this as a regular split because we can't get proper
+;; lifetime information then.
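+;;
+;; For example: not %eax is F7 D0 (2 bytes), while the replacement
+;; xorl $-1, %eax is 83 F0 FF (3 bytes).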
+
+(define_peephole2
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (not:SI (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "!optimize_size
+ && peep2_regno_dead_p (0, FLAGS_REG)
+ && ((TARGET_PENTIUM
+ && (GET_CODE (operands[0]) != MEM
+ || !memory_displacement_operand (operands[0], SImode)))
+ || (TARGET_K6 && long_memory_operand (operands[0], SImode)))"
+ [(parallel [(set (match_dup 0)
+ (xor:SI (match_dup 1) (const_int -1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_peephole2
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (not:HI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ "!optimize_size
+ && peep2_regno_dead_p (0, FLAGS_REG)
+ && ((TARGET_PENTIUM
+ && (GET_CODE (operands[0]) != MEM
+ || !memory_displacement_operand (operands[0], HImode)))
+ || (TARGET_K6 && long_memory_operand (operands[0], HImode)))"
+ [(parallel [(set (match_dup 0)
+ (xor:HI (match_dup 1) (const_int -1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_peephole2
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (not:QI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "!optimize_size
+ && peep2_regno_dead_p (0, FLAGS_REG)
+ && ((TARGET_PENTIUM
+ && (GET_CODE (operands[0]) != MEM
+ || !memory_displacement_operand (operands[0], QImode)))
+ || (TARGET_K6 && long_memory_operand (operands[0], QImode)))"
+ [(parallel [(set (match_dup 0)
+ (xor:QI (match_dup 1) (const_int -1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+;; Non-pairable "test imm, reg" instructions can be translated to
+;; "and imm, reg" if reg dies.  The "and" form is also shorter (a
+;; one-byte opcode instead of two, and it has a short form for byte
+;; operands), so do it for other CPUs as well.  Given that the value
+;; was dead, this should not create any new dependencies.  Pass on the
+;; sub-word versions if we're concerned about partial register stalls.
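+;;
+;; For example, when %ebx dies: testl $8, %ebx (F7 C3 08 00 00 00,
+;; 6 bytes) becomes andl $8, %ebx (83 E3 08, 3 bytes).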
+
+(define_peephole2
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" ""))
+ (const_int 0)]))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && (true_regnum (operands[2]) != 0
+ || satisfies_constraint_K (operands[3]))
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:SI (match_dup 2) (match_dup 3)))])]
+ "")
+
+;; We don't need to handle the HImode case, because it will have been
+;; promoted to SImode when ! TARGET_PARTIAL_REG_STALL.
+
+(define_peephole2
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:QI (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "immediate_operand" ""))
+ (const_int 0)]))]
+ "! TARGET_PARTIAL_REG_STALL
+ && ix86_match_ccmode (insn, CCNOmode)
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:QI (match_dup 2) (match_dup 3)))])]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI
+ (zero_extract:SI
+ (match_operand 2 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
+ "! TARGET_PARTIAL_REG_STALL
+ && ix86_match_ccmode (insn, CCNOmode)
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI
+ (zero_extract:SI
+ (match_dup 2)
+ (const_int 8)
+ (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))
+ (set (zero_extract:SI (match_dup 2)
+ (const_int 8)
+ (const_int 8))
+ (and:SI
+ (zero_extract:SI
+ (match_dup 2)
+ (const_int 8)
+ (const_int 8))
+ (match_dup 3)))])]
+ "")
+
+;; Don't do logical operations with memory inputs.
+(define_peephole2
+ [(match_scratch:SI 2 "r")
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 3 "arith_or_logical_operator"
+ [(match_dup 0)
+ (match_operand:SI 1 "memory_operand" "")]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "! optimize_size && ! TARGET_READ_MODIFY"
+ [(set (match_dup 2) (match_dup 1))
+ (parallel [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 0) (match_dup 2)]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:SI 2 "r")
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 3 "arith_or_logical_operator"
+ [(match_operand:SI 1 "memory_operand" "")
+ (match_dup 0)]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "! optimize_size && ! TARGET_READ_MODIFY"
+ [(set (match_dup 2) (match_dup 1))
+ (parallel [(set (match_dup 0)
+ (match_op_dup 3 [(match_dup 2) (match_dup 0)]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+; Don't do logical operations with memory outputs.
+;
+; These two don't make sense for PPro/PII -- we're expanding a 4-uop
+; instruction into two 1-uop insns plus a 2-uop insn.  The last of these
+; has the same decoder scheduling characteristics as the original.
+
+(define_peephole2
+ [(match_scratch:SI 2 "r")
+ (parallel [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operator:SI 3 "arith_or_logical_operator"
+ [(match_dup 0)
+ (match_operand:SI 1 "nonmemory_operand" "")]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "! optimize_size && ! TARGET_READ_MODIFY_WRITE"
+ [(set (match_dup 2) (match_dup 0))
+ (parallel [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 2) (match_dup 1)]))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+(define_peephole2
+ [(match_scratch:SI 2 "r")
+ (parallel [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operator:SI 3 "arith_or_logical_operator"
+ [(match_operand:SI 1 "nonmemory_operand" "")
+ (match_dup 0)]))
+ (clobber (reg:CC FLAGS_REG))])]
+ "! optimize_size && ! TARGET_READ_MODIFY_WRITE"
+ [(set (match_dup 2) (match_dup 0))
+ (parallel [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_dup 0) (match_dup 2))]
+ "")
+
+;; Attempt to always use XOR for zeroing registers.
+(define_peephole2
+ [(set (match_operand 0 "register_operand" "")
+ (match_operand 1 "const0_operand" ""))]
+ "GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
+ && (! TARGET_USE_MOV0 || optimize_size)
+ && GENERAL_REG_P (operands[0])
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ operands[0] = gen_lowpart (word_mode, operands[0]);
+})
+
+(define_peephole2
+ [(set (strict_low_part (match_operand 0 "register_operand" ""))
+ (const_int 0))]
+ "(GET_MODE (operands[0]) == QImode
+ || GET_MODE (operands[0]) == HImode)
+ && (! TARGET_USE_MOV0 || optimize_size)
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (strict_low_part (match_dup 0)) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])])
+
+;; For HI and SI modes, or $-1,reg is smaller than mov $-1,reg.
+(define_peephole2
+ [(set (match_operand 0 "register_operand" "")
+ (const_int -1))]
+ "(GET_MODE (operands[0]) == HImode
+ || GET_MODE (operands[0]) == SImode
+ || (GET_MODE (operands[0]) == DImode && TARGET_64BIT))
+ && (optimize_size || TARGET_PENTIUM)
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (const_int -1))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[0] = gen_lowpart (GET_MODE (operands[0]) == DImode ? DImode : SImode,
+ operands[0]);")
+
+;; Attempt to convert simple leas to adds. These can be created by
+;; move expanders.
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_dup 0)
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ "peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (subreg:SI (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonmemory_operand" "")) 0))]
+ "peep2_regno_dead_p (0, FLAGS_REG) && REGNO (operands[0]) == REGNO (operands[1])"
+ [(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[2] = gen_lowpart (SImode, operands[2]);")
+
+(define_peephole2
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_dup 0)
+ (match_operand:DI 1 "x86_64_general_operand" "")))]
+ "peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 1)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "")
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand" "")))]
+ "exact_log2 (INTVAL (operands[1])) >= 0
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1])));")
+
+(define_peephole2
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_dup 0)
+ (match_operand:DI 1 "const_int_operand" "")))]
+ "exact_log2 (INTVAL (operands[1])) >= 0
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (ashift:DI (match_dup 0) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1])));")
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand" "")
+ (subreg:SI (mult:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")) 0))]
+ "exact_log2 (INTVAL (operands[2])) >= 0
+ && REGNO (operands[0]) == REGNO (operands[1])
+ && peep2_regno_dead_p (0, FLAGS_REG)"
+ [(parallel [(set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[2] = GEN_INT (exact_log2 (INTVAL (operands[2])));")
+
+;; The ESP adjustments can be done by the push and pop instructions.  The
+;; resulting code is shorter, since push is only 1 byte while add imm,
+;; %esp is 3 bytes.  On many CPUs it is also faster, since special
+;; hardware to avoid esp dependencies is present.
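+;;
+;; For example: addl $4, %esp is 83 C4 04 (3 bytes) while popl %ecx is
+;; just 59 (1 byte); likewise subl $4, %esp is 83 EC 04 (3 bytes)
+;; against pushl %eax, 50 (1 byte).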
+
+;; While some of these conversions may be done using splitters, we use
+;; peepholes in order to allow the combine_stack_adjustments pass to see
+;; nonobfuscated RTL.
+
+;; Convert prologue esp subtractions to push.
+;; We need a register to push.  In order to keep verify_flow_info happy
+;; we have two choices:
+;; - use a scratch register and clobber it, in order to avoid dependencies
+;; - use an already-live register
+;; We can't use the second way right now, since there is no reliable way
+;; to verify that a given register is live.  The first choice will also
+;; most likely result in fewer dependencies: at the point of the esp
+;; adjustment it is very likely that call-clobbered registers are dead.
+;; We may later want to use the base pointer as an alternative when no
+;; register is available.
+
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_SUB_ESP_4"
+ [(clobber (match_dup 0))
+ (parallel [(set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))
+ (clobber (mem:BLK (scratch)))])])
+
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -8)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_SUB_ESP_8"
+ [(clobber (match_dup 0))
+ (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))
+ (parallel [(set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))
+ (clobber (mem:BLK (scratch)))])])
+
+;; Convert esp subtractions to push.
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -4)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size || !TARGET_SUB_ESP_4"
+ [(clobber (match_dup 0))
+ (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))])
+
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -8)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size || !TARGET_SUB_ESP_8"
+ [(clobber (match_dup 0))
+ (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))
+ (set (mem:SI (pre_dec:SI (reg:SI SP_REG))) (match_dup 0))])
+
+;; Convert epilogue deallocator to pop.
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_ADD_ESP_4"
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (mem:BLK (scratch)))])]
+ "")
+
+;; The two-pops case is tricky, since a pop causes a dependency on the
+;; destination register.  We use two registers if they are available.
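+;;
+;; For example (illustrative; the actual scratch registers vary), instead of
+;;   addl $8, %esp
+;; we emit
+;;   popl %ecx
+;;   popl %edx
+;; so that the second pop does not wait on the first one's destination.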
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (match_scratch:SI 1 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_ADD_ESP_8"
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (mem:BLK (scratch)))])
+ (parallel [(set (match_dup 1) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size"
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (mem:BLK (scratch)))])
+ (parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])]
+ "")
+
+;; Convert esp additions to pop.
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])]
+ "")
+
+;; The two-pops case is tricky, since a pop causes a dependency on the
+;; destination register.  We use two registers if they are available.
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (match_scratch:SI 1 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])
+ (parallel [(set (match_dup 1) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:SI 0 "r")
+ (parallel [(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size"
+ [(parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])
+ (parallel [(set (match_dup 0) (mem:SI (reg:SI SP_REG)))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))])]
+ "")
+
+;; Convert compares with 1 to shorter inc/dec operations when CF is not
+;; required and the register dies.  Similarly, convert compares with 128
+;; to a plus of -128.
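+;;
+;; For example (a sketch, assuming %eax is dead after the compare),
+;;   cmpl $1, %eax
+;; becomes
+;;   decl %eax
+;; which sets the same flags except CF.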
+(define_peephole2
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" "")]))]
+ "(INTVAL (operands[3]) == -1
+ || INTVAL (operands[3]) == 1
+ || INTVAL (operands[3]) == 128)
+ && ix86_match_ccmode (insn, CCGCmode)
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1 [(match_dup 2) (match_dup 3)]))
+ (clobber (match_dup 2))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -8)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_SUB_ESP_4"
+ [(clobber (match_dup 0))
+ (parallel [(set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))
+ (clobber (mem:BLK (scratch)))])])
+
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -16)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_SUB_ESP_8"
+ [(clobber (match_dup 0))
+ (set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))
+ (parallel [(set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))
+ (clobber (mem:BLK (scratch)))])])
+
+;; Convert esp subtractions to push.
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -8)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size || !TARGET_SUB_ESP_4"
+ [(clobber (match_dup 0))
+ (set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))])
+
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int -16)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size || !TARGET_SUB_ESP_8"
+ [(clobber (match_dup 0))
+ (set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))
+ (set (mem:DI (pre_dec:DI (reg:DI SP_REG))) (match_dup 0))])
+
+;; Convert epilogue deallocator to pop.
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_ADD_ESP_4"
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (mem:BLK (scratch)))])]
+ "")
+
+;; The two-pops case is tricky, since a pop causes a dependency on the
+;; destination register.  We use two registers if they are available.
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (match_scratch:DI 1 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 16)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size || !TARGET_ADD_ESP_8"
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (mem:BLK (scratch)))])
+ (parallel [(set (match_dup 1) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 16)))
+ (clobber (reg:CC FLAGS_REG))
+ (clobber (mem:BLK (scratch)))])]
+ "optimize_size"
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (mem:BLK (scratch)))])
+ (parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])]
+ "")
+
+;; Convert esp additions to pop.
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])]
+ "")
+
+;; The two-pops case is tricky, since a pop causes a dependency on the
+;; destination register.  We use two registers if they are available.
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (match_scratch:DI 1 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 16)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])
+ (parallel [(set (match_dup 1) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])]
+ "")
+
+(define_peephole2
+ [(match_scratch:DI 0 "r")
+ (parallel [(set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 16)))
+ (clobber (reg:CC FLAGS_REG))])]
+ "optimize_size"
+ [(parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])
+ (parallel [(set (match_dup 0) (mem:DI (reg:DI SP_REG)))
+ (set (reg:DI SP_REG) (plus:DI (reg:DI SP_REG) (const_int 8)))])]
+ "")
+
+;; Convert imul by three, five and nine into lea
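+;; For example (illustrative),
+;;   imull $5, %ecx, %ecx
+;; becomes
+;;   leal (%ecx,%ecx,4), %ecx
+;; i.e. a multiply by N is rewritten as base plus index scaled by N-1.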
+(define_peephole2
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "INTVAL (operands[2]) == 3
+ || INTVAL (operands[2]) == 5
+ || INTVAL (operands[2]) == 9"
+ [(set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))]
+ { operands[2] = GEN_INT (INTVAL (operands[2]) - 1); })
+
+(define_peephole2
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "!optimize_size
+ && (INTVAL (operands[2]) == 3
+ || INTVAL (operands[2]) == 5
+ || INTVAL (operands[2]) == 9)"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 0) (match_dup 2))
+ (match_dup 0)))]
+ { operands[2] = GEN_INT (INTVAL (operands[2]) - 1); })
+
+(define_peephole2
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT
+ && (INTVAL (operands[2]) == 3
+ || INTVAL (operands[2]) == 5
+ || INTVAL (operands[2]) == 9)"
+ [(set (match_dup 0)
+ (plus:DI (mult:DI (match_dup 1) (match_dup 2))
+ (match_dup 1)))]
+ { operands[2] = GEN_INT (INTVAL (operands[2]) - 1); })
+
+(define_peephole2
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_64BIT
+ && !optimize_size
+ && (INTVAL (operands[2]) == 3
+ || INTVAL (operands[2]) == 5
+ || INTVAL (operands[2]) == 9)"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0)
+ (plus:DI (mult:DI (match_dup 0) (match_dup 2))
+ (match_dup 0)))]
+ { operands[2] = GEN_INT (INTVAL (operands[2]) - 1); })
+
+;; Imul $32bit_imm, mem, reg is vector decoded, while
+;; imul $32bit_imm, reg, reg is direct decoded.
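+;;
+;; For example (a sketch; %r11 stands in for whatever scratch is matched),
+;;   imulq $123456789, (%rdi), %rax
+;; is rewritten as
+;;   movq (%rdi), %r11
+;;   imulq $123456789, %r11, %rax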
+(define_peephole2
+ [(match_scratch:DI 3 "r")
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "memory_operand" "")
+ (match_operand:DI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+ && !satisfies_constraint_K (operands[2])"
+ [(set (match_dup 3) (match_dup 1))
+ (parallel [(set (match_dup 0) (mult:DI (match_dup 3) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+"")
+
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "memory_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+ && !satisfies_constraint_K (operands[2])"
+ [(set (match_dup 3) (match_dup 1))
+ (parallel [(set (match_dup 0) (mult:SI (match_dup 3) (match_dup 2)))
+ (clobber (reg:CC FLAGS_REG))])]
+"")
+
+(define_peephole2
+ [(match_scratch:SI 3 "r")
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI
+ (mult:SI (match_operand:SI 1 "memory_operand" "")
+ (match_operand:SI 2 "immediate_operand" ""))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+ && !satisfies_constraint_K (operands[2])"
+ [(set (match_dup 3) (match_dup 1))
+ (parallel [(set (match_dup 0) (zero_extend:DI (mult:SI (match_dup 3) (match_dup 2))))
+ (clobber (reg:CC FLAGS_REG))])]
+"")
+
+;; imul $8/16bit_imm, regmem, reg is vector decoded.
+;; Convert it into imul reg, reg.
+;; It would be better to force the assembler to encode the instruction using
+;; a long immediate, but there is apparently no way to do so.
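+;;
+;; For example (illustrative; %r11 stands in for the matched scratch),
+;;   imulq $11, %rax, %rax
+;; becomes
+;;   movq $11, %r11
+;;   imulq %r11, %rax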
+(define_peephole2
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (match_operand:DI 1 "nonimmediate_operand" "")
+ (match_operand:DI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_scratch:DI 3 "r")]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+ && satisfies_constraint_K (operands[2])"
+ [(set (match_dup 3) (match_dup 2))
+ (parallel [(set (match_dup 0) (mult:DI (match_dup 0) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+})
+
+(define_peephole2
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_scratch:SI 3 "r")]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size
+ && satisfies_constraint_K (operands[2])"
+ [(set (match_dup 3) (match_dup 2))
+ (parallel [(set (match_dup 0) (mult:SI (match_dup 0) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+})
+
+(define_peephole2
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "immediate_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (match_scratch:HI 3 "r")]
+ "(TARGET_K8 || TARGET_GENERIC64) && !optimize_size"
+ [(set (match_dup 3) (match_dup 2))
+ (parallel [(set (match_dup 0) (mult:HI (match_dup 0) (match_dup 3)))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+})
+
+;; After splitting up read-modify operations, array accesses with memory
+;; operands might end up in the form:
+;;   sall $2, %eax
+;;   movl 4(%esp), %edx
+;;   addl %edx, %eax
+;; instead of the pre-split form:
+;;   sall $2, %eax
+;;   addl 4(%esp), %eax
+;; Turn it into:
+;;   movl 4(%esp), %edx
+;;   leal (%edx,%eax,4), %eax
+
+(define_peephole2
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (ashift (match_operand 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_operand 3 "register_operand")
+ (match_operand 4 "x86_64_general_operand" ""))
+ (parallel [(set (match_operand 5 "register_operand" "")
+ (plus (match_operand 6 "register_operand" "")
+ (match_operand 7 "register_operand" "")))
+ (clobber (reg:CC FLAGS_REG))])]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 3
+ /* Validate MODE for lea. */
+ && ((!TARGET_PARTIAL_REG_STALL
+ && (GET_MODE (operands[0]) == QImode
+ || GET_MODE (operands[0]) == HImode))
+ || GET_MODE (operands[0]) == SImode
+ || (TARGET_64BIT && GET_MODE (operands[0]) == DImode))
+ /* We reorder the load and the shift. */
+ && !rtx_equal_p (operands[1], operands[3])
+ && !reg_overlap_mentioned_p (operands[0], operands[4])
+ /* The last PLUS must consist of operands 0 and 3. */
+ && !rtx_equal_p (operands[0], operands[3])
+ && (rtx_equal_p (operands[3], operands[6])
+ || rtx_equal_p (operands[3], operands[7]))
+ && (rtx_equal_p (operands[0], operands[6])
+ || rtx_equal_p (operands[0], operands[7]))
+ /* The intermediate operand 0 must die or be the same as the output. */
+ && (rtx_equal_p (operands[0], operands[5])
+ || peep2_reg_dead_p (3, operands[0]))"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (match_dup 1))]
+{
+ enum machine_mode mode = GET_MODE (operands[5]) == DImode ? DImode : SImode;
+ int scale = 1 << INTVAL (operands[2]);
+ rtx index = gen_lowpart (Pmode, operands[1]);
+ rtx base = gen_lowpart (Pmode, operands[3]);
+ rtx dest = gen_lowpart (mode, operands[5]);
+
+ operands[1] = gen_rtx_PLUS (Pmode, base,
+ gen_rtx_MULT (Pmode, index, GEN_INT (scale)));
+ if (mode != Pmode)
+ operands[1] = gen_rtx_SUBREG (mode, operands[1], 0);
+ operands[0] = dest;
+})
+
+;; Call-value patterns last so that the wildcard operand does not
+;; disrupt insn-recog's switch tables.
+
+(define_insn "*call_value_pop_0"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "constant_call_address_operand" ""))
+ (match_operand:SI 2 "" "")))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 3 "immediate_operand" "")))]
+ "!TARGET_64BIT"
+{
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P1";
+ else
+ return "call\t%P1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*call_value_pop_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "call_insn_operand" "rsm"))
+ (match_operand:SI 2 "" "")))
+ (set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ "!TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[1], Pmode))
+ {
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P1";
+ else
+ return "call\t%P1";
+ }
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%A1";
+ else
+ return "call\t%A1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*call_value_0"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "constant_call_address_operand" ""))
+ (match_operand:SI 2 "" "")))]
+ "!TARGET_64BIT"
+{
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P1";
+ else
+ return "call\t%P1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*call_value_0_rex64"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:DI 1 "constant_call_address_operand" ""))
+ (match_operand:DI 2 "const_int_operand" "")))]
+ "TARGET_64BIT"
+{
+ if (SIBLING_CALL_P (insn))
+ return "jmp\t%P1";
+ else
+ return "call\t%P1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*call_value_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "call_insn_operand" "rsm"))
+ (match_operand:SI 2 "" "")))]
+ "!SIBLING_CALL_P (insn) && !TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[1], Pmode))
+ return "call\t%P1";
+ return "call\t%A1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*sibcall_value_1"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:SI 1 "sibcall_insn_operand" "s,c,d,a"))
+ (match_operand:SI 2 "" "")))]
+ "SIBLING_CALL_P (insn) && !TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[1], Pmode))
+ return "jmp\t%P1";
+ return "jmp\t%A1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*call_value_1_rex64"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:DI 1 "call_insn_operand" "rsm"))
+ (match_operand:DI 2 "" "")))]
+ "!SIBLING_CALL_P (insn) && TARGET_64BIT"
+{
+ if (constant_call_address_operand (operands[1], Pmode))
+ return "call\t%P1";
+ return "call\t%A1";
+}
+ [(set_attr "type" "callv")])
+
+(define_insn "*sibcall_value_1_rex64"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (match_operand:DI 1 "constant_call_address_operand" ""))
+ (match_operand:DI 2 "" "")))]
+ "SIBLING_CALL_P (insn) && TARGET_64BIT"
+ "jmp\t%P1"
+ [(set_attr "type" "callv")])
+
+(define_insn "*sibcall_value_1_rex64_v"
+ [(set (match_operand 0 "" "")
+ (call (mem:QI (reg:DI 40))
+ (match_operand:DI 1 "" "")))]
+ "SIBLING_CALL_P (insn) && TARGET_64BIT"
+ "jmp\t*%%r11"
+ [(set_attr "type" "callv")])
+
+;; We used to use "int $5", in honor of #BR, which maps to interrupt vector 5.
+;; That, however, is usually mapped by the OS to SIGSEGV, which is often
+;; caught for use by garbage collectors and the like.  Using an insn that
+;; maps to SIGILL makes it more likely the program will rightfully die.
+;; In keeping with tradition, "6" is in honor of #UD.
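+;; (ASM_SHORT 0x0b0f emits the bytes 0f 0b in little-endian order, i.e. the
+;; two-byte ud2 instruction, which raises #UD.)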
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 6))]
+ ""
+ { return ASM_SHORT "0x0b0f"; }
+ [(set_attr "length" "2")])
+
+(define_expand "sse_prologue_save"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(reg:DI 21)
+ (reg:DI 22)
+ (reg:DI 23)
+ (reg:DI 24)
+ (reg:DI 25)
+ (reg:DI 26)
+ (reg:DI 27)
+ (reg:DI 28)] UNSPEC_SSE_PROLOGUE_SAVE))
+ (use (match_operand:DI 1 "register_operand" ""))
+ (use (match_operand:DI 2 "immediate_operand" ""))
+ (use (label_ref:DI (match_operand 3 "" "")))])]
+ "TARGET_64BIT"
+ "")
+
+(define_insn "*sse_prologue_save_insn"
+ [(set (mem:BLK (plus:DI (match_operand:DI 0 "register_operand" "R")
+ (match_operand:DI 4 "const_int_operand" "n")))
+ (unspec:BLK [(reg:DI 21)
+ (reg:DI 22)
+ (reg:DI 23)
+ (reg:DI 24)
+ (reg:DI 25)
+ (reg:DI 26)
+ (reg:DI 27)
+ (reg:DI 28)] UNSPEC_SSE_PROLOGUE_SAVE))
+ (use (match_operand:DI 1 "register_operand" "r"))
+ (use (match_operand:DI 2 "const_int_operand" "i"))
+ (use (label_ref:DI (match_operand 3 "" "X")))]
+ "TARGET_64BIT
+ && INTVAL (operands[4]) + SSE_REGPARM_MAX * 16 - 16 < 128
+ && INTVAL (operands[4]) + INTVAL (operands[2]) * 16 >= -128"
+ "*
+{
+ int i;
+ operands[0] = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode, operands[0], operands[4]));
+ output_asm_insn (\"jmp\\t%A1\", operands);
+ for (i = SSE_REGPARM_MAX - 1; i >= INTVAL (operands[2]); i--)
+ {
+ operands[4] = adjust_address (operands[0], DImode, i*16);
+ operands[5] = gen_rtx_REG (TImode, SSE_REGNO (i));
+ PUT_MODE (operands[4], TImode);
+ if (GET_CODE (XEXP (operands[0], 0)) != PLUS)
+ output_asm_insn (\"rex\", operands);
+ output_asm_insn (\"movaps\\t{%5, %4|%4, %5}\", operands);
+ }
+ (*targetm.asm_out.internal_label) (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[3]));
+ RET;
+}
+ "
+ [(set_attr "type" "other")
+ (set_attr "length_immediate" "0")
+ (set_attr "length_address" "0")
+ (set_attr "length" "135")
+ (set_attr "memory" "store")
+ (set_attr "modrm" "0")
+ (set_attr "mode" "DI")])
+
+(define_expand "prefetch"
+ [(prefetch (match_operand 0 "address_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))]
+ "TARGET_PREFETCH_SSE || TARGET_3DNOW"
+{
+ int rw = INTVAL (operands[1]);
+ int locality = INTVAL (operands[2]);
+
+ gcc_assert (rw == 0 || rw == 1);
+ gcc_assert (locality >= 0 && locality <= 3);
+ gcc_assert (GET_MODE (operands[0]) == Pmode
+ || GET_MODE (operands[0]) == VOIDmode);
+
+ /* Use the 3dNOW prefetch in case we are asking for a write prefetch not
+ supported by the SSE counterpart, or when the SSE prefetch is not
+ available (K6 machines).  Otherwise use the SSE prefetch, as it allows
+ specifying the locality.  */
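+ /* For example (a sketch): __builtin_prefetch (p, 1, 1) on a 3dNOW-only
+ target has its locality forced to 3 and matches *prefetch_3dnow,
+ emitting prefetchw; with SSE available, rw is cleared instead and
+ *prefetch_sse emits prefetcht2.  */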
+ if (TARGET_3DNOW && (!TARGET_PREFETCH_SSE || rw))
+ operands[2] = GEN_INT (3);
+ else
+ operands[1] = const0_rtx;
+})
+
+(define_insn "*prefetch_sse"
+ [(prefetch (match_operand:SI 0 "address_operand" "p")
+ (const_int 0)
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "TARGET_PREFETCH_SSE && !TARGET_64BIT"
+{
+ static const char * const patterns[4] = {
+ "prefetchnta\t%a0", "prefetcht2\t%a0", "prefetcht1\t%a0", "prefetcht0\t%a0"
+ };
+
+ int locality = INTVAL (operands[1]);
+ gcc_assert (locality >= 0 && locality <= 3);
+
+ return patterns[locality];
+}
+ [(set_attr "type" "sse")
+ (set_attr "memory" "none")])
+
+(define_insn "*prefetch_sse_rex"
+ [(prefetch (match_operand:DI 0 "address_operand" "p")
+ (const_int 0)
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "TARGET_PREFETCH_SSE && TARGET_64BIT"
+{
+ static const char * const patterns[4] = {
+ "prefetchnta\t%a0", "prefetcht2\t%a0", "prefetcht1\t%a0", "prefetcht0\t%a0"
+ };
+
+ int locality = INTVAL (operands[1]);
+ gcc_assert (locality >= 0 && locality <= 3);
+
+ return patterns[locality];
+}
+ [(set_attr "type" "sse")
+ (set_attr "memory" "none")])
+
+(define_insn "*prefetch_3dnow"
+ [(prefetch (match_operand:SI 0 "address_operand" "p")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (const_int 3))]
+ "TARGET_3DNOW && !TARGET_64BIT"
+{
+ if (INTVAL (operands[1]) == 0)
+ return "prefetch\t%a0";
+ else
+ return "prefetchw\t%a0";
+}
+ [(set_attr "type" "mmx")
+ (set_attr "memory" "none")])
+
+(define_insn "*prefetch_3dnow_rex"
+ [(prefetch (match_operand:DI 0 "address_operand" "p")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (const_int 3))]
+ "TARGET_3DNOW && TARGET_64BIT"
+{
+ if (INTVAL (operands[1]) == 0)
+ return "prefetch\t%a0";
+ else
+ return "prefetchw\t%a0";
+}
+ [(set_attr "type" "mmx")
+ (set_attr "memory" "none")])
+
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_tls_protect_set_di (operands[0],
+ GEN_INT (TARGET_THREAD_SSP_OFFSET)));
+ else
+ emit_insn (gen_stack_tls_protect_set_si (operands[0],
+ GEN_INT (TARGET_THREAD_SSP_OFFSET)));
+#else
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_set_di (operands[0], operands[1]));
+ else
+ emit_insn (gen_stack_protect_set_si (operands[0], operands[1]));
+#endif
+ DONE;
+})
+
+(define_insn "stack_protect_set_si"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:SI 2 "=&r") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "mov{l}\t{%1, %2|%2, %1}\;mov{l}\t{%2, %0|%0, %2}\;xor{l}\t%2, %2"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_protect_set_di"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:DI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:DI 2 "=&r") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "mov{q}\t{%1, %2|%2, %1}\;mov{q}\t{%2, %0|%0, %2}\;xor{l}\t%k2, %k2"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_tls_protect_set_si"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "const_int_operand" "i")] UNSPEC_SP_TLS_SET))
+ (set (match_scratch:SI 2 "=&r") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "mov{l}\t{%%gs:%P1, %2|%2, DWORD PTR %%gs:%P1}\;mov{l}\t{%2, %0|%0, %2}\;xor{l}\t%2, %2"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_tls_protect_set_di"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:DI 1 "const_int_operand" "i")] UNSPEC_SP_TLS_SET))
+ (set (match_scratch:DI 2 "=&r") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ {
+ /* The kernel uses a different segment register for performance reasons;
+ this way a system call does not have to trash the userspace segment
+ register, which would be expensive.  */
+ if (ix86_cmodel != CM_KERNEL)
+ return "mov{q}\t{%%fs:%P1, %2|%2, QWORD PTR %%fs:%P1}\;mov{q}\t{%2, %0|%0, %2}\;xor{l}\t%k2, %k2";
+ else
+ return "mov{q}\t{%%gs:%P1, %2|%2, QWORD PTR %%gs:%P1}\;mov{q}\t{%2, %0|%0, %2}\;xor{l}\t%k2, %k2";
+ }
+ [(set_attr "type" "multi")])
+
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")]
+ ""
+{
+ rtx flags = gen_rtx_REG (CCZmode, FLAGS_REG);
+ ix86_compare_op0 = operands[0];
+ ix86_compare_op1 = operands[1];
+ ix86_compare_emitted = flags;
+
+#ifdef TARGET_THREAD_SSP_OFFSET
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_tls_protect_test_di (flags, operands[0],
+ GEN_INT (TARGET_THREAD_SSP_OFFSET)));
+ else
+ emit_insn (gen_stack_tls_protect_test_si (flags, operands[0],
+ GEN_INT (TARGET_THREAD_SSP_OFFSET)));
+#else
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_test_di (flags, operands[0], operands[1]));
+ else
+ emit_insn (gen_stack_protect_test_si (flags, operands[0], operands[1]));
+#endif
+ emit_jump_insn (gen_beq (operands[2]));
+ DONE;
+})
+
+(define_insn "stack_protect_test_si"
+ [(set (match_operand:CCZ 0 "flags_reg_operand" "")
+ (unspec:CCZ [(match_operand:SI 1 "memory_operand" "m")
+ (match_operand:SI 2 "memory_operand" "m")]
+ UNSPEC_SP_TEST))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ ""
+ "mov{l}\t{%1, %3|%3, %1}\;xor{l}\t{%2, %3|%3, %2}"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_protect_test_di"
+ [(set (match_operand:CCZ 0 "flags_reg_operand" "")
+ (unspec:CCZ [(match_operand:DI 1 "memory_operand" "m")
+ (match_operand:DI 2 "memory_operand" "m")]
+ UNSPEC_SP_TEST))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ "TARGET_64BIT"
+ "mov{q}\t{%1, %3|%3, %1}\;xor{q}\t{%2, %3|%3, %2}"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_tls_protect_test_si"
+ [(set (match_operand:CCZ 0 "flags_reg_operand" "")
+ (unspec:CCZ [(match_operand:SI 1 "memory_operand" "m")
+ (match_operand:SI 2 "const_int_operand" "i")]
+ UNSPEC_SP_TLS_TEST))
+ (clobber (match_scratch:SI 3 "=r"))]
+ ""
+ "mov{l}\t{%1, %3|%3, %1}\;xor{l}\t{%%gs:%P2, %3|%3, DWORD PTR %%gs:%P2}"
+ [(set_attr "type" "multi")])
+
+(define_insn "stack_tls_protect_test_di"
+ [(set (match_operand:CCZ 0 "flags_reg_operand" "")
+ (unspec:CCZ [(match_operand:DI 1 "memory_operand" "m")
+ (match_operand:DI 2 "const_int_operand" "i")]
+ UNSPEC_SP_TLS_TEST))
+ (clobber (match_scratch:DI 3 "=r"))]
+ "TARGET_64BIT"
+ {
+ /* The kernel uses a different segment register for performance reasons;
+ this way a system call does not have to trash the userspace segment
+ register, which would be expensive.  */
+ if (ix86_cmodel != CM_KERNEL)
+ return "mov{q}\t{%1, %3|%3, %1}\;xor{q}\t{%%fs:%P2, %3|%3, QWORD PTR %%fs:%P2}";
+ else
+ return "mov{q}\t{%1, %3|%3, %1}\;xor{q}\t{%%gs:%P2, %3|%3, QWORD PTR %%gs:%P2}";
+ }
+ [(set_attr "type" "multi")])
+
+; APPLE LOCAL begin 3399553
+
+; Expand the builtin FLT_ROUNDS by reading the x87 FPSR rounding bits.
+
+(define_expand "flt_rounds"
+ [(set (match_operand 0 "nonimmediate_operand" "")
+ (unspec:SI [(reg:CCFP FPSR_REG)] UNSPEC_FLT_ROUNDS))]
+ ""
+ "
+ {
+ ix86_expand_flt_rounds (operands[0]);
+ DONE;
+ }
+ "
+)
+; APPLE LOCAL end 3399553
+; APPLE LOCAL begin mainline
+(include "mmx.md")
+(include "sse.md")
+; APPLE LOCAL end mainline
+(include "sync.md")
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_mode_macro CRC32MODE [QI HI SI])
+(define_mode_attr crc32modesuffix [(QI "b") (HI "w") (SI "l")])
+(define_mode_attr crc32modeconstraint [(QI "qm") (HI "rm") (SI "rm")])
+
+(define_insn "sse4_2_crc32<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI
+ [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:CRC32MODE 2 "nonimmediate_operand" "<crc32modeconstraint>")]
+ UNSPEC_CRC32))]
+ "TARGET_SSE4_2"
+ "crc32<crc32modesuffix>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix_rep" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "sse4_2_crc32di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI
+ [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "nonimmediate_operand" "rm")]
+ UNSPEC_CRC32))]
+ "TARGET_SSE4_2 && TARGET_64BIT"
+ "crc32q\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix_rep" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 5612787 mainline sse4
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/i386.opt b/gcc-4.2.1-5666.3/gcc/config/i386/i386.opt
new file mode 100644
index 000000000..578ea36e7
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/i386.opt
@@ -0,0 +1,262 @@
+; Options for the IA-32 and AMD64 ports of the compiler.
+
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+m128bit-long-double
+Target RejectNegative Report Mask(128BIT_LONG_DOUBLE)
+sizeof(long double) is 16
+
+m32
+Target RejectNegative Negative(m64) Report InverseMask(64BIT)
+Generate 32bit i386 code
+
+m386
+Target RejectNegative Undocumented
+;; Deprecated
+
+m3dnow
+Target Report Mask(3DNOW)
+Support 3DNow! built-in functions
+
+m486
+Target RejectNegative Undocumented
+;; Deprecated
+
+m64
+Target RejectNegative Negative(m32) Report Mask(64BIT)
+Generate 64bit x86-64 code
+
+m80387
+Target Report Mask(80387)
+Use hardware fp
+
+m96bit-long-double
+Target RejectNegative Report InverseMask(128BIT_LONG_DOUBLE)
+sizeof(long double) is 12
+
+maccumulate-outgoing-args
+Target Report Mask(ACCUMULATE_OUTGOING_ARGS)
+Reserve space for outgoing arguments in the function prologue
+
+malign-double
+Target Report Mask(ALIGN_DOUBLE)
+Align some doubles on dword boundary
+
+malign-functions=
+Target RejectNegative Joined Var(ix86_align_funcs_string)
+Function starts are aligned to this power of 2
+
+malign-jumps=
+Target RejectNegative Joined Var(ix86_align_jumps_string)
+Jump targets are aligned to this power of 2
+
+malign-loops=
+Target RejectNegative Joined Var(ix86_align_loops_string)
+Loop code aligned to this power of 2
+
+malign-stringops
+Target RejectNegative Report InverseMask(NO_ALIGN_STRINGOPS, ALIGN_STRINGOPS)
+Align destination of the string operations
+
+march=
+Target RejectNegative Joined Var(ix86_arch_string)
+Generate code for given CPU
+
+masm=
+Target RejectNegative Joined Var(ix86_asm_string)
+Use given assembler dialect
+
+mbranch-cost=
+Target RejectNegative Joined Var(ix86_branch_cost_string)
+Branches are this expensive (1-5, arbitrary units)
+
+mlarge-data-threshold=
+Target RejectNegative Joined Var(ix86_section_threshold_string)
+Data greater than given threshold will go into .ldata section in x86-64 medium model
+
+mcmodel=
+Target RejectNegative Joined Var(ix86_cmodel_string)
+Use given x86-64 code model
+
+mdebug-addr
+Target RejectNegative Var(TARGET_DEBUG_ADDR) Undocumented
+
+mdebug-arg
+Target RejectNegative Var(TARGET_DEBUG_ARG) Undocumented
+
+mfancy-math-387
+Target RejectNegative Report InverseMask(NO_FANCY_MATH_387, USE_FANCY_MATH_387)
+Generate sin, cos, sqrt for FPU
+
+mfp-ret-in-387
+Target Report Mask(FLOAT_RETURNS)
+Return values of functions in FPU registers
+
+mfpmath=
+Target RejectNegative Joined Var(ix86_fpmath_string)
+Generate floating point mathematics using given instruction set
+
+mhard-float
+Target RejectNegative Mask(80387) MaskExists
+Use hardware fp
+
+mieee-fp
+Target Report Mask(IEEE_FP)
+Use IEEE math for fp comparisons
+
+minline-all-stringops
+Target Report Mask(INLINE_ALL_STRINGOPS)
+Inline all known string operations
+
+mintel-syntax
+Target Undocumented
+;; Deprecated
+
+mmmx
+Target Report Mask(MMX)
+Support MMX built-in functions
+
+mms-bitfields
+Target Report Mask(MS_BITFIELD_LAYOUT)
+Use native (MS) bitfield layout
+
+mno-align-stringops
+Target RejectNegative Report Mask(NO_ALIGN_STRINGOPS) Undocumented
+
+mno-fancy-math-387
+Target RejectNegative Report Mask(NO_FANCY_MATH_387) Undocumented
+
+mno-push-args
+Target RejectNegative Report Mask(NO_PUSH_ARGS) Undocumented
+
+mno-red-zone
+Target RejectNegative Report Mask(NO_RED_ZONE) Undocumented
+
+momit-leaf-frame-pointer
+Target Report Mask(OMIT_LEAF_FRAME_POINTER)
+Omit the frame pointer in leaf functions
+
+mpentium
+Target RejectNegative Undocumented
+;; Deprecated
+
+mpentiumpro
+Target RejectNegative Undocumented
+;; Deprecated
+
+mpreferred-stack-boundary=
+Target RejectNegative Joined Var(ix86_preferred_stack_boundary_string)
+Attempt to keep stack aligned to this power of 2
+
+mpush-args
+Target Report InverseMask(NO_PUSH_ARGS, PUSH_ARGS)
+Use push instructions to save outgoing arguments
+
+mred-zone
+Target RejectNegative Report InverseMask(NO_RED_ZONE, RED_ZONE)
+Use red-zone in the x86-64 code
+
+mregparm=
+Target RejectNegative Joined Var(ix86_regparm_string)
+Number of registers used to pass integer arguments
+
+mrtd
+Target Report Mask(RTD)
+Alternate calling convention
+
+msoft-float
+Target InverseMask(80387)
+Do not use hardware fp
+
+msse
+Target Report Mask(SSE)
+Support MMX and SSE built-in functions and code generation
+
+msse2
+Target Report Mask(SSE2)
+Support MMX, SSE and SSE2 built-in functions and code generation
+
+msse3
+Target Report Mask(SSE3)
+Support MMX, SSE, SSE2 and SSE3 built-in functions and code generation
+
+; APPLE LOCAL begin mainline
+mssse3
+Target Report Mask(SSSE3)
+Support SSSE3 built-in functions and code generation
+; APPLE LOCAL end mainline
+
+msseregparm
+Target RejectNegative Mask(SSEREGPARM)
+Use SSE register passing conventions for SF and DF mode
+
+mstackrealign
+Target Report Var(ix86_force_align_arg_pointer)
+Realign stack in prologue
+
+msvr3-shlib
+Target Report Mask(SVR3_SHLIB)
+Uninitialized locals in .bss
+
+mstack-arg-probe
+Target Report Mask(STACK_PROBE)
+Enable stack probing
+
+mtls-dialect=
+Target RejectNegative Joined Var(ix86_tls_dialect_string)
+Use given thread-local storage dialect
+
+mtls-direct-seg-refs
+Target Report Mask(TLS_DIRECT_SEG_REFS)
+Use direct references against %gs when accessing tls data
+
+mtune=
+Target RejectNegative Joined Var(ix86_tune_string)
+Schedule code for given CPU
+
+;; Support Athlon 3Dnow builtins
+Mask(3DNOW_A)
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; Var(ix86_isa_flags)
+msse4.1
+Target Report Mask(SSE4_1) VarExists
+Support MMX, SSE, SSE2, SSE3, SSSE3 and SSE4.1 built-in functions and code generation
+
+;; Var(ix86_isa_flags)
+msse4.2
+Target Report Mask(SSE4_2) VarExists
+Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1 and SSE4.2 built-in functions and code generation
+
+;; Var(ix86_isa_flags)
+msse4
+Target RejectNegative Report Mask(SSE4_2) MaskExists VarExists
+Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1 and SSE4.2 built-in functions and code generation
+
+;; Var(ix86_isa_flags)
+mno-sse4
+Target RejectNegative Report InverseMask(SSE4_1) MaskExists VarExists
+Do not support SSE4.1 and SSE4.2 built-in functions and code generation
+
+;; Var(ix86_isa_flags)
+msse4a
+Target Report Mask(SSE4A) VarExists
+Support MMX, SSE, SSE2, SSE3 and SSE4A built-in functions and code generation
+;; APPLE LOCAL end 5612787 mainline sse4
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/k6.md b/gcc-4.2.1-5666.3/gcc/config/i386/k6.md
new file mode 100644
index 000000000..e0006aebb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/k6.md
@@ -0,0 +1,268 @@
+;; AMD K6/K6-2 Scheduling
+;; Copyright (C) 2002, 2004
+;; Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+;;
+;; The K6 architecture is quite similar to PPro.  An important difference is
+;; that there are only two decoders and they seem to be much slower than
+;; any of the execution units.  So we have to pay much more attention to
+;; proper scheduling for the decoders.
+;; FIXME: We don't do that right now.  A good start would be to sort the
+;; instructions based on length.
+;;
+;; This description is based on data from the following documents:
+;;
+;; "AMD-K6 Processor Data Sheet (Preliminary information)"
+;; Advanced Micro Devices, Inc., 1998.
+;;
+;; "AMD-K6 Processor Code Optimization Application Note"
+;; Advanced Micro Devices, Inc., 2000.
+;;
+;; CPU execution units of the K6:
+;;
+;; store	describes the Store unit.  This unit is not modelled
+;;		completely and is only used to model the lea operation.
+;;		Otherwise it lies outside of any critical path.
+;; load describes the Load unit
+;; alux describes the Integer X unit
+;; mm describes the Multimedia unit, which shares a pipe
+;; with the Integer X unit. This unit is used for MMX,
+;; which is not implemented for K6.
+;; aluy describes the Integer Y unit
+;; fpu describes the FPU unit
+;; branch describes the Branch unit
+;;
+;; The fp unit is not pipelined, and it can only do one operation per two
+;; cycles, including fxch.
+;;
+;; Generally this is a very poor description, but at least no worse than
+;; the old description, and a lot easier to extend to something more
+;; reasonable if anyone still cares enough about this architecture in 2004.
+;;
+;; ??? fxch isn't handled; not an issue until sched3 after reg-stack is real.
+
+(define_automaton "k6_decoder,k6_load_unit,k6_store_unit,k6_integer_units,k6_fpu_unit,k6_branch_unit")
+
+;; The K6 instruction decoding begins before the on-chip instruction cache is
+;; filled. Depending on the length of the instruction, two simple instructions
+;; can be decoded in two parallel short decoders, or one complex instruction can
+;; be decoded in either the long or the vector decoder. For all practical
+;; purposes, the long and vector decoder can be modelled as one decoder.
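+;;
+;; For example, in the reservations below an ordinary ALU operation such as
+;; add reg,reg can go down either short decoder (k6_decode_short), while
+;; imul and idiv are reserved to the long/vector decoder (k6_decode_vector).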
+(define_cpu_unit "k6_decode_short0" "k6_decoder")
+(define_cpu_unit "k6_decode_short1" "k6_decoder")
+(define_cpu_unit "k6_decode_long" "k6_decoder")
+(exclusion_set "k6_decode_long" "k6_decode_short0,k6_decode_short1")
+(define_reservation "k6_decode_short" "k6_decode_short0|k6_decode_short1")
+(define_reservation "k6_decode_vector" "k6_decode_long")
+
+(define_cpu_unit "k6_store" "k6_store_unit")
+(define_cpu_unit "k6_load" "k6_load_unit")
+(define_cpu_unit "k6_alux,k6_aluy" "k6_integer_units")
+(define_cpu_unit "k6_fpu" "k6_fpu_unit")
+(define_cpu_unit "k6_branch" "k6_branch_unit")
+
+;; Shift instructions and certain arithmetic are issued only on Integer X.
+(define_insn_reservation "k6_alux_only" 1
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "ishift,ishift1,rotate,rotate1,alu1,negnot,cld")
+ (eq_attr "memory" "none")))
+ "k6_decode_short,k6_alux")
+
+(define_insn_reservation "k6_alux_only_load" 3
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "ishift,ishift1,rotate,rotate1,alu1,negnot,cld")
+ (eq_attr "memory" "load")))
+ "k6_decode_short,k6_load,k6_alux")
+
+(define_insn_reservation "k6_alux_only_store" 3
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "ishift,ishift1,rotate,rotate1,alu1,negnot,cld")
+ (eq_attr "memory" "store,both,unknown")))
+ "k6_decode_long,k6_load,k6_alux,k6_store")
+
+;; Integer divide and multiply can only be issued on Integer X, too.
+(define_insn_reservation "k6_alu_imul" 2
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "imul"))
+ "k6_decode_vector,k6_alux*3")
+
+(define_insn_reservation "k6_alu_imul_load" 4
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "load")))
+ "k6_decode_vector,k6_load,k6_alux*3")
+
+(define_insn_reservation "k6_alu_imul_store" 4
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imul")
+ (eq_attr "memory" "store,both,unknown")))
+ "k6_decode_vector,k6_load,k6_alux*3,k6_store")
+
+;; ??? Guessed latencies based on the old pipeline description.
+(define_insn_reservation "k6_alu_idiv" 17
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "memory" "none")))
+ "k6_decode_vector,k6_alux*17")
+
+(define_insn_reservation "k6_alu_idiv_mem" 19
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "memory" "!none")))
+ "k6_decode_vector,k6_load,k6_alux*17")
+
+;; Basic word and doubleword ALU ops can be issued on both Integer units.
+(define_insn_reservation "k6_alu" 1
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "alu,alu1,negnot,icmp,test,imovx,incdec,setcc")
+ (eq_attr "memory" "none")))
+ "k6_decode_short,k6_alux|k6_aluy")
+
+(define_insn_reservation "k6_alu_load" 3
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "alu,alu1,negnot,icmp,test,imovx,incdec,setcc")
+ (eq_attr "memory" "load")))
+ "k6_decode_short,k6_load,k6_alux|k6_aluy")
+
+(define_insn_reservation "k6_alu_store" 3
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "alu,alu1,negnot,icmp,test,imovx,incdec,setcc")
+ (eq_attr "memory" "store,both,unknown")))
+ "k6_decode_long,k6_load,k6_alux|k6_aluy,k6_store")
+
+;; A "load immediate" operation does not require execution at all,
+;; it is available immediately after decoding. Special-case this.
+(define_insn_reservation "k6_alu_imov" 1
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imov")
+ (and (eq_attr "memory" "none")
+ (match_operand 1 "nonimmediate_operand"))))
+ "k6_decode_short,k6_alux|k6_aluy")
+
+(define_insn_reservation "k6_alu_imov_imm" 0
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imov")
+ (and (eq_attr "memory" "none")
+ (match_operand 1 "immediate_operand"))))
+ "k6_decode_short")
+
+(define_insn_reservation "k6_alu_imov_load" 2
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "load")))
+ "k6_decode_short,k6_load")
+
+(define_insn_reservation "k6_alu_imov_store" 1
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "store")))
+ "k6_decode_short,k6_store")
+
+(define_insn_reservation "k6_alu_imov_both" 2
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "both,unknown")))
+ "k6_decode_long,k6_load,k6_alux|k6_aluy")
+
+;; The branch unit.
+(define_insn_reservation "k6_branch_call" 1
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "call,callv"))
+ "k6_decode_vector,k6_branch")
+
+(define_insn_reservation "k6_branch_branch" 1
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "ibr"))
+ "k6_decode_short,k6_branch")
+
+;; The load and store units have two pipeline stages.  The load latency is
+;; two cycles.
+(define_insn_reservation "k6_load_pop" 3
+ (and (eq_attr "cpu" "k6")
+ (ior (eq_attr "type" "pop")
+ (eq_attr "memory" "load,both")))
+ "k6_decode_short,k6_load")
+
+(define_insn_reservation "k6_load_leave" 5
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "leave"))
+ "k6_decode_long,k6_load,(k6_alux|k6_aluy)*2")
+
+;; ??? From the old pipeline description. Egad!
+;; ??? Apparently we take care of this reservation in adjust_cost.
+(define_insn_reservation "k6_load_str" 10
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "str")
+ (eq_attr "memory" "load,both")))
+ "k6_decode_vector,k6_load*10")
+
+;; The store unit handles lea and push. It is otherwise unmodelled.
+(define_insn_reservation "k6_store_lea" 2
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "lea"))
+ "k6_decode_short,k6_store,k6_alux|k6_aluy")
+
+(define_insn_reservation "k6_store_push" 2
+ (and (eq_attr "cpu" "k6")
+ (ior (eq_attr "type" "push")
+ (eq_attr "memory" "store,both")))
+ "k6_decode_short,k6_store")
+
+(define_insn_reservation "k6_store_str" 10
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "str"))
+ "k6_store*10")
+
+;; Most FPU instructions have latency 2 and throughput 2.
+(define_insn_reservation "k6_fpu" 2
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "fop,fmov,fcmp,fistp")
+ (eq_attr "memory" "none")))
+ "k6_decode_vector,k6_fpu*2")
+
+(define_insn_reservation "k6_fpu_load" 6
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "fop,fmov,fcmp,fistp")
+ (eq_attr "memory" "load,both")))
+ "k6_decode_short,k6_load,k6_fpu*2")
+
+(define_insn_reservation "k6_fpu_store" 6
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "fop,fmov,fcmp,fistp")
+ (eq_attr "memory" "store")))
+ "k6_decode_short,k6_store,k6_fpu*2")
+
+(define_insn_reservation "k6_fpu_fmul" 2
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "memory" "none")))
+ "k6_decode_short,k6_fpu*2")
+
+(define_insn_reservation "k6_fpu_fmul_load" 2
+ (and (eq_attr "cpu" "k6")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "memory" "load,both")))
+ "k6_decode_short,k6_load,k6_fpu*2")
+
+;; ??? Guessed latencies from the old pipeline description.
+(define_insn_reservation "k6_fpu_expensive" 56
+ (and (eq_attr "cpu" "k6")
+ (eq_attr "type" "fdiv,fpspc"))
+ "k6_decode_short,k6_fpu*56")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/lib1funcs.asm b/gcc-4.2.1-5666.3/gcc/config/i386/lib1funcs.asm
new file mode 100644
index 000000000..c672024bb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/lib1funcs.asm
@@ -0,0 +1,30 @@
+# APPLE LOCAL file 4099000
+#ifndef __x86_64__
+#define THUNK(REG) \
+.private_extern ___i686.get_pc_thunk.REG ;\
+___i686.get_pc_thunk.REG: ;\
+ movl (%esp,1),%REG ;\
+ ret ;
+
+#ifdef L_get_pc_thunk_ax
+THUNK(eax)
+#endif
+#ifdef L_get_pc_thunk_dx
+THUNK(edx)
+#endif
+#ifdef L_get_pc_thunk_cx
+THUNK(ecx)
+#endif
+#ifdef L_get_pc_thunk_bx
+THUNK(ebx)
+#endif
+#ifdef L_get_pc_thunk_si
+THUNK(esi)
+#endif
+#ifdef L_get_pc_thunk_di
+THUNK(edi)
+#endif
+#ifdef L_get_pc_thunk_bp
+THUNK(ebp)
+#endif
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/mm3dnow.h b/gcc-4.2.1-5666.3/gcc/config/i386/mm3dnow.h
new file mode 100644
index 000000000..7fdc6dce5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/mm3dnow.h
@@ -0,0 +1,220 @@
+/* Copyright (C) 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with
+ MSVC 7.1. */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#ifdef __3dNOW__
+
+#include <mmintrin.h>
+
+/* Internal data types for implementing the intrinsics. */
+typedef float __v2sf __attribute__ ((__vector_size__ (8)));
+
+static __inline void
+_m_femms (void)
+{
+ __builtin_ia32_femms();
+}
+
+static __inline __m64
+_m_pavgusb (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B);
+}
+
+static __inline __m64
+_m_pf2id (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pf2id ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfacc (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfadd (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpeq (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpge (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfcmpgt (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmax (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfmax ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmin (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfmul (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrcp (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfrcpit1 (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrcpit2 (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfrsqrt (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfrsqit1 (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfsub (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfsubr (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pi2fd (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pi2fd ((__v2si)__A);
+}
+
+static __inline __m64
+_m_pmulhrw (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B);
+}
+
+static __inline void
+_m_prefetch (void *__P)
+{
+ __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline void
+_m_prefetchw (void *__P)
+{
+ __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+}
+
+static __inline __m64
+_m_from_float (float __A)
+{
+ return (__m64)(__v2sf){ __A, 0 };
+}
+
+static __inline float
+_m_to_float (__m64 __A)
+{
+ union { __v2sf v; float a[2]; } __tmp = { (__v2sf)__A };
+ return __tmp.a[0];
+}
+
+#ifdef __3dNOW_A__
+
+static __inline __m64
+_m_pf2iw (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A);
+}
+
+static __inline __m64
+_m_pfnacc (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pfpnacc (__m64 __A, __m64 __B)
+{
+ return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B);
+}
+
+static __inline __m64
+_m_pi2fw (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pi2fw ((__v2si)__A);
+}
+
+static __inline __m64
+_m_pswapd (__m64 __A)
+{
+ return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A);
+}
+
+#endif /* __3dNOW_A__ */
+#endif /* __3dNOW__ */
+
+#endif /* _MM3DNOW_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/mmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/mmintrin.h
new file mode 100644
index 000000000..64db0589c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/mmintrin.h
@@ -0,0 +1,1219 @@
+/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#ifndef __MMX__
+# error "MMX instruction set not enabled"
+#else
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+/* APPLE LOCAL 4505813 */
+typedef long long __m64 __attribute__ ((__vector_size__ (8), __may_alias__));
+
+/* Internal data types for implementing the intrinsics. */
+typedef int __v2si __attribute__ ((__vector_size__ (8)));
+typedef short __v4hi __attribute__ ((__vector_size__ (8)));
+typedef char __v8qi __attribute__ ((__vector_size__ (8)));
+
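+/* A minimal aliasing sketch (illustrative only; the helper name is
+   hypothetical): because __m64 carries __may_alias__, reinterpreting
+   casts between it and the vector types above are well defined, which
+   is how every intrinsic in this file is implemented.  */
+static __inline short
+__example_low_short (__m64 __m)
+{
+  union { __v4hi __v; short __s[4]; } __u = { (__v4hi) __m };
+  return __u.__s[0];  /* least significant 16-bit element */
+}
+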
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* Empty the multimedia state. */
+/* APPLE LOCAL begin radar 4152603 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_empty (void)
+{
+ __builtin_ia32_emms ();
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_empty (void)
+{
+ _mm_empty ();
+}
+
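+/* Usage sketch (illustrative only; the function name is hypothetical):
+   MMX and x87 share register state, so each block of MMX work must
+   end with _mm_empty () before any floating point code runs.  */
+static __inline double
+__example_after_mmx (void)
+{
+  /* ... packed-integer MMX work would go here ... */
+  _mm_empty ();      /* reset the shared MMX/x87 state */
+  return 1.0 / 3.0;  /* x87 arithmetic is safe again */
+}
+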
+/* Convert I to a __m64 object. The integer is zero-extended to 64 bits. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi32_si64 (int __i)
+{
+ return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_from_int (int __i)
+{
+ return _mm_cvtsi32_si64 (__i);
+}
+
+#ifdef __x86_64__
+/* Convert I to a __m64 object. */
+
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_from_int64 (long long __i)
+{
+ return (__m64) __i;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_m64 (long long __i)
+{
+ return (__m64) __i;
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64x_si64 (long long __i)
+{
+ return (__m64) __i;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pi64x (long long __i)
+{
+ return (__m64) __i;
+}
+#endif
+
+/* Convert the lower 32 bits of the __m64 object into an integer. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_si32 (__m64 __i)
+{
+ return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_to_int (__m64 __i)
+{
+ return _mm_cvtsi64_si32 (__i);
+}
+
+#ifdef __x86_64__
+/* Convert the __m64 object to a 64-bit integer. */
+
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_to_int64 (__m64 __i)
+{
+ return (long long)__i;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtm64_si64 (__m64 __i)
+{
+ return (long long)__i;
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_si64x (__m64 __i)
+{
+ return (long long)__i;
+}
+#endif
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_packsswb (__m64 __m1, __m64 __m2)
+{
+ return _mm_packs_pi16 (__m1, __m2);
+}
+
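+/* Worked sketch (illustrative only; the function name is
+   hypothetical): signed saturation clamps each 16-bit lane to the
+   signed 8-bit range, so a lane of 300 packs to 127 and a lane of
+   -300 packs to -128.  */
+static __inline __m64
+__example_pack_clamps (__m64 __four_wide, __m64 __four_more)
+{
+  /* If __four_wide held {300, -300, 5, -5}, its four result bytes
+     would be {127, -128, 5, -5}.  */
+  return _mm_packs_pi16 (__four_wide, __four_more);
+}
+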
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_packssdw (__m64 __m1, __m64 __m2)
+{
+ return _mm_packs_pi32 (__m1, __m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_packs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_packuswb (__m64 __m1, __m64 __m2)
+{
+ return _mm_packs_pu16 (__m1, __m2);
+}
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpckhbw (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpackhi_pi8 (__m1, __m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpckhwd (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpackhi_pi16 (__m1, __m2);
+}
+
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpckhdq (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpackhi_pi32 (__m1, __m2);
+}
+
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpcklbw (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpacklo_pi8 (__m1, __m2);
+}
+
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpcklwd (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpacklo_pi16 (__m1, __m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_punpckldq (__m64 __m1, __m64 __m2)
+{
+ return _mm_unpacklo_pi32 (__m1, __m2);
+}
+
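+/* Worked sketch (illustrative only; the function name is
+   hypothetical): with byte lanes m1 = {a0..a7} and m2 = {b0..b7}
+   (a0 and b0 least significant), _mm_unpacklo_pi8 yields
+   {a0,b0,a1,b1,a2,b2,a3,b3} and _mm_unpackhi_pi8 yields
+   {a4,b4,a5,b5,a6,b6,a7,b7}.  */
+static __inline __m64
+__example_interleave_low (__m64 __m1, __m64 __m2)
+{
+  return _mm_unpacklo_pi8 (__m1, __m2);
+}
+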
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddb (__m64 __m1, __m64 __m2)
+{
+ return _mm_add_pi8 (__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddw (__m64 __m1, __m64 __m2)
+{
+ return _mm_add_pi16 (__m1, __m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddd (__m64 __m1, __m64 __m2)
+{
+ return _mm_add_pi32 (__m1, __m2);
+}
+
+/* Add the 64-bit value in M1 to the 64-bit value in M2. */
+#ifdef __SSE2__
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_si64 (__m64 __m1, __m64 __m2)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_paddq (__m1, __m2);
+}
+#endif
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddsb (__m64 __m1, __m64 __m2)
+{
+ return _mm_adds_pi8 (__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddsw (__m64 __m1, __m64 __m2)
+{
+ return _mm_adds_pi16 (__m1, __m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddusb (__m64 __m1, __m64 __m2)
+{
+ return _mm_adds_pu8 (__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_adds_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_paddusw (__m64 __m1, __m64 __m2)
+{
+ return _mm_adds_pu16 (__m1, __m2);
+}
+
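+/* Contrast sketch (illustrative only; the function name is
+   hypothetical): _mm_add_pi16 wraps modulo 2^16, so 30000 + 30000
+   yields -5536, while _mm_adds_pi16 saturates the same lanes to
+   32767.  */
+static __inline __m64
+__example_saturating_sum (__m64 __m1, __m64 __m2)
+{
+  return _mm_adds_pi16 (__m1, __m2);  /* clamps instead of wrapping */
+}
+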
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubb (__m64 __m1, __m64 __m2)
+{
+ return _mm_sub_pi8 (__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubw (__m64 __m1, __m64 __m2)
+{
+ return _mm_sub_pi16 (__m1, __m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubd (__m64 __m1, __m64 __m2)
+{
+ return _mm_sub_pi32 (__m1, __m2);
+}
+
+/* Subtract the 64-bit value in M2 from the 64-bit value in M1. */
+#ifdef __SSE2__
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_si64 (__m64 __m1, __m64 __m2)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psubq (__m1, __m2);
+}
+#endif
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubsb (__m64 __m1, __m64 __m2)
+{
+ return _mm_subs_pi8 (__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubsw (__m64 __m1, __m64 __m2)
+{
+ return _mm_subs_pi16 (__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubusb (__m64 __m1, __m64 __m2)
+{
+ return _mm_subs_pu8 (__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_subs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psubusw (__m64 __m1, __m64 __m2)
+{
+ return _mm_subs_pu16 (__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_madd_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmaddwd (__m64 __m1, __m64 __m2)
+{
+ return _mm_madd_pi16 (__m1, __m2);
+}
+
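+/* Usage sketch (illustrative only; the function name is hypothetical):
+   pmaddwd is the classic 16-bit dot-product step.  For lanes
+   {a0,a1,a2,a3} and {b0,b1,b2,b3} it returns the two 32-bit sums
+   {a0*b0 + a1*b1, a2*b2 + a3*b3}.  */
+static __inline __m64
+__example_dot_step (__m64 __a, __m64 __b)
+{
+  return _mm_madd_pi16 (__a, __b);
+}
+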
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmulhw (__m64 __m1, __m64 __m2)
+{
+ return _mm_mulhi_pi16 (__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmullw (__m64 __m1, __m64 __m2)
+{
+ return _mm_mullo_pi16 (__m1, __m2);
+}
+
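+/* Combination sketch (illustrative only; the function name is
+   hypothetical): interleaving the low and high halves recovers the
+   full 32-bit signed products of the first two and last two lanes.  */
+static __inline void
+__example_full_products (__m64 __a, __m64 __b, __m64 *__lo2, __m64 *__hi2)
+{
+  __m64 __lo = _mm_mullo_pi16 (__a, __b);
+  __m64 __hi = _mm_mulhi_pi16 (__a, __b);
+  *__lo2 = _mm_unpacklo_pi16 (__lo, __hi);  /* products 0 and 1 */
+  *__hi2 = _mm_unpackhi_pi16 (__lo, __hi);  /* products 2 and 3 */
+}
+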
+/* Shift four 16-bit values in M left by COUNT. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_pi16 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psllw ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psllw (__m64 __m, __m64 __count)
+{
+ return _mm_sll_pi16 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_pi16 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psllwi (__m64 __m, int __count)
+{
+ return _mm_slli_pi16 (__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_pi32 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_pslld ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pslld (__m64 __m, __m64 __count)
+{
+ return _mm_sll_pi32 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_pi32 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pslldi (__m64 __m, int __count)
+{
+ return _mm_slli_pi32 (__m, __count);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sll_si64 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psllq (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psllq (__m64 __m, __m64 __count)
+{
+ return _mm_sll_si64 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_slli_si64 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psllqi (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psllqi (__m64 __m, int __count)
+{
+ return _mm_slli_si64 (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sra_pi16 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psraw ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psraw (__m64 __m, __m64 __count)
+{
+ return _mm_sra_pi16 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srai_pi16 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrawi (__m64 __m, int __count)
+{
+ return _mm_srai_pi16 (__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sra_pi32 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrad ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrad (__m64 __m, __m64 __count)
+{
+ return _mm_sra_pi32 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srai_pi32 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psradi (__m64 __m, int __count)
+{
+ return _mm_srai_pi32 (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_pi16 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrlw (__m64 __m, __m64 __count)
+{
+ return _mm_srl_pi16 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_pi16 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrlwi (__m64 __m, int __count)
+{
+ return _mm_srli_pi16 (__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrld ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrld (__m64 __m, __m64 __count)
+{
+ return _mm_srl_pi32 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrldi (__m64 __m, int __count)
+{
+ return _mm_srli_pi32 (__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrlq (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrlq (__m64 __m, __m64 __count)
+{
+ return _mm_srl_si64 (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_srli_si64 (__m64 __m, int __count)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ return (__m64) __builtin_ia32_psrlqi (__m, __count);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psrlqi (__m64 __m, int __count)
+{
+ return _mm_srli_si64 (__m, __count);
+}
+
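+/* Contrast sketch (illustrative only; the function name is
+   hypothetical): on a 16-bit lane holding -8, a shift of 2 with
+   _mm_srai_pi16 (arithmetic) gives -2, while _mm_srli_pi16 (logical)
+   gives 16382 because zeros are shifted into the sign bits.  */
+static __inline __m64
+__example_signed_quarter (__m64 __m)
+{
+  return _mm_srai_pi16 (__m, 2);  /* keeps the sign bit */
+}
+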
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pand (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pand (__m64 __m1, __m64 __m2)
+{
+ return _mm_and_si64 (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pandn (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pandn (__m64 __m1, __m64 __m2)
+{
+ return _mm_andnot_si64 (__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_por (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_por (__m64 __m1, __m64 __m2)
+{
+ return _mm_or_si64 (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pxor (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pxor (__m64 __m1, __m64 __m2)
+{
+ return _mm_xor_si64 (__m1, __m2);
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpeqb (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpeq_pi8 (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpgtb (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpgt_pi8 (__m1, __m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpeqw (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpeq_pi16 (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpgtw (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpgt_pi16 (__m1, __m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpeqd (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpeq_pi32 (__m1, __m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pcmpgtd (__m64 __m1, __m64 __m2)
+{
+ return _mm_cmpgt_pi32 (__m1, __m2);
+}
+
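+/* Idiom sketch (illustrative only; the function name is hypothetical):
+   base MMX has no packed maximum, but the all-ones/all-zeros
+   comparison masks combine with the bitwise operations above to
+   select per lane.  */
+static __inline __m64
+__example_max_pi16 (__m64 __a, __m64 __b)
+{
+  __m64 __mask = _mm_cmpgt_pi16 (__a, __b);  /* lanes where a > b */
+  return _mm_or_si64 (_mm_and_si64 (__mask, __a),
+                      _mm_andnot_si64 (__mask, __b));
+}
+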
+/* Creates a 64-bit zero. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setzero_si64 (void)
+{
+ return (__m64)0LL;
+}
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pi32 (int __i1, int __i0)
+{
+ return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+ return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+{
+ return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
+}
+
+/* Similar, but with the arguments in reverse order. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_pi32 (int __i0, int __i1)
+{
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+{
+ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_pi32 (int __i)
+{
+ return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_pi16 (short __w)
+{
+ return _mm_set_pi16 (__w, __w, __w, __w);
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_pi8 (char __b)
+{
+ return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
+}
+/* APPLE LOCAL end radar 4152603 */
+
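+/* Ordering sketch (illustrative only; the function name is
+   hypothetical): _mm_set_* take the most significant element first
+   and _mm_setr_* the reverse, so the two calls below build the same
+   vector, with the 1 in the least significant lane.  */
+static __inline int
+__example_same_vector (void)
+{
+  __m64 __x = _mm_set_pi16 (4, 3, 2, 1);
+  __m64 __y = _mm_setr_pi16 (1, 2, 3, 4);
+  return _mm_cvtsi64_si32 (_mm_cmpeq_pi16 (__x, __y)) == -1;
+}
+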
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* __MMX__ */
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/mmx.md b/gcc-4.2.1-5666.3/gcc/config/i386/mmx.md
new file mode 100644
index 000000000..4e55cfdc2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/mmx.md
@@ -0,0 +1,1470 @@
+;; GCC machine description for MMX and 3dNOW! instructions
+;; Copyright (C) 2005
+;; Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; The MMX and 3dNOW! patterns are in the same file because they use
+;; the same register file, and 3dNOW! adds a number of extensions to
+;; the base integer MMX ISA.
+
+;; Note! Except for the basic move instructions, *all* of these
+;; patterns are outside the normal optabs namespace.  This is because
+;; use of these registers requires the insertion of emms or femms
+;; instructions to return to normal FPU mode.  The compiler doesn't
+;; know how to do that itself, so it is up to the user; consequently,
+;; we should never use any of these patterns except at the direction
+;; of the user, via a builtin.
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+;; 8 byte integral modes handled by MMX (and by extension, SSE)
+(define_mode_macro MMXMODEI [V8QI V4HI V2SI V1DI])
+
+;; All 8-byte vector modes handled by MMX
+(define_mode_macro MMXMODE [V8QI V4HI V2SI V2SF V1DI])
+
+;; Mix-n-match
+(define_mode_macro MMXMODE12 [V8QI V4HI])
+(define_mode_macro MMXMODE24 [V4HI V2SI])
+(define_mode_macro MMXMODE124 [V8QI V4HI V2SI])
+(define_mode_macro MMXMODE248 [V4HI V2SI V1DI])
+
+;; Mapping from integer vector mode to mnemonic suffix
+(define_mode_attr mmxvecsize [(V8QI "b") (V4HI "w") (V2SI "d") (V1DI "q")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
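+;; For example, the mmx_add<mode>3 pattern below instantiates once per
+;; mode in MMXMODEI, with mmxvecsize supplying the mnemonic suffix:
+;; mmx_addv8qi3 emits paddb, mmx_addv4hi3 paddw, mmx_addv2si3 paddd,
+;; and mmx_addv1di3 paddq.
+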
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Move patterns
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; All of these patterns are enabled for MMX as well as 3dNOW.
+;; This is essential for maintaining stable calling conventions.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:MMXMODEI 0 "nonimmediate_operand" "")
+ (match_operand:MMXMODEI 1 "nonimmediate_operand" ""))]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_move (<MODE>mode, operands);
+ DONE;
+})
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+;; Take {ym->y} into account for register allocation
+(define_insn "*mov<mode>_internal_rex64"
+ [(set (match_operand:MMXMODEI 0 "nonimmediate_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "=rm,r,*y,*y ,m ,*y,Yt,x,x ,m,r,x")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (match_operand:MMXMODEI 1 "vector_move_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "Cr ,m,C ,*ym,*y,Yt,*y,C,xm,x,x,r"))]
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ "TARGET_64BIT && TARGET_MMX
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imov,imov,mmx,mmxmov,mmxmov,ssecvt,ssecvt,sselog1,ssemov,ssemov,ssemov,ssemov")
+ (set_attr "unit" "*,*,*,*,*,mmx,mmx,*,*,*,*,*")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "*mov<mode>_internal"
+;; APPLE LOCAL begin radar 4043818
+ [(set (match_operand:MMXMODEI 0 "nonimmediate_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "=*y,y ,m ,*y ,*Yt,*Yt,*Yt ,m ,*x,*x,*x,m ,?r ,?m")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (match_operand:MMXMODEI 1 "vector_move_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "C ,*ym,*y,*Yt,*y ,C ,*Ytm,*Yt,C ,*x,m ,*x,irm,r"))]
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+;; APPLE LOCAL end radar 4043818
+ "TARGET_MMX
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ #
+ #"
+ [(set_attr "type" "mmx,mmxmov,mmxmov,ssecvt,ssecvt,sselog1,ssemov,ssemov,sselog1,ssemov,ssemov,ssemov,*,*")
+ (set_attr "unit" "*,*,*,mmx,mmx,*,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,DI,V4SF,V4SF,V2SF,V2SF,DI,DI")])
+
+(define_expand "movv2sf"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "")
+ (match_operand:V2SF 1 "nonimmediate_operand" ""))]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_move (V2SFmode, operands);
+ DONE;
+})
+
+(define_insn "*movv2sf_internal_rex64"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "=rm,r,*y ,*y ,m ,*y,Yt,x,x,x,m,r,x")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (match_operand:V2SF 1 "vector_move_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "Cr ,m ,C ,*ym,*y,Yt,*y,C,x,m,x,x,r"))]
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ "TARGET_64BIT && TARGET_MMX
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "imov,imov,mmx,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,sselog1,ssemov,ssemov,ssemov,ssemov")
+ (set_attr "unit" "*,*,*,*,*,mmx,mmx,*,*,*,*,*,*")
+ (set_attr "mode" "DI,DI,DI,DI,DI,DI,DI,V4SF,V4SF,V2SF,V2SF,DI,DI")])
+
+(define_insn "*movv2sf_internal"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "=*y,*y ,m,*y ,*Yt,*x,*x,*x,m ,?r ,?m")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (match_operand:V2SF 1 "vector_move_operand"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ "C ,*ym,*y,*Yt,*y ,C ,*x,m ,*x,irm,r"))]
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ "TARGET_MMX
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "@
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}
+ #
+ #"
+ [(set_attr "type" "mmx,mmxmov,mmxmov,ssecvt,ssecvt,sselog1,ssemov,ssemov,ssemov,*,*")
+ (set_attr "unit" "*,*,*,mmx,mmx,*,*,*,*,*,*")
+ (set_attr "mode" "DI,DI,DI,DI,DI,V4SF,V4SF,V2SF,V2SF,DI,DI")])
+
+;; %%% This multiword mess has got to go.
+(define_split
+ [(set (match_operand:MMXMODE 0 "nonimmediate_operand" "")
+ (match_operand:MMXMODE 1 "general_operand" ""))]
+;; APPLE LOCAL begin 4099020
+ "!TARGET_64BIT && reload_completed
+ && (!MMX_REG_P (operands[0]) && !SSE_REG_P (operands[0]) && GET_CODE (operands[0]) != SUBREG)
+ && (!MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1]) && GET_CODE (operands[1]) != SUBREG)"
+;; APPLE LOCAL end 4099020
+ [(const_int 0)]
+ "ix86_split_long_move (operands); DONE;")
+
+(define_expand "push<mode>1"
+ [(match_operand:MMXMODE 0 "register_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_push (<MODE>mode, operands[0]);
+ DONE;
+})
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:MMXMODE 0 "nonimmediate_operand" "")
+ (match_operand:MMXMODE 1 "nonimmediate_operand" ""))]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_move (<MODE>mode, operands);
+ DONE;
+})
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "sse_movntv1di"
+ [(set (match_operand:V1DI 0 "memory_operand" "=m")
+ (unspec:V1DI [(match_operand:V1DI 1 "register_operand" "y")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "movntq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxmov")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point arithmetic
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_addv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (plus:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (PLUS, V2SFmode, operands)"
+ "pfadd\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_subv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y,y")
+ (minus:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "0,ym")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym,0")))]
+ "TARGET_3DNOW && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ pfsub\\t{%2, %0|%0, %2}
+ pfsubr\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_expand "mmx_subrv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "")
+ (minus:V2SF (match_operand:V2SF 2 "nonimmediate_operand" "")
+ (match_operand:V2SF 1 "nonimmediate_operand" "")))]
+ "TARGET_3DNOW && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "")
+
+(define_insn "mmx_mulv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (mult:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (MULT, V2SFmode, operands)"
+ "pfmul\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_smaxv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (smax:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (SMAX, V2SFmode, operands)"
+ "pfmax\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_sminv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (smin:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (SMIN, V2SFmode, operands)"
+ "pfmin\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_rcpv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (unspec:V2SF [(match_operand:V2SF 1 "nonimmediate_operand" "ym")]
+ UNSPEC_PFRCP))]
+ "TARGET_3DNOW"
+ "pfrcp\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmx")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_rcpit1v2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PFRCPIT1))]
+ "TARGET_3DNOW"
+ "pfrcpit1\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmx")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_rcpit2v2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PFRCPIT2))]
+ "TARGET_3DNOW"
+ "pfrcpit2\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmx")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_rsqrtv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (unspec:V2SF [(match_operand:V2SF 1 "nonimmediate_operand" "ym")]
+ UNSPEC_PFRSQRT))]
+ "TARGET_3DNOW"
+ "pfrsqrt\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmx")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_rsqit1v2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PFRSQIT1))]
+ "TARGET_3DNOW"
+ "pfrsqit1\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmx")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_haddv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (vec_concat:V2SF
+ (plus:SF
+ (vec_select:SF
+ (match_operand:V2SF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
+ (plus:SF
+ (vec_select:SF
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_3DNOW"
+ "pfacc\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_hsubv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (vec_concat:V2SF
+ (minus:SF
+ (vec_select:SF
+ (match_operand:V2SF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
+ (minus:SF
+ (vec_select:SF
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_3DNOW_A"
+ "pfnacc\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_addsubv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (vec_merge:V2SF
+ (plus:V2SF
+ (match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym"))
+ (minus:V2SF (match_dup 1) (match_dup 2))
+ (const_int 1)))]
+ "TARGET_3DNOW_A"
+ "pfpnacc\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "V2SF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point comparisons
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_gtv2sf3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (gt:V2SI (match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW"
+ "pfcmpgt\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcmp")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_gev2sf3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (ge:V2SI (match_operand:V2SF 1 "register_operand" "0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW"
+ "pfcmpge\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcmp")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_eqv2sf3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (eq:V2SI (match_operand:V2SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2SF 2 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (EQ, V2SFmode, operands)"
+ "pfcmpeq\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcmp")
+ (set_attr "mode" "V2SF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point conversion operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_pf2id"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (fix:V2SI (match_operand:V2SF 1 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW"
+ "pf2id\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_pf2iw"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (sign_extend:V2SI
+ (ss_truncate:V2HI
+ (fix:V2SI
+ (match_operand:V2SF 1 "nonimmediate_operand" "ym")))))]
+ "TARGET_3DNOW_A"
+ "pf2iw\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_pi2fw"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (float:V2SF
+ (sign_extend:V2SI
+ (truncate:V2HI
+ (match_operand:V2SI 1 "nonimmediate_operand" "ym")))))]
+ "TARGET_3DNOW_A"
+ "pi2fw\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "mmx_floatv2si2"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (float:V2SF (match_operand:V2SI 1 "nonimmediate_operand" "ym")))]
+ "TARGET_3DNOW"
+ "pi2fd\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "V2SF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point element swizzling
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_pswapdv2sf2"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (vec_select:V2SF (match_operand:V2SF 1 "nonimmediate_operand" "ym")
+ (parallel [(const_int 1) (const_int 0)])))]
+ "TARGET_3DNOW_A"
+ "pswapd\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "V2SF")])
+
+(define_insn "*vec_dupv2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=y")
+ (vec_duplicate:V2SF
+ (match_operand:SF 1 "register_operand" "0")))]
+ "TARGET_MMX"
+ "punpckldq\t%0, %0"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "*mmx_concatv2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=y,y")
+ (vec_concat:V2SF
+ (match_operand:SF 1 "nonimmediate_operand" " 0,rm")
+ (match_operand:SF 2 "vector_move_operand" "ym,C")))]
+ "TARGET_MMX && !TARGET_SSE"
+ "@
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt,mmxmov")
+ (set_attr "mode" "DI")])
+
+(define_expand "vec_setv2sf"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_insn_and_split "*vec_extractv2sf_0"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x,y,m,m,frxy")
+ (vec_select:SF
+ (match_operand:V2SF 1 "nonimmediate_operand" " x,y,x,y,m")
+ (parallel [(const_int 0)])))]
+ "TARGET_MMX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx op1 = operands[1];
+ if (REG_P (op1))
+ op1 = gen_rtx_REG (SFmode, REGNO (op1));
+ else
+ op1 = gen_lowpart (SFmode, op1);
+ emit_move_insn (operands[0], op1);
+ DONE;
+})
+
+(define_insn "*vec_extractv2sf_1"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=y,x,frxy")
+ (vec_select:SF
+ (match_operand:V2SF 1 "nonimmediate_operand" " 0,0,o")
+ (parallel [(const_int 1)])))]
+ "TARGET_MMX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ punpckhdq\t%0, %0
+ unpckhps\t%0, %0
+ #"
+ [(set_attr "type" "mmxcvt,sselog1,*")
+ (set_attr "mode" "DI,V4SF,SI")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (vec_select:SF
+ (match_operand:V2SF 1 "memory_operand" "")
+ (parallel [(const_int 1)])))]
+ "TARGET_MMX && reload_completed"
+ [(const_int 0)]
+{
+ operands[1] = adjust_address (operands[1], SFmode, 4);
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_extractv2sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv2sf"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral arithmetic
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_add<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (plus:MMXMODEI
+ (match_operand:MMXMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (PLUS, <MODE>mode, operands)"
+ "padd<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+;; remove mmx_adddi3
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "mmx_ssadd<mode>3"
+ [(set (match_operand:MMXMODE12 0 "register_operand" "=y")
+ (ss_plus:MMXMODE12
+ (match_operand:MMXMODE12 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODE12 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "padds<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_usadd<mode>3"
+ [(set (match_operand:MMXMODE12 0 "register_operand" "=y")
+ (us_plus:MMXMODE12
+ (match_operand:MMXMODE12 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODE12 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "paddus<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_sub<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (minus:MMXMODEI
+ (match_operand:MMXMODEI 1 "register_operand" "0")
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "psub<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+;; remove mmx_subdi3
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "mmx_sssub<mode>3"
+ [(set (match_operand:MMXMODE12 0 "register_operand" "=y")
+ (ss_minus:MMXMODE12
+ (match_operand:MMXMODE12 1 "register_operand" "0")
+ (match_operand:MMXMODE12 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "psubs<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_ussub<mode>3"
+ [(set (match_operand:MMXMODE12 0 "register_operand" "=y")
+ (us_minus:MMXMODE12
+ (match_operand:MMXMODE12 1 "register_operand" "0")
+ (match_operand:MMXMODE12 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "psubus<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_mulv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (mult:V4HI (match_operand:V4HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmullw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_smulv4hi3_highpart"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (mult:V4SI (sign_extend:V4SI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:V4SI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))
+ (const_int 16))))]
+ "TARGET_MMX && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmulhw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_umulv4hi3_highpart"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (mult:V4SI (zero_extend:V4SI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V4SI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))
+ (const_int 16))))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmulhuw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_pmaddwd"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (plus:V2SI
+ (mult:V2SI
+ (sign_extend:V2SI
+ (vec_select:V2HI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0")
+ (parallel [(const_int 0) (const_int 2)])))
+ (sign_extend:V2SI
+ (vec_select:V2HI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0) (const_int 2)]))))
+ (mult:V2SI
+ (sign_extend:V2SI
+ (vec_select:V2HI (match_dup 1)
+ (parallel [(const_int 1) (const_int 3)])))
+ (sign_extend:V2SI
+ (vec_select:V2HI (match_dup 2)
+ (parallel [(const_int 1) (const_int 3)]))))))]
+ "TARGET_MMX && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmaddwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
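+
+;; pmaddwd multiplies corresponding signed 16-bit elements and sums
+;; adjacent products: for operands {a0,a1,a2,a3} and {b0,b1,b2,b3} the
+;; V2SI result is {a0*b0 + a1*b1, a2*b2 + a3*b3}.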
+
+(define_insn "mmx_pmulhrwv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (plus:V4SI
+ (mult:V4SI
+ (sign_extend:V4SI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:V4SI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))
+ (const_vector:V4SI [(const_int 32768) (const_int 32768)
+ (const_int 32768) (const_int 32768)]))
+ (const_int 16))))]
+ "TARGET_3DNOW && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmulhrw\\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
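+
+;; pmulhrw returns the rounded high word of each signed 16-bit product,
+;; i.e. (a*b + 0x8000) >> 16; the const_vector of 32768 above encodes
+;; the rounding term.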
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "sse2_umulsidi3"
+ [(set (match_operand:V1DI 0 "register_operand" "=y")
+ (mult:V1DI
+ (zero_extend:V1DI
+ (vec_select:V1SI
+ (match_operand:V2SI 1 "nonimmediate_operand" "%0")
+ (parallel [(const_int 0)])))
+ (zero_extend:V1DI
+ (vec_select:V1SI
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)])))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V2SImode, operands)"
+ "pmuludq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxmul")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "mmx_umaxv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (umax:V8QI (match_operand:V8QI 1 "nonimmediate_operand" "%0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (UMAX, V8QImode, operands)"
+ "pmaxub\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_smaxv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (smax:V4HI (match_operand:V4HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (SMAX, V4HImode, operands)"
+ "pmaxsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_uminv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (umin:V8QI (match_operand:V8QI 1 "nonimmediate_operand" "%0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (UMIN, V8QImode, operands)"
+ "pminub\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_sminv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (smin:V4HI (match_operand:V4HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (SMIN, V4HImode, operands)"
+ "pminsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "mmx_ashr<mode>3"
+ [(set (match_operand:MMXMODE24 0 "register_operand" "=y")
+ (ashiftrt:MMXMODE24
+ (match_operand:MMXMODE24 1 "register_operand" "0")
+ (match_operand:V1DI 2 "nonmemory_operand" "yi")))]
+ "TARGET_MMX"
+ "psra<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_ashr<mode>2si"
+ [(set (match_operand:MMXMODE24 0 "register_operand" "=y")
+ (ashiftrt:MMXMODE24
+ (match_operand:MMXMODE24 1 "register_operand" "0")
+ (sign_extend:DI (match_operand:SI 2 "nonmemory_operand" "yi"))))]
+ "TARGET_MMX"
+ "psra<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_lshr<mode>3"
+ [(set (match_operand:MMXMODE248 0 "register_operand" "=y")
+ (lshiftrt:MMXMODE248
+ (match_operand:MMXMODE248 1 "register_operand" "0")
+ (match_operand:V1DI 2 "nonmemory_operand" "yi")))]
+ "TARGET_MMX"
+ "psrl<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_lshr<mode>2si"
+ [(set (match_operand:MMXMODE248 0 "register_operand" "=y")
+ (lshiftrt:MMXMODE248
+ (match_operand:MMXMODE248 1 "register_operand" "0")
+ (sign_extend:DI (match_operand:SI 2 "nonmemory_operand" "yi"))))]
+ "TARGET_MMX"
+ "psrl<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_ashl<mode>3"
+ [(set (match_operand:MMXMODE248 0 "register_operand" "=y")
+ (ashift:MMXMODE248
+ (match_operand:MMXMODE248 1 "register_operand" "0")
+ (match_operand:V1DI 2 "nonmemory_operand" "yi")))]
+ "TARGET_MMX"
+ "psll<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_ashl<mode>2si"
+ [(set (match_operand:MMXMODE248 0 "register_operand" "=y")
+ (ashift:MMXMODE248
+ (match_operand:MMXMODE248 1 "register_operand" "0")
+ (sign_extend:DI (match_operand:SI 2 "nonmemory_operand" "yi"))))]
+ "TARGET_MMX"
+ "psll<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
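+
+;; Each shift above comes in two forms: one taking the count as a full
+;; V1DI operand and one taking a sign-extended SI count, presumably
+;; matching the __m64-count (_mm_sll_* style) and int-count
+;; (_mm_slli_* style) intrinsic shapes of the V1DI rework.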
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral comparisons
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "mmx_eq<mode>3"
+ [(set (match_operand:MMXMODE124 0 "register_operand" "=y")
+ (eq:MMXMODE124
+ (match_operand:MMXMODE124 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODE124 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "pcmpeq<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcmp")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_gt<mode>3"
+ [(set (match_operand:MMXMODE124 0 "register_operand" "=y")
+ (gt:MMXMODE124
+ (match_operand:MMXMODE124 1 "register_operand" "0")
+ (match_operand:MMXMODE124 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "pcmpgt<mmxvecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcmp")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral logical operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_and<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (and:MMXMODEI
+ (match_operand:MMXMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (AND, <MODE>mode, operands)"
+ "pand\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_nand<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (and:MMXMODEI
+ (not:MMXMODEI (match_operand:MMXMODEI 1 "register_operand" "0"))
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX"
+ "pandn\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_ior<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (ior:MMXMODEI
+ (match_operand:MMXMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (IOR, <MODE>mode, operands)"
+ "por\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_xor<mode>3"
+ [(set (match_operand:MMXMODEI 0 "register_operand" "=y")
+ (xor:MMXMODEI
+ (match_operand:MMXMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:MMXMODEI 2 "nonimmediate_operand" "ym")))]
+ "TARGET_MMX && ix86_binary_operator_ok (XOR, <MODE>mode, operands)"
+ "pxor\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxadd")
+ (set_attr "mode" "DI")
+ (set_attr "memory" "none")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral element swizzling
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_packsswb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_concat:V8QI
+ (ss_truncate:V4QI
+ (match_operand:V4HI 1 "register_operand" "0"))
+ (ss_truncate:V4QI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym"))))]
+ "TARGET_MMX"
+ "packsswb\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_packssdw"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (ss_truncate:V2HI
+ (match_operand:V2SI 1 "register_operand" "0"))
+ (ss_truncate:V2HI
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym"))))]
+ "TARGET_MMX"
+ "packssdw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_packuswb"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_concat:V8QI
+ (us_truncate:V4QI
+ (match_operand:V4HI 1 "register_operand" "0"))
+ (us_truncate:V4QI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym"))))]
+ "TARGET_MMX"
+ "packuswb\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpckhbw"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_MMX"
+ "punpckhbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpcklbw"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)])))]
+ "TARGET_MMX"
+ "punpcklbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpckhwd"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_MMX"
+ "punpckhwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpcklwd"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_MMX"
+ "punpcklwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpckhdq"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "0")
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 1)
+ (const_int 3)])))]
+ "TARGET_MMX"
+ "punpckhdq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_punpckldq"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "0")
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym"))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_MMX"
+ "punpckldq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_expand "mmx_pinsrw"
+ [(set (match_operand:V4HI 0 "register_operand" "")
+ (vec_merge:V4HI
+ (vec_duplicate:V4HI
+ (match_operand:SI 2 "nonimmediate_operand" ""))
+ (match_operand:V4HI 1 "register_operand" "")
+ (match_operand:SI 3 "const_0_to_3_operand" "")))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+{
+ operands[2] = gen_lowpart (HImode, operands[2]);
+ operands[3] = GEN_INT (1 << INTVAL (operands[3]));
+})
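+
+;; The expander encodes the element index as the one-hot vec_merge mask
+;; (1 << index); the *mmx_pinsrw pattern below recovers the index with
+;; exact_log2 when emitting the pinsrw immediate.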
+
+(define_insn "*mmx_pinsrw"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_merge:V4HI
+ (vec_duplicate:V4HI
+ (match_operand:HI 2 "nonimmediate_operand" "rm"))
+ (match_operand:V4HI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_8_operand" "n")))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
+ return "pinsrw\t{%3, %k2, %0|%0, %k2, %3}";
+}
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_pextrw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (vec_select:HI
+ (match_operand:V4HI 1 "register_operand" "y")
+ (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n")]))))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "pextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_expand "mmx_pshufw"
+ [(match_operand:V4HI 0 "register_operand" "")
+ (match_operand:V4HI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")]
+ "TARGET_SSE || TARGET_3DNOW_A"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_mmx_pshufw_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3)));
+ DONE;
+})
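+
+;; For example, a mask of 0x1b selects elements 3,2,1,0 and reverses
+;; the vector, while a mask of 0 broadcasts element 0 to all four lanes.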
+
+(define_insn "mmx_pshufw_1"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_select:V4HI
+ (match_operand:V4HI 1 "nonimmediate_operand" "ym")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")])))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "pshufw\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "mmx_pswapdv2si2"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_select:V2SI
+ (match_operand:V2SI 1 "nonimmediate_operand" "ym")
+ (parallel [(const_int 1) (const_int 0)])))]
+ "TARGET_3DNOW_A"
+ "pswapd\\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "*vec_dupv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_duplicate:V4HI
+ (truncate:HI
+ (match_operand:SI 1 "register_operand" "0"))))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "pshufw\t{$0, %0, %0|%0, %0, 0}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "*vec_dupv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_duplicate:V2SI
+ (match_operand:SI 1 "register_operand" "0")))]
+ "TARGET_MMX"
+ "punpckldq\t%0, %0"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "*mmx_concatv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=y,y")
+ (vec_concat:V2SI
+ (match_operand:SI 1 "nonimmediate_operand" " 0,rm")
+ (match_operand:SI 2 "vector_move_operand" "ym,C")))]
+ "TARGET_MMX && !TARGET_SSE"
+ "@
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt,mmxmov")
+ (set_attr "mode" "DI")])
+
+(define_expand "vec_setv2si"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+;; APPLE LOCAL begin 4684674 permit mmx-to-int reg
+(define_insn_and_split "*vec_extractv2si_0"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=x,y,m,mr,frxy")
+ (vec_select:SI
+ (match_operand:V2SI 1 "nonimmediate_operand" " x,y,x,y,m")
+ (parallel [(const_int 0)])))]
+ "TARGET_MMX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx op1 = operands[1];
+ if (REG_P (op1))
+ op1 = gen_rtx_REG (SImode, REGNO (op1));
+ else
+ op1 = gen_lowpart (SImode, op1);
+ emit_move_insn (operands[0], op1);
+ DONE;
+})
+;; APPLE LOCAL end 4684674
+
+(define_insn "*vec_extractv2si_1"
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=y,Yt,Yt,x,frxy")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (vec_select:SI
+;; APPLE LOCAL begin mainline 2007-06-05 5103201
+ (match_operand:V2SI 1 "nonimmediate_operand" " 0,0 ,Yt,0,o")
+;; APPLE LOCAL end mainline 2007-06-05 5103201
+ (parallel [(const_int 1)])))]
+ "TARGET_MMX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ punpckhdq\t%0, %0
+ punpckhdq\t%0, %0
+ pshufd\t{$85, %1, %0|%0, %1, 85}
+ unpckhps\t%0, %0
+ #"
+ [(set_attr "type" "mmxcvt,sselog1,sselog1,sselog1,*")
+ (set_attr "mode" "DI,TI,TI,V4SF,SI")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (vec_select:SI
+ (match_operand:V2SI 1 "memory_operand" "")
+ (parallel [(const_int 1)])))]
+ "TARGET_MMX && reload_completed"
+ [(const_int 0)]
+{
+ operands[1] = adjust_address (operands[1], SImode, 4);
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_extractv2si"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V2SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv2si"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv4hi"
+ [(match_operand:V4HI 0 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4hi"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand:V4HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv4hi"
+ [(match_operand:V4HI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv8qi"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv8qi"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand:V8QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_MMX"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv8qi"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Miscellaneous
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "mmx_uavgv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (truncate:V8QI
+ (lshiftrt:V8HI
+ (plus:V8HI
+ (plus:V8HI
+ (zero_extend:V8HI
+ (match_operand:V8QI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V8HI
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")))
+ (const_vector:V8HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "(TARGET_SSE || TARGET_3DNOW)
+ && ix86_binary_operator_ok (PLUS, V8QImode, operands)"
+{
+  /* These two instructions have the same operation, but their encodings
+     differ.  Prefer the one that is the de facto standard.  */
+ if (TARGET_SSE || TARGET_3DNOW_A)
+ return "pavgb\t{%2, %0|%0, %2}";
+ else
+    return "pavgusb\t{%2, %0|%0, %2}";
+}
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
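+
+;; Both mnemonics compute the rounded byte average (a + b + 1) >> 1,
+;; exactly as the RTL above spells out.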
+
+(define_insn "mmx_uavgv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (plus:V4SI
+ (plus:V4SI
+ (zero_extend:V4SI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V4SI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))
+ (const_vector:V4SI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "(TARGET_SSE || TARGET_3DNOW_A)
+ && ix86_binary_operator_ok (PLUS, V4HImode, operands)"
+ "pavgw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "mmx_psadbw"
+ [(set (match_operand:V1DI 0 "register_operand" "=y")
+ (unspec:V1DI [(match_operand:V8QI 1 "register_operand" "0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PSADBW))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "psadbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "mmxshft")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
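+
+;; psadbw sums the absolute differences of the eight byte pairs and
+;; zero-extends the result into the 64-bit destination.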
+
+(define_insn "mmx_pmovmskb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V8QI 1 "register_operand" "y")]
+ UNSPEC_MOVMSK))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "pmovmskb\t{%1, %0|%0, %1}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_expand "mmx_maskmovq"
+ [(set (match_operand:V8QI 0 "memory_operand" "")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")
+ (match_dup 0)]
+ UNSPEC_MASKMOV))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "")
+
+(define_insn "*mmx_maskmovq"
+ [(set (mem:V8QI (match_operand:SI 0 "register_operand" "D"))
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")
+ (mem:V8QI (match_dup 0))]
+ UNSPEC_MASKMOV))]
+ "(TARGET_SSE || TARGET_3DNOW_A) && !TARGET_64BIT"
+ ;; @@@ check ordering of operands in intel/nonintel syntax
+ "maskmovq\t{%2, %1|%1, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
+
+(define_insn "*mmx_maskmovq_rex"
+ [(set (mem:V8QI (match_operand:DI 0 "register_operand" "D"))
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "y")
+ (match_operand:V8QI 2 "register_operand" "y")
+ (mem:V8QI (match_dup 0))]
+ UNSPEC_MASKMOV))]
+ "(TARGET_SSE || TARGET_3DNOW_A) && TARGET_64BIT"
+ ;; @@@ check ordering of operands in intel/nonintel syntax
+ "maskmovq\t{%2, %1|%1, %2}"
+ [(set_attr "type" "mmxcvt")
+ (set_attr "mode" "DI")])
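+
+;; Operand 0 is pinned with the "D" constraint in both variants because
+;; maskmovq always stores through the implicit (%edi)/(%rdi) pointer.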
+
+(define_insn "mmx_emms"
+ [(unspec_volatile [(const_int 0)] UNSPECV_EMMS)
+ (clobber (reg:XF 8))
+ (clobber (reg:XF 9))
+ (clobber (reg:XF 10))
+ (clobber (reg:XF 11))
+ (clobber (reg:XF 12))
+ (clobber (reg:XF 13))
+ (clobber (reg:XF 14))
+ (clobber (reg:XF 15))
+ (clobber (reg:DI 29))
+ (clobber (reg:DI 30))
+ (clobber (reg:DI 31))
+ (clobber (reg:DI 32))
+ (clobber (reg:DI 33))
+ (clobber (reg:DI 34))
+ (clobber (reg:DI 35))
+ (clobber (reg:DI 36))]
+ "TARGET_MMX"
+ "emms"
+ [(set_attr "type" "mmx")
+ (set_attr "memory" "unknown")])
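+
+;; The clobber list covers the eight x87 stack registers and the eight
+;; MMX registers that alias them: emms marks the whole x87 tag word
+;; empty, invalidating every live MMX value.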
+
+(define_insn "mmx_femms"
+ [(unspec_volatile [(const_int 0)] UNSPECV_FEMMS)
+ (clobber (reg:XF 8))
+ (clobber (reg:XF 9))
+ (clobber (reg:XF 10))
+ (clobber (reg:XF 11))
+ (clobber (reg:XF 12))
+ (clobber (reg:XF 13))
+ (clobber (reg:XF 14))
+ (clobber (reg:XF 15))
+ (clobber (reg:DI 29))
+ (clobber (reg:DI 30))
+ (clobber (reg:DI 31))
+ (clobber (reg:DI 32))
+ (clobber (reg:DI 33))
+ (clobber (reg:DI 34))
+ (clobber (reg:DI 35))
+ (clobber (reg:DI 36))]
+ "TARGET_3DNOW"
+ "femms"
+ [(set_attr "type" "mmx")
+ (set_attr "memory" "none")])
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/nmmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/nmmintrin.h
new file mode 100644
index 000000000..5c0db207b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/nmmintrin.h
@@ -0,0 +1,41 @@
+/* APPLE LOCAL file 5612787 mainline sse4 */
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 10.0. */
+
+#ifndef _NMMINTRIN_H_INCLUDED
+#define _NMMINTRIN_H_INCLUDED
+
+#ifndef __SSE4_2__
+# error "SSE4.2 instruction set not enabled"
+#else
+/* We just include the SSE4.1 header file.  */
+#include <smmintrin.h>
+#endif /* __SSE4_2__ */
+
+#endif /* _NMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/pentium.md b/gcc-4.2.1-5666.3/gcc/config/i386/pentium.md
new file mode 100644
index 000000000..1f994dd60
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/pentium.md
@@ -0,0 +1,316 @@
+;; Pentium Scheduling
+;; Copyright (C) 2002 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA. */
+;;
+;; The Pentium is an in-order core with two integer pipelines.
+
+;; True for insns that behave like prefixed insns on the Pentium.
+(define_attr "pent_prefix" "false,true"
+ (if_then_else (ior (eq_attr "prefix_0f" "1")
+ (ior (eq_attr "prefix_data16" "1")
+ (eq_attr "prefix_rep" "1")))
+ (const_string "true")
+ (const_string "false")))
+
+;; Categorize how an instruction slots.
+
+;; The non-MMX Pentium slots an instruction with prefixes on the U pipe
+;; only, while the MMX Pentium can slot it on either U or V.  Model the
+;; non-MMX rules, because doing so gives noticeably better code on the
+;; non-MMX Pentium and costs little on the MMX one.  (Prefixed insns are
+;; not very common, so the scheduler usually finds a non-prefixed pair.)
+
+(define_attr "pent_pair" "uv,pu,pv,np"
+ (cond [(eq_attr "imm_disp" "true")
+ (const_string "np")
+ (ior (eq_attr "type" "alu1,alu,imov,icmp,test,lea,incdec")
+ (and (eq_attr "type" "pop,push")
+ (eq_attr "memory" "!both")))
+ (if_then_else (eq_attr "pent_prefix" "true")
+ (const_string "pu")
+ (const_string "uv"))
+ (eq_attr "type" "ibr")
+ (const_string "pv")
+ (and (eq_attr "type" "ishift")
+ (match_operand 2 "const_int_operand" ""))
+ (const_string "pu")
+ (and (eq_attr "type" "rotate")
+ (match_operand 2 "const1_operand" ""))
+ (const_string "pu")
+ (and (eq_attr "type" "ishift1")
+ (match_operand 1 "const_int_operand" ""))
+ (const_string "pu")
+ (and (eq_attr "type" "rotate1")
+ (match_operand 1 "const1_operand" ""))
+ (const_string "pu")
+ (and (eq_attr "type" "call")
+ (match_operand 0 "constant_call_address_operand" ""))
+ (const_string "pv")
+ (and (eq_attr "type" "callv")
+ (match_operand 1 "constant_call_address_operand" ""))
+ (const_string "pv")
+ ]
+ (const_string "np")))
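+
+;; For example, a plain register-register add (type "alu", no prefix)
+;; slots as "uv" and can pair in either pipe; the same insn with a 0x0f,
+;; data16 or rep prefix slots as "pu" and can only issue in the U pipe.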
+
+(define_automaton "pentium,pentium_fpu")
+
+;; The Pentium has U and V pipes.  Instructions for the two pipes
+;; are always issued together, much as on a VLIW machine.
+;;
+;; predecode
+;; / \
+;; decodeu decodev
+;; / | |
+;; fpu executeu executev
+;; | | |
+;; fpu retire retire
+;; |
+;; fpu
+;; We add dummy "port" pipes, allocated only in the first cycle of an
+;; instruction, to specify this behavior.
+
+(define_cpu_unit "pentium-portu,pentium-portv" "pentium")
+(define_cpu_unit "pentium-u,pentium-v" "pentium")
+(absence_set "pentium-portu" "pentium-u,pentium-v")
+(presence_set "pentium-portv" "pentium-portu")
+
+;; Floating point instructions can overlap with newly issued integer
+;; instructions.  We model only the first cycle of the FP pipeline,
+;; as it is fully pipelined.
+(define_cpu_unit "pentium-fp" "pentium_fpu")
+
+;; There is a non-pipelined multiplier unit used for complex operations.
+(define_cpu_unit "pentium-fmul" "pentium_fpu")
+
+;; The Pentium preserves memory ordering, so when a load-execute-store
+;; instruction executes together with another instruction that loads
+;; data, execution of the other instruction is delayed until the very
+;; last cycle of the first one, when the data are bypassed.
+;; We model this by allocating the "memory" unit while a store is
+;; pending, which blocks the conflicting load units.
+
+(define_cpu_unit "pentium-memory" "pentium")
+(define_cpu_unit "pentium-load0" "pentium")
+(define_cpu_unit "pentium-load1" "pentium")
+(absence_set "pentium-load0,pentium-load1" "pentium-memory")
+
+(define_reservation "pentium-load" "(pentium-load0 | pentium-load1)")
+(define_reservation "pentium-np" "(pentium-u + pentium-v)")
+(define_reservation "pentium-uv" "(pentium-u | pentium-v)")
+(define_reservation "pentium-portuv" "(pentium-portu | pentium-portv)")
+(define_reservation "pentium-firstu" "(pentium-u + pentium-portu)")
+(define_reservation "pentium-firstv" "(pentium-v + pentium-portuv)")
+(define_reservation "pentium-firstuv" "(pentium-uv + pentium-portuv)")
+(define_reservation "pentium-firstuload" "(pentium-load + pentium-firstu)")
+(define_reservation "pentium-firstvload" "(pentium-load + pentium-firstv)")
+(define_reservation "pentium-firstuvload" "(pentium-load + pentium-firstuv)
+ | (pentium-firstv,pentium-v,
+ (pentium-load+pentium-firstv))")
+(define_reservation "pentium-firstuboth" "(pentium-load + pentium-firstu
+ + pentium-memory)")
+(define_reservation "pentium-firstvboth" "(pentium-load + pentium-firstv
+ + pentium-memory)")
+(define_reservation "pentium-firstuvboth" "(pentium-load + pentium-firstuv
+ + pentium-memory)
+ | (pentium-firstv,pentium-v,
+ (pentium-load+pentium-firstv))")
+
+;; A few common long-latency instructions
+(define_insn_reservation "pent_mul" 11
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "imul"))
+ "pentium-np*11")
+
+(define_insn_reservation "pent_str" 12
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "str"))
+ "pentium-np*12")
+
+;; Integer division and some other long-latency instructions block all
+;; units, including the FP pipe.  There is no value in modeling the
+;; latency of these instructions, and not modeling the latency
+;; decreases the size of the DFA.
+(define_insn_reservation "pent_block" 1
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "idiv"))
+ "pentium-np+pentium-fp")
+
+(define_insn_reservation "pent_cld" 2
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "cld"))
+ "pentium-np*2")
+
+;; Moves usually have a one-cycle penalty, but there are exceptions.
+(define_insn_reservation "pent_fmov" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "none,load")))
+ "(pentium-fp+pentium-np)")
+
+(define_insn_reservation "pent_fpmovxf" 3
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "type" "fmov")
+ (and (eq_attr "memory" "load,store")
+ (eq_attr "mode" "XF"))))
+ "(pentium-fp+pentium-np)*3")
+
+(define_insn_reservation "pent_fpstore" 2
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "type" "fmov")
+ (ior (match_operand 1 "immediate_operand" "")
+ (eq_attr "memory" "store"))))
+ "(pentium-fp+pentium-np)*2")
+
+(define_insn_reservation "pent_imov" 1
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "imov"))
+ "pentium-firstuv")
+
+;; Push and pop instructions have 1-cycle latency, and a special
+;; hardware bypass allows them to be paired with other push, pop
+;; and call instructions.
+(define_bypass 0 "pent_push,pent_pop" "pent_push,pent_pop,pent_call")
+(define_insn_reservation "pent_push" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "type" "push")
+ (eq_attr "memory" "store")))
+ "pentium-firstuv")
+
+(define_insn_reservation "pent_pop" 1
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "pop,leave"))
+ "pentium-firstuv")
+
+;; Call and branch instructions can execute in either pipe, but
+;; they are only pairable when in the V pipe.
+(define_insn_reservation "pent_call" 10
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "call,callv"))
+ "pentium-firstv,pentium-v*9")
+
+(define_insn_reservation "pent_branch" 1
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "ibr"))
+ "pentium-firstv")
+
+;; Floating point instructions dispatch in the U pipe, but continue
+;; in the FP pipeline, allowing other instructions to execute.
+(define_insn_reservation "pent_fp" 3
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "fop,fistp"))
+ "(pentium-firstu+pentium-fp),nothing,nothing")
+
+;; The first two cycles of fmul are not pipelined.
+(define_insn_reservation "pent_fmul" 3
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "fmul"))
+ "(pentium-firstuv+pentium-fp+pentium-fmul),pentium-fmul,nothing")
+
+;; Long-latency FP instructions overlap with integer instructions,
+;; but only their last 2 cycles overlap with FP ones.
+(define_insn_reservation "pent_fdiv" 39
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "fdiv"))
+ "(pentium-np+pentium-fp+pentium-fmul),
+ (pentium-fp+pentium-fmul)*36,pentium-fmul*2")
+
+(define_insn_reservation "pent_fpspc" 70
+ (and (eq_attr "cpu" "pentium")
+ (eq_attr "type" "fpspc"))
+ "(pentium-np+pentium-fp+pentium-fmul),
+ (pentium-fp+pentium-fmul)*67,pentium-fmul*2")
+
+;; Integer instructions.  Load/execute/store takes 3 cycles,
+;; load/execute takes 2, and execute alone takes one cycle.
+(define_insn_reservation "pent_uv_both" 3
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "uv")
+ (eq_attr "memory" "both")))
+ "pentium-firstuvboth,pentium-uv+pentium-memory,pentium-uv")
+
+(define_insn_reservation "pent_u_both" 3
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pu")
+ (eq_attr "memory" "both")))
+ "pentium-firstuboth,pentium-u+pentium-memory,pentium-u")
+
+(define_insn_reservation "pent_v_both" 3
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pv")
+ (eq_attr "memory" "both")))
+ "pentium-firstvboth,pentium-v+pentium-memory,pentium-v")
+
+(define_insn_reservation "pent_np_both" 3
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "np")
+ (eq_attr "memory" "both")))
+ "pentium-np,pentium-np,pentium-np")
+
+(define_insn_reservation "pent_uv_load" 2
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "uv")
+ (eq_attr "memory" "load")))
+ "pentium-firstuvload,pentium-uv")
+
+(define_insn_reservation "pent_u_load" 2
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pu")
+ (eq_attr "memory" "load")))
+ "pentium-firstuload,pentium-u")
+
+(define_insn_reservation "pent_v_load" 2
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pv")
+ (eq_attr "memory" "load")))
+ "pentium-firstvload,pentium-v")
+
+(define_insn_reservation "pent_np_load" 2
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "np")
+ (eq_attr "memory" "load")))
+ "pentium-np,pentium-np")
+
+(define_insn_reservation "pent_uv" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "uv")
+ (eq_attr "memory" "none")))
+ "pentium-firstuv")
+
+(define_insn_reservation "pent_u" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pu")
+ (eq_attr "memory" "none")))
+ "pentium-firstu")
+
+(define_insn_reservation "pent_v" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "pv")
+ (eq_attr "memory" "none")))
+ "pentium-firstv")
+
+(define_insn_reservation "pent_np" 1
+ (and (eq_attr "cpu" "pentium")
+ (and (eq_attr "pent_pair" "np")
+ (eq_attr "memory" "none")))
+ "pentium-np")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/pmmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/pmmintrin.h
new file mode 100644
index 000000000..764094186
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/pmmintrin.h
@@ -0,0 +1,183 @@
+/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */
+/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef _PMMINTRIN_H_INCLUDED
+#define _PMMINTRIN_H_INCLUDED
+
+#ifdef __SSE3__
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+/* Additional bits in the MXCSR. */
+#define _MM_DENORMALS_ZERO_MASK 0x0040
+#define _MM_DENORMALS_ZERO_ON 0x0040
+#define _MM_DENORMALS_ZERO_OFF 0x0000
+
+#define _MM_SET_DENORMALS_ZERO_MODE(mode) \
+ _mm_setcsr ((_mm_getcsr () & ~_MM_DENORMALS_ZERO_MASK) | (mode))
+#define _MM_GET_DENORMALS_ZERO_MODE() \
+ (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
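+
+/* Typical usage: _MM_SET_DENORMALS_ZERO_MODE (_MM_DENORMALS_ZERO_ON)
+   makes subsequent SSE arithmetic flush denormal inputs to zero.  */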
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* APPLE LOCAL begin radar 4152603 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_addsub_ps (__m128 __X, __m128 __Y)
+{
+ return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_ps (__m128 __X, __m128 __Y)
+{
+ return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_ps (__m128 __X, __m128 __Y)
+{
+ return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y);
+}
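+
+/* Lane behavior of the three ops above: with a = {a0,a1,a2,a3} and
+   b = {b0,b1,b2,b3}, _mm_addsub_ps gives {a0-b0, a1+b1, a2-b2, a3+b3},
+   _mm_hadd_ps gives {a0+a1, a2+a3, b0+b1, b2+b3}, and _mm_hsub_ps
+   gives {a0-a1, a2-a3, b0-b1, b2-b3}.  */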
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movehdup_ps (__m128 __X)
+{
+ return (__m128) __builtin_ia32_movshdup ((__v4sf)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_moveldup_ps (__m128 __X)
+{
+ return (__m128) __builtin_ia32_movsldup ((__v4sf)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_addsub_pd (__m128d __X, __m128d __Y)
+{
+ return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_pd (__m128d __X, __m128d __Y)
+{
+ return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_pd (__m128d __X, __m128d __Y)
+{
+ return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loaddup_pd (double const *__P)
+{
+ return _mm_load1_pd (__P);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movedup_pd (__m128d __X)
+{
+ return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_lddqu_si128 (__m128i const *__P)
+{
+ return (__m128i) __builtin_ia32_lddqu ((char const *)__P);
+}
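+
+/* lddqu is an unaligned 16-byte load that the hardware optimizes for
+   accesses crossing a cache-line boundary.  */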
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_monitor (void const * __P, unsigned int __E, unsigned int __H)
+{
+ __builtin_ia32_monitor (__P, __E, __H);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mwait (unsigned int __E, unsigned int __H)
+{
+ __builtin_ia32_mwait (__E, __H);
+}
+/* APPLE LOCAL end radar 4152603 */
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* __SSE3__ */
+
+#endif /* _PMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/ppro.md b/gcc-4.2.1-5666.3/gcc/config/i386/ppro.md
new file mode 100644
index 000000000..3e31eb336
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/ppro.md
@@ -0,0 +1,766 @@
+;; Scheduling for the Intel P6 family of processors
+;; Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA. */
+
+;; The P6 family includes the Pentium Pro, Pentium II, Pentium III, Celeron
+;; and Xeon lines of CPUs. The DFA scheduler description in this file is
+;; based on information that can be found in the following three documents:
+;;
+;; "P6 Family of Processors Hardware Developer's Manual",
+;; Intel, September 1999.
+;;
+;; "Intel Architecture Optimization Manual",
+;; Intel, 1999 (Order Number: 245127-001).
+;;
+;; "How to optimize for the Pentium family of microprocessors",
+;; by Agner Fog, PhD.
+;;
+;; The P6 pipeline has three major components:
+;; 1) the FETCH/DECODE unit, an in-order issue front-end
+;; 2) the DISPATCH/EXECUTE unit, which is the out-of-order core
+;; 3) the RETIRE unit, an in-order retirement unit
+;;
+;; So, the P6 CPUs have out-of-order cores, but the instruction decoder and
+;; retirement unit are naturally in-order.
+;;
+;; BUS INTERFACE UNIT
+;; / \
+;; L1 ICACHE L1 DCACHE
+;; / | \ | \
+;; DECODER0 DECODER1 DECODER2 DISP/EXEC RETIRE
+;; \ | / | |
+;; INSTRUCTION POOL __________|_______/
+;; (inc. reorder buffer)
+;;
+;; Since the P6 CPUs execute instructions out-of-order, the most important
+;; consideration in performance tuning is making sure enough micro-ops are
+;; ready for execution in the out-of-order core, while not stalling the
+;; decoder.
+;;
+;; TODO:
+;; - Find a less crude way to model complex instructions, in
+;; particular how many cycles they take to be decoded.
+;; - Include decoder latencies in the total reservation latencies.
+;; This isn't necessary right now because we assume for every
+;; instruction that it never blocks a decoder.
+;; - Figure out where the p0 and p1 reservations come from. These
+;; appear not to be in the manual (e.g. why is cld "(p0+p1)*2"
+;; better than "(p0|p1)*4" ???)
+;; - Lots more because I'm sure this is still far from optimal :-)
+
+;; The ppro_idiv and ppro_fdiv automata are used to model issue
+;; latencies of idiv and fdiv type insns.
+(define_automaton "ppro_decoder,ppro_core,ppro_idiv,ppro_fdiv,ppro_load,ppro_store")
+
+;; Simple instructions of the register-register form have only one uop.
+;; Load instructions are also only one uop. Store instructions decode to
+;; two uops, and simple read-modify instructions also take two uops.
+;; Simple instructions of the register-memory form have two to three uops.
+;; Simple read-modify-write instructions have four uops. The rules for
+;; the decoder are simple:
+;; - an instruction with 1 uop can be decoded by any of the three
+;; decoders in one cycle.
+;; - an instruction with 1 to 4 uops can be decoded only by decoder 0
+;; but still in only one cycle.
+;; - a complex (microcode) instruction can also only be decoded by
+;; decoder 0, and this takes an unspecified number of cycles.
+;;
+;; The goal is to schedule such that we have a few-one-one uops sequence
+;; in each cycle, to decode as many instructions per cycle as possible.
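+;; For example, "reg-mem add (2 uops); reg-reg mov (1 uop); reg-reg mov
+;; (1 uop)" decodes in a single cycle (decoder0 plus both simple
+;; decoders), while three consecutive 2-uop insns need three cycles.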
+(define_cpu_unit "decoder0" "ppro_decoder")
+(define_cpu_unit "decoder1" "ppro_decoder")
+(define_cpu_unit "decoder2" "ppro_decoder")
+
+;; We first wish to find an instruction for decoder0, so exclude
+;; decoder1 and decoder2 from being reserved until decoder 0 is
+;; reserved.
+(presence_set "decoder1" "decoder0")
+(presence_set "decoder2" "decoder0")
+
+;; Most instructions can be decoded on any of the three decoders.
+(define_reservation "decodern" "(decoder0|decoder1|decoder2)")
+
+;; The out-of-order core has five pipelines. During each cycle, the core
+;; may dispatch zero or one uop on the port of any of the five pipelines,
+;; so the maximum number of dispatched uops per cycle is 5.  In practice,
+;; 3 uops per cycle is more realistic.
+;;
+;; Two of the five pipelines contain several execution units:
+;;
+;; Port 0 Port 1 Port 2 Port 3 Port 4
+;; ALU ALU LOAD SAC SDA
+;; FPU JUE
+;; AGU MMX
+;; MMX P3FPU
+;; P3FPU
+;;
+;; (SAC=Store Address Calculation, SDA=Store Data Unit, P3FPU = SSE unit,
+;; JUE = Jump Execution Unit, AGU = Address Generation Unit)
+;;
+(define_cpu_unit "p0,p1" "ppro_core")
+(define_cpu_unit "p2" "ppro_load")
+(define_cpu_unit "p3,p4" "ppro_store")
+(define_cpu_unit "idiv" "ppro_idiv")
+(define_cpu_unit "fdiv" "ppro_fdiv")
+
+;; Only the irregular instructions have to be modeled here. A load
+;; increases the latency by 2 or 3, or by nothing if the manual gives
+;; a latency already. Store latencies are not accounted for.
+;;
+;; The simple instructions follow a very regular pattern of 1 uop per
+;; reg-reg operation, 1 uop per load on port 2, and 2 uops per store
+;; on port 4 and port 3. These instructions are modelled at the bottom
+;; of this file.
+;;
+;; For microcoded instructions we don't know how many uops are produced.
+;; These instructions are the "complex" ones in the Intel manuals. All
+;; we _do_ know is that they typically produce four or more uops, so
+;; they can only be decoded on decoder0. Modelling their latencies
+;; doesn't make sense because we don't know how these instructions are
+;; executed in the core. So we just model that they can only be decoded
+;; on decoder 0, and say that it takes a little while before the result
+;; is available.
+(define_insn_reservation "ppro_complex_insn" 6
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "other,multi,call,callv,str"))
+ "decoder0")
+
+;; imov with memory operands does not use the integer units.
+(define_insn_reservation "ppro_imov" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "imov")))
+ "decodern,(p0|p1)")
+
+(define_insn_reservation "ppro_imov_load" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "imov")))
+ "decodern,p2")
+
+(define_insn_reservation "ppro_imov_store" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (eq_attr "type" "imov")))
+ "decoder0,p4+p3")
+
+;; imovx always decodes to one uop, and also doesn't use the integer
+;; units if it has memory operands.
+(define_insn_reservation "ppro_imovx" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "imovx")))
+ "decodern,(p0|p1)")
+
+(define_insn_reservation "ppro_imovx_load" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "imovx")))
+ "decodern,p2")
+
+;; lea executes on port 0 with latency one and throughput 1.
+(define_insn_reservation "ppro_lea" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "lea")))
+ "decodern,p0")
+
+;; Shift and rotate execute on port 0 with latency and throughput 1.
+;; The load and store units need to be reserved when memory operands
+;; are involved.
+(define_insn_reservation "ppro_shift_rotate" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "ishift,ishift1,rotate,rotate1")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_shift_rotate_mem" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "!none")
+ (eq_attr "type" "ishift,ishift1,rotate,rotate1")))
+ "decoder0,p2+p0,p4+p3")
+
+(define_insn_reservation "ppro_cld" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "cld"))
+ "decoder0,(p0+p1)*2")
+
+;; The P6 has a sophisticated branch prediction mechanism to minimize
+;; latencies due to branching. In particular, it has a fast way to
+;; execute branches that are taken multiple times (such as in loops).
+;; Branches not taken suffer no penalty, and correctly predicted
+;; branches cost only one fetch cycle. Mispredicted branches are very
+;; costly: typically 15 cycles and possibly as many as 26 cycles.
+;;
+;; Unfortunately all this makes it quite difficult to properly model
+;; the latencies for the compiler. Here I've made the choice to be
+;; optimistic and assume branches are often predicted correctly, so
+;; they have latency 1, and the decoders are not blocked.
+;;
+;; In addition, the model assumes a branch always decodes to only 1 uop,
+;; which is not exactly true because there are a few instructions that
+;; decode to 2 uops or microcode. But this probably gives the best
+;; results because we can assume these instructions can decode on all
+;; decoders.
+(define_insn_reservation "ppro_branch" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "ibr")))
+ "decodern,p1")
+
+;; ??? Indirect branches probably have worse latency than this.
+(define_insn_reservation "ppro_indirect_branch" 6
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "!none")
+ (eq_attr "type" "ibr")))
+ "decoder0,p2+p1")
+
+(define_insn_reservation "ppro_leave" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "leave"))
+ "decoder0,p2+(p0|p1),(p0|p1)")
+
+;; imul has throughput one, but latency 4, and can only execute on port 0.
+(define_insn_reservation "ppro_imul" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "imul")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_imul_mem" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "!none")
+ (eq_attr "type" "imul")))
+ "decoder0,p2+p0")
+
+;; div and idiv are very similar, so we model them the same.
+;; QI, HI, and SI have issue latency 12, 21, and 37, respectively.
+;; These issue latencies are modelled via the ppro_idiv automaton.
+(define_insn_reservation "ppro_idiv_QI" 19
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "QI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,(p0+idiv)*2,(p0|p1)+idiv,idiv*9")
+
+(define_insn_reservation "ppro_idiv_QI_load" 19
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "QI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,p2+p0+idiv,p0+idiv,(p0|p1)+idiv,idiv*9")
+
+(define_insn_reservation "ppro_idiv_HI" 23
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "HI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,(p0+idiv)*3,(p0|p1)+idiv,idiv*17")
+
+(define_insn_reservation "ppro_idiv_HI_load" 23
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "HI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,p2+p0+idiv,p0+idiv,(p0|p1)+idiv,idiv*18")
+
+(define_insn_reservation "ppro_idiv_SI" 39
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,(p0+idiv)*3,(p0|p1)+idiv,idiv*33")
+
+(define_insn_reservation "ppro_idiv_SI_load" 39
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SI")
+ (eq_attr "type" "idiv"))))
+ "decoder0,p2+p0+idiv,p0+idiv,(p0|p1)+idiv,idiv*34")
+
+;; Floating point operations always execute on port 0.
+;; ??? Where do these latencies come from? fadd has latency 3 and
+;; throughput "1/cycle (align with FADD)". What does that mean, and
+;; how can we model it?
+(define_insn_reservation "ppro_fop" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none,unknown")
+ (eq_attr "type" "fop")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_fop_load" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "fop")))
+ "decoder0,p2+p0,p0")
+
+(define_insn_reservation "ppro_fop_store" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (eq_attr "type" "fop")))
+ "decoder0,p0,p0,p0+p4+p3")
+
+(define_insn_reservation "ppro_fop_both" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "both")
+ (eq_attr "type" "fop")))
+ "decoder0,p2+p0,p0+p4+p3")
+
+(define_insn_reservation "ppro_fsgn" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "fsgn"))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_fistp" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "fistp"))
+ "decoder0,p0*2,p4+p3")
+
+(define_insn_reservation "ppro_fcmov" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (eq_attr "type" "fcmov"))
+ "decoder0,p0*2")
+
+(define_insn_reservation "ppro_fcmp" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "fcmp")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_fcmp_load" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "fcmp")))
+ "decoder0,p2+p0")
+
+(define_insn_reservation "ppro_fmov" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "fmov")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_fmov_load" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "!XF")
+ (eq_attr "type" "fmov"))))
+ "decodern,p2")
+
+(define_insn_reservation "ppro_fmov_XF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "XF")
+ (eq_attr "type" "fmov"))))
+ "decoder0,(p2+p0)*2")
+
+(define_insn_reservation "ppro_fmov_store" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (and (eq_attr "mode" "!XF")
+ (eq_attr "type" "fmov"))))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_fmov_XF_store" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (and (eq_attr "mode" "XF")
+ (eq_attr "type" "fmov"))))
+ "decoder0,(p0+p4),(p0+p3)")
+
+;; fmul executes on port 0 with latency 5. It has issue latency 2,
+;; but we don't model this.
+(define_insn_reservation "ppro_fmul" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "fmul")))
+ "decoder0,p0*2")
+
+(define_insn_reservation "ppro_fmul_load" 6
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "fmul")))
+ "decoder0,p2+p0,p0")
+
+;; fdiv latencies depend on the mode of the operands. XFmode gives
+;; a latency of 38 cycles, DFmode gives 32, and SFmode gives 18.
+;; Division by a power of 2 takes only 9 cycles, but we cannot model
+;; that. Throughput is equal to latency - 1, which we model using the
+;; ppro_div automaton.
+(define_insn_reservation "ppro_fdiv_SF" 18
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decodern,p0+fdiv,fdiv*16")
+
+(define_insn_reservation "ppro_fdiv_SF_load" 19
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decoder0,p2+p0+fdiv,fdiv*16")
+
+(define_insn_reservation "ppro_fdiv_DF" 32
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decodern,p0+fdiv,fdiv*30")
+
+(define_insn_reservation "ppro_fdiv_DF_load" 33
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decoder0,p2+p0+fdiv,fdiv*30")
+
+(define_insn_reservation "ppro_fdiv_XF" 38
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "XF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decodern,p0+fdiv,fdiv*36")
+
+(define_insn_reservation "ppro_fdiv_XF_load" 39
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "XF")
+ (eq_attr "type" "fdiv,fpspc"))))
+ "decoder0,p2+p0+fdiv,fdiv*36")
+
+;; MMX instructions can execute on either port 0 or port 1 with a
+;; throughput of 1/cycle.
+;; on port 0: - ALU (latency 1)
+;; - Multiplier Unit (latency 3)
+;; on port 1: - ALU (latency 1)
+;; - Shift Unit (latency 1)
+;;
+;; MMX instructions are either of the type reg-reg, or read-modify, and
+;; except for mmxshft and mmxmul they can execute on port 0 or port 1,
+;; so they behave as "simple" instructions that need no special modelling.
+;; We only have to model mmxshft and mmxmul.
+(define_insn_reservation "ppro_mmx_shft" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "mmxshft")))
+ "decodern,p1")
+
+(define_insn_reservation "ppro_mmx_shft_load" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "mmxshft")))
+ "decoder0,p2+p1")
+
+(define_insn_reservation "ppro_mmx_mul" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "mmxmul")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_mmx_mul_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (eq_attr "type" "mmxmul")))
+ "decoder0,p2+p0")
+
+(define_insn_reservation "ppro_sse_mmxcvt" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "mode" "DI")
+ (eq_attr "type" "mmxcvt")))
+ "decodern,p1")
+
+;; FIXME: These are Pentium III only, but we cannot tell here if
+;; we're generating code for PentiumPro/Pentium II or Pentium III.
+;; (define_insn_reservation "ppro_sse_mmxshft" 2
+;; (and (eq_attr "cpu" "pentiumpro,generic32")
+;; (and (eq_attr "mode" "DI")
+;; (eq_attr "type" "mmxshft")))
+;; "decodern,p0")
+
+;; SSE is very complicated, and takes a bit more effort.
+;; ??? I assumed that all SSE instructions decode on decoder0,
+;; but is this correct?
+
+;; The sfence instruction.
+(define_insn_reservation "ppro_sse_sfence" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "unknown")
+ (eq_attr "type" "sse")))
+ "decoder0,p4+p3")
+
+;; FIXME: This reservation is all wrong when we're scheduling sqrtss.
+(define_insn_reservation "ppro_sse_SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "sse")))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_sse_add_SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "sseadd"))))
+ "decodern,p1")
+
+(define_insn_reservation "ppro_sse_add_SF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "sseadd"))))
+ "decoder0,p2+p1")
+
+(define_insn_reservation "ppro_sse_cmp_SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssecmp"))))
+ "decoder0,p1")
+
+(define_insn_reservation "ppro_sse_cmp_SF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssecmp"))))
+ "decoder0,p2+p1")
+
+(define_insn_reservation "ppro_sse_comi_SF" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssecomi"))))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_sse_comi_SF_load" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssecomi"))))
+ "decoder0,p2+p0")
+
+(define_insn_reservation "ppro_sse_mul_SF" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssemul"))))
+ "decodern,p0")
+
+(define_insn_reservation "ppro_sse_mul_SF_load" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssemul"))))
+ "decoder0,p2+p0")
+
+;; FIXME: ssediv doesn't close p0 for 17 cycles, surely???
+(define_insn_reservation "ppro_sse_div_SF" 18
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssediv"))))
+ "decoder0,p0*17")
+
+(define_insn_reservation "ppro_sse_div_SF_load" 18
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssediv"))))
+ "decoder0,(p2+p0),p0*16")
+
+(define_insn_reservation "ppro_sse_icvt_SF" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "sseicvt")))
+ "decoder0,(p2+p1)*2")
+
+(define_insn_reservation "ppro_sse_icvt_SI" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "mode" "SI")
+ (eq_attr "type" "sseicvt")))
+ "decoder0,(p2+p1)")
+
+(define_insn_reservation "ppro_sse_mov_SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,(p0|p1)")
+
+(define_insn_reservation "ppro_sse_mov_SF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,p2+(p0|p1)")
+
+(define_insn_reservation "ppro_sse_mov_SF_store" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,p4+p3")
+
+(define_insn_reservation "ppro_sse_V4SF" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "sse")))
+ "decoder0,p1*2")
+
+(define_insn_reservation "ppro_sse_add_V4SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "sseadd"))))
+ "decoder0,p1*2")
+
+(define_insn_reservation "ppro_sse_add_V4SF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "sseadd"))))
+ "decoder0,(p2+p1)*2")
+
+(define_insn_reservation "ppro_sse_cmp_V4SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssecmp"))))
+ "decoder0,p1*2")
+
+(define_insn_reservation "ppro_sse_cmp_V4SF_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssecmp"))))
+ "decoder0,(p2+p1)*2")
+
+(define_insn_reservation "ppro_sse_cvt_V4SF" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none,unknown")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssecvt"))))
+ "decoder0,p1*2")
+
+(define_insn_reservation "ppro_sse_cvt_V4SF_other" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "!none,unknown")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssecmp"))))
+ "decoder0,p1,p4+p3")
+
+(define_insn_reservation "ppro_sse_mul_V4SF" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssemul"))))
+ "decoder0,p0*2")
+
+(define_insn_reservation "ppro_sse_mul_V4SF_load" 5
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssemul"))))
+ "decoder0,(p2+p0)*2")
+
+;; FIXME: p0 really closed this long???
+(define_insn_reservation "ppro_sse_div_V4SF" 48
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssediv"))))
+ "decoder0,p0*34")
+
+(define_insn_reservation "ppro_sse_div_V4SF_load" 48
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssediv"))))
+ "decoder0,(p2+p0)*2,p0*32")
+
+(define_insn_reservation "ppro_sse_log_V4SF" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "sselog,sselog1"))))
+ "decodern,p1")
+
+(define_insn_reservation "ppro_sse_log_V4SF_load" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "sselog,sselog1"))))
+ "decoder0,(p2+p1)")
+
+(define_insn_reservation "ppro_sse_mov_V4SF" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,(p0|p1)*2")
+
+(define_insn_reservation "ppro_sse_mov_V4SF_load" 2
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,p2*2")
+
+(define_insn_reservation "ppro_sse_mov_V4SF_store" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (and (eq_attr "mode" "V4SF")
+ (eq_attr "type" "ssemov"))))
+ "decoder0,(p4+p3)*2")
+
+;; All other instructions are modelled as simple instructions.
+;; We have already modelled all i387 floating point instructions, so all
+;; other instructions execute on either port 0 or port 1. This includes
+;; the ALU units, and the MMX units.
+;;
+;; reg-reg instructions produce 1 uop so they can be decoded on any of
+;; the three decoders.
+(define_insn_reservation "ppro_insn" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "none,unknown")
+ (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,icmov,push,pop,fxch,sseiadd,sseishft,sseimul,mmx,mmxadd,mmxcmp")))
+ "decodern,(p0|p1)")
+
+;; read-modify and register-memory instructions have two or three uops,
+;; so they have to be decoded on decoder0.
+(define_insn_reservation "ppro_insn_load" 3
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "load")
+ (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,icmov,push,pop,fxch,sseiadd,sseishft,sseimul,mmx,mmxadd,mmxcmp")))
+ "decoder0,p2+(p0|p1)")
+
+(define_insn_reservation "ppro_insn_store" 1
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "store")
+ (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,icmov,push,pop,fxch,sseiadd,sseishft,sseimul,mmx,mmxadd,mmxcmp")))
+ "decoder0,(p0|p1),p4+p3")
+
+;; read-modify-store instructions produce 4 uops so they have to be
+;; decoded on decoder0 as well.
+(define_insn_reservation "ppro_insn_both" 4
+ (and (eq_attr "cpu" "pentiumpro,generic32")
+ (and (eq_attr "memory" "both")
+ (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,icmov,push,pop,fxch,sseiadd,sseishft,sseimul,mmx,mmxadd,mmxcmp")))
+ "decoder0,p2+(p0|p1),p4+p3")
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/predicates.md b/gcc-4.2.1-5666.3/gcc/config/i386/predicates.md
new file mode 100644
index 000000000..f988d11e4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/predicates.md
@@ -0,0 +1,1037 @@
+;; Predicate definitions for IA-32 and x86-64.
+;; Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Return nonzero if OP is either an i387 or an SSE fp register.
+(define_predicate "any_fp_register_operand"
+ (and (match_code "reg")
+ (match_test "ANY_FP_REGNO_P (REGNO (op))")))
+
+;; Return nonzero if OP is an i387 fp register.
+(define_predicate "fp_register_operand"
+ (and (match_code "reg")
+ (match_test "FP_REGNO_P (REGNO (op))")))
+
+;; Return nonzero if OP is a non-fp register_operand.
+(define_predicate "register_and_not_any_fp_reg_operand"
+ (and (match_code "reg")
+ (not (match_test "ANY_FP_REGNO_P (REGNO (op))"))))
+
+;; Return nonzero if OP is a register operand other than an i387 fp register.
+(define_predicate "register_and_not_fp_reg_operand"
+ (and (match_code "reg")
+ (not (match_test "FP_REGNO_P (REGNO (op))"))))
+
+;; True if the operand is an MMX register.
+(define_predicate "mmx_reg_operand"
+ (and (match_code "reg")
+ (match_test "MMX_REGNO_P (REGNO (op))")))
+
+;; True if the operand is a Q_REGS class register.
+(define_predicate "q_regs_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return ANY_QI_REG_P (op);
+})
+
+;; Return true if op is a NON_Q_REGS class register.
+(define_predicate "non_q_regs_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return NON_QI_REG_P (op);
+})
+
+;; Match an SI or HImode register for a zero_extract.
+(define_special_predicate "ext_register_operand"
+ (match_operand 0 "register_operand")
+{
+ if ((!TARGET_64BIT || GET_MODE (op) != DImode)
+ && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* Be careful to accept only registers having upper parts. */
+ return REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) < 4;
+})
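+
+;; In practice the hard registers with addressable upper parts are
+;; %eax, %edx, %ecx and %ebx (regnos 0-3, whose second bytes are %ah,
+;; %dh, %ch and %bh); pseudos above LAST_VIRTUAL_REGISTER are accepted
+;; optimistically.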
+
+;; Return true if op is the AX register.
+(define_predicate "ax_reg_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == 0")))
+
+;; Return true if op is the flags register.
+(define_predicate "flags_reg_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == FLAGS_REG")))
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; Return true if op is not xmm0 register.
+(define_predicate "reg_not_xmm0_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) != FIRST_SSE_REG")))
+
+;; As above, but allow nonimmediate operands.
+(define_predicate "nonimm_not_xmm0_operand"
+ (and (match_operand 0 "nonimmediate_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) != FIRST_SSE_REG")))
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Return 1 if VALUE can be stored in a sign extended immediate field.
+(define_predicate "x86_64_immediate_operand"
+ (match_code "const_int,symbol_ref,label_ref,const")
+{
+ if (!TARGET_64BIT)
+ return immediate_operand (op, mode);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+ /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
+ to be at least 32, and thus all acceptable constants are
+ represented as CONST_INT. */
+ if (HOST_BITS_PER_WIDE_INT == 32)
+ return 1;
+ else
+ {
+ HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (op), DImode);
+ return trunc_int_for_mode (val, SImode) == val;
+ }
+ break;
+
+ case SYMBOL_REF:
+ /* For certain code models, the symbolic references are known to fit;
+ in the CM_SMALL_PIC model we know it fits if it is local to the
+ shared library. Don't count TLS SYMBOL_REFs here, since they fit
+ only when inside one of the UNSPECs handled below. */
+ /* TLS symbols are not constant. */
+ if (SYMBOL_REF_TLS_MODEL (op))
+ return false;
+ return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
+ || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));
+
+ case LABEL_REF:
+ /* For certain code models, the code is near as well. */
+ return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
+ || ix86_cmodel == CM_KERNEL);
+
+ case CONST:
+ /* We may also accept offsetted memory references in certain
+ special cases. */
+ if (GET_CODE (XEXP (op, 0)) == UNSPEC)
+ switch (XINT (XEXP (op, 0), 1))
+ {
+ case UNSPEC_GOTPCREL:
+ case UNSPEC_DTPOFF:
+ case UNSPEC_GOTNTPOFF:
+ case UNSPEC_NTPOFF:
+ return 1;
+ default:
+ break;
+ }
+
+ if (GET_CODE (XEXP (op, 0)) == PLUS)
+ {
+ rtx op1 = XEXP (XEXP (op, 0), 0);
+ rtx op2 = XEXP (XEXP (op, 0), 1);
+ HOST_WIDE_INT offset;
+
+ if (ix86_cmodel == CM_LARGE)
+ return 0;
+ if (GET_CODE (op2) != CONST_INT)
+ return 0;
+ offset = trunc_int_for_mode (INTVAL (op2), DImode);
+ switch (GET_CODE (op1))
+ {
+ case SYMBOL_REF:
+ /* TLS symbols are not constant. */
+ if (SYMBOL_REF_TLS_MODEL (op1))
+ return 0;
+ /* For CM_SMALL, assume that the last object is 16MB below the end
+ of the 31-bit boundary. We may also accept fairly large negative
+ constants, knowing that all objects are in the positive half of
+ the address space. */
+ if ((ix86_cmodel == CM_SMALL
+ || (ix86_cmodel == CM_MEDIUM
+ && !SYMBOL_REF_FAR_ADDR_P (op1)))
+ && offset < 16*1024*1024
+ && trunc_int_for_mode (offset, SImode) == offset)
+ return 1;
+ /* For CM_KERNEL we know that all objects reside in the negative
+ half of the 32-bit address space. We must not accept negative
+ offsets, since they may fall just outside that range, but we may
+ accept fairly large positive ones. */
+ if (ix86_cmodel == CM_KERNEL
+ && offset > 0
+ && trunc_int_for_mode (offset, SImode) == offset)
+ return 1;
+ break;
+
+ case LABEL_REF:
+ /* These conditions are similar to SYMBOL_REF ones, just the
+ constraints for code models differ. */
+ if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
+ && offset < 16*1024*1024
+ && trunc_int_for_mode (offset, SImode) == offset)
+ return 1;
+ if (ix86_cmodel == CM_KERNEL
+ && offset > 0
+ && trunc_int_for_mode (offset, SImode) == offset)
+ return 1;
+ break;
+
+ case UNSPEC:
+ switch (XINT (op1, 1))
+ {
+ case UNSPEC_DTPOFF:
+ case UNSPEC_NTPOFF:
+ if (offset > 0
+ && trunc_int_for_mode (offset, SImode) == offset)
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return 0;
+})
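+
+;; For example, (const_int -1), and any other constant equal to its own
+;; 32-bit sign extension, matches; (const_int 0x100000000) does not.
+;; A plain (symbol_ref "foo") matches only under the small, kernel and
+;; near medium code models checked above.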
+
+;; Return 1 if VALUE can be stored in the zero extended immediate field.
+(define_predicate "x86_64_zext_immediate_operand"
+ (match_code "const_double,const_int,symbol_ref,label_ref,const")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ if (HOST_BITS_PER_WIDE_INT == 32)
+ return (GET_MODE (op) == VOIDmode && !CONST_DOUBLE_HIGH (op));
+ else
+ return 0;
+
+ case CONST_INT:
+ if (HOST_BITS_PER_WIDE_INT == 32)
+ return INTVAL (op) >= 0;
+ else
+ return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);
+
+ case SYMBOL_REF:
+ /* For certain code models, the symbolic references are known to fit. */
+ /* TLS symbols are not constant. */
+ if (SYMBOL_REF_TLS_MODEL (op))
+ return false;
+ return (ix86_cmodel == CM_SMALL
+ || (ix86_cmodel == CM_MEDIUM
+ && !SYMBOL_REF_FAR_ADDR_P (op)));
+
+ case LABEL_REF:
+ /* For certain code models, the code is near as well. */
+ return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;
+
+ case CONST:
+ /* We may also accept offsetted memory references in certain
+ special cases. */
+ if (GET_CODE (XEXP (op, 0)) == PLUS)
+ {
+ rtx op1 = XEXP (XEXP (op, 0), 0);
+ rtx op2 = XEXP (XEXP (op, 0), 1);
+
+ if (ix86_cmodel == CM_LARGE)
+ return 0;
+ switch (GET_CODE (op1))
+ {
+ case SYMBOL_REF:
+ /* TLS symbols are not constant. */
+ if (SYMBOL_REF_TLS_MODEL (op1))
+ return 0;
+ /* For small code model we may accept pretty large positive
+ offsets, since one bit is available for free. Negative
+ offsets are limited by the size of NULL pointer area
+ specified by the ABI. */
+ if ((ix86_cmodel == CM_SMALL
+ || (ix86_cmodel == CM_MEDIUM
+ && !SYMBOL_REF_FAR_ADDR_P (op1)))
+ && GET_CODE (op2) == CONST_INT
+ && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
+ && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
+ return 1;
+ /* ??? For the kernel, we may accept adjustment of
+ -0x10000000, since we know that it will just convert
+ negative address space to positive, but perhaps this
+ is not worthwhile. */
+ break;
+
+ case LABEL_REF:
+ /* These conditions are similar to SYMBOL_REF ones, just the
+ constraints for code models differ. */
+ if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
+ && GET_CODE (op2) == CONST_INT
+ && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
+ && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
+ return 1;
+ break;
+
+ default:
+ return 0;
+ }
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return 0;
+})
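+
+;; For example, (const_int 0xffffffff) matches here but not the
+;; sign-extended predicate above: writing a 32-bit register on x86_64
+;; implicitly zero-extends to 64 bits, so such constants can be loaded
+;; with a plain movl rather than a movabs.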
+
+;; Return nonzero if OP is a general operand representable on x86_64.
+(define_predicate "x86_64_general_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (ior (match_operand 0 "nonimmediate_operand")
+ (match_operand 0 "x86_64_immediate_operand"))
+ (match_operand 0 "general_operand")))
+
+;; Return nonzero if OP is a general operand representable on x86_64
+;; as either a sign-extended or zero-extended constant.
+(define_predicate "x86_64_szext_general_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (ior (match_operand 0 "nonimmediate_operand")
+ (ior (match_operand 0 "x86_64_immediate_operand")
+ (match_operand 0 "x86_64_zext_immediate_operand")))
+ (match_operand 0 "general_operand")))
+
+;; Return nonzero if OP is a nonmemory operand representable on x86_64.
+(define_predicate "x86_64_nonmemory_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "x86_64_immediate_operand"))
+ (match_operand 0 "nonmemory_operand")))
+
+;; Return nonzero if OP is a nonmemory operand representable on x86_64
+;; as either a sign-extended or zero-extended constant.
+(define_predicate "x86_64_szext_nonmemory_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_operand 0 "x86_64_immediate_operand")
+ (match_operand 0 "x86_64_zext_immediate_operand")))
+ (match_operand 0 "nonmemory_operand")))
+
+;; Return true when the operand is a PIC expression that can be
+;; computed by an lea operation.
+(define_predicate "pic_32bit_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ if (!flag_pic)
+ return 0;
+ /* Rule out relocations that translate into 64-bit constants. */
+ if (TARGET_64BIT && GET_CODE (op) == CONST)
+ {
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == UNSPEC
+ && (XINT (op, 1) == UNSPEC_GOTOFF
+ || XINT (op, 1) == UNSPEC_GOT))
+ return 0;
+ }
+ return symbolic_operand (op, mode);
+})
+
+
+;; Return nonzero if OP is a nonmemory operand acceptable to the
+;; movabs patterns.
+(define_predicate "x86_64_movabs_operand"
+ (if_then_else (match_test "!TARGET_64BIT || !flag_pic")
+ (match_operand 0 "nonmemory_operand")
+ (ior (match_operand 0 "register_operand")
+ (and (match_operand 0 "const_double_operand")
+ (match_test "GET_MODE_SIZE (mode) <= 8")))))
+
+;; Returns nonzero if OP is either a symbol reference or a sum of a symbol
+;; reference and a constant.
+(define_predicate "symbolic_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF
+ || (GET_CODE (op) == UNSPEC
+ && (XINT (op, 1) == UNSPEC_GOT
+ || XINT (op, 1) == UNSPEC_GOTOFF
+ || XINT (op, 1) == UNSPEC_GOTPCREL)))
+ return 1;
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return 0;
+
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF)
+ return 1;
+ /* Only @GOTOFF gets offsets. */
+ if (GET_CODE (op) != UNSPEC
+ || XINT (op, 1) != UNSPEC_GOTOFF)
+ return 0;
+
+ op = XVECEXP (op, 0, 0);
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF)
+ return 1;
+ return 0;
+
+ default:
+ gcc_unreachable ();
+ }
+})
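+
+;; Accepted forms thus include (symbol_ref "x"), a label_ref, a sum
+;; such as (const (plus (symbol_ref "x") (const_int 8))), the
+;; GOT/GOTOFF/GOTPCREL unspecs, and an offsetted @GOTOFF reference
+;; like (const (plus (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)
+;; (const_int 8))).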
+
+;; Return true if the operand contains a @GOT or @GOTOFF reference.
+(define_predicate "pic_symbolic_operand"
+ (match_code "const")
+{
+ op = XEXP (op, 0);
+ if (TARGET_64BIT)
+ {
+ if (GET_CODE (op) == UNSPEC
+ && XINT (op, 1) == UNSPEC_GOTPCREL)
+ return 1;
+ if (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 0)) == UNSPEC
+ && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL)
+ return 1;
+ }
+ else
+ {
+ if (GET_CODE (op) == UNSPEC)
+ return 1;
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == UNSPEC)
+ return 1;
+ }
+ return 0;
+})
+
+;; Return true if OP is a symbolic operand that resolves locally.
+(define_predicate "local_symbolic_operand"
+ (match_code "const,label_ref,symbol_ref")
+{
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ op = XEXP (XEXP (op, 0), 0);
+
+ if (GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ if (GET_CODE (op) != SYMBOL_REF)
+ return 0;
+
+ if (SYMBOL_REF_TLS_MODEL (op) != 0)
+ return 0;
+
+/* APPLE LOCAL begin fix-and-continue 6358507 */
+ if (SYMBOL_REF_LOCAL_P (op))
+ {
+#if TARGET_MACHO
+ if (!indirect_data (op)
+ || machopic_data_defined_p (op))
+#endif
+ return 1;
+ }
+/* APPLE LOCAL end fix-and-continue 6358507 */
+
+ /* There is, however, a not insubstantial body of code in the rest
+ of the compiler that assumes it can just stick the results of
+ ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and be done. */
+ /* ??? This is a hack. Should update the body of the compiler to
+ always create a DECL and invoke targetm.encode_section_info. */
+ if (strncmp (XSTR (op, 0), internal_label_prefix,
+ internal_label_prefix_len) == 0)
+ return 1;
+
+ return 0;
+})
+
+;; Test for various thread-local symbols.
+(define_predicate "tls_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_TLS_MODEL (op) != 0")))
+
+(define_predicate "tls_modbase_operand"
+ (and (match_code "symbol_ref")
+ (match_test "op == ix86_tls_module_base ()")))
+
+(define_predicate "tp_or_register_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "unspec")
+ (match_test "XINT (op, 1) == UNSPEC_TP"))))
+
+;; Test for a pc-relative call operand.
+(define_predicate "constant_call_address_operand"
+ (ior (match_code "symbol_ref")
+ (match_operand 0 "local_symbolic_operand")))
+
+;; True for any register that is neither virtual nor eliminable. Used in
+;; places where instantiation of such a register may cause the pattern
+;; to not be recognized.
+(define_predicate "register_no_elim_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ return !(op == arg_pointer_rtx
+ || op == frame_pointer_rtx
+ || (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ && REGNO (op) <= LAST_VIRTUAL_REGISTER));
+})
+
+;; Similarly, but include the stack pointer. This is used to prevent esp
+;; from being used as an index reg.
+(define_predicate "index_register_operand"
+ (match_operand 0 "register_operand")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (reload_in_progress || reload_completed)
+ return REG_OK_FOR_INDEX_STRICT_P (op);
+ else
+ return REG_OK_FOR_INDEX_NONSTRICT_P (op);
+})
+
+;; Return false if this is any eliminable register. Otherwise general_operand.
+(define_predicate "general_no_elim_operand"
+ (if_then_else (match_code "reg,subreg")
+ (match_operand 0 "register_no_elim_operand")
+ (match_operand 0 "general_operand")))
+
+;; Return false if this is any eliminable register. Otherwise
+;; register_operand or a constant.
+(define_predicate "nonmemory_no_elim_operand"
+ (ior (match_operand 0 "register_no_elim_operand")
+ (match_operand 0 "immediate_operand")))
+
+;; Test for a valid operand for a call instruction.
+(define_predicate "call_insn_operand"
+ (ior (match_operand 0 "constant_call_address_operand")
+ (ior (match_operand 0 "register_no_elim_operand")
+ (match_operand 0 "memory_operand"))))
+
+;; Similarly, but for tail calls, in which we cannot allow memory references.
+(define_predicate "sibcall_insn_operand"
+ (ior (match_operand 0 "constant_call_address_operand")
+ (match_operand 0 "register_no_elim_operand")))
+
+;; Match exactly zero.
+(define_predicate "const0_operand"
+ (match_code "const_int,const_double,const_vector")
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+ return op == CONST0_RTX (mode);
+})
+
+;; Match exactly one.
+(define_predicate "const1_operand"
+ (and (match_code "const_int")
+ (match_test "op == const1_rtx")))
+
+;; Match exactly eight.
+(define_predicate "const8_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 8")))
+
+;; Match 2, 4, or 8. Used for leal multiplicands.
+(define_predicate "const248_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT i = INTVAL (op);
+ return i == 2 || i == 4 || i == 8;
+})
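+
+;; These are exactly the index scale factors the x86 addressing modes
+;; allow beyond 1; e.g. (mult (reg) (const_int 4)) inside an address
+;; becomes the ",4" scale of an lea or memory operand.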
+
+;; Match 0 or 1.
+(define_predicate "const_0_to_1_operand"
+ (and (match_code "const_int")
+ (match_test "op == const0_rtx || op == const1_rtx")))
+
+;; Match 0 to 3.
+(define_predicate "const_0_to_3_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 3")))
+
+;; Match 0 to 7.
+(define_predicate "const_0_to_7_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 7")))
+
+;; Match 0 to 15.
+(define_predicate "const_0_to_15_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 15")))
+
+;; Match 0 to 63.
+(define_predicate "const_0_to_63_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 63")))
+
+;; Match 0 to 255.
+(define_predicate "const_0_to_255_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 255")))
+
+;; Match (0 to 255) * 8.
+(define_predicate "const_0_to_255_mul_8_operand"
+ (match_code "const_int")
+{
+ unsigned HOST_WIDE_INT val = INTVAL (op);
+ return val <= 255*8 && val % 8 == 0;
+})
+
+;; Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand
+;; for shift & compare patterns, as shifting by 0 does not change flags).
+(define_predicate "const_1_to_31_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 1 && INTVAL (op) <= 31")))
+
+;; Match 2 or 3.
+(define_predicate "const_2_to_3_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 2 || INTVAL (op) == 3")))
+
+;; Match 4 to 7.
+(define_predicate "const_4_to_7_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 4 && INTVAL (op) <= 7")))
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; Match exactly one bit in 2-bit mask.
+(define_predicate "const_pow2_1_to_2_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 1 || INTVAL (op) == 2")))
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; Match exactly one bit in 4-bit mask.
+(define_predicate "const_pow2_1_to_8_operand"
+ (match_code "const_int")
+{
+ unsigned int log = exact_log2 (INTVAL (op));
+ return log <= 3;
+})
+
+;; Match exactly one bit in 8-bit mask.
+(define_predicate "const_pow2_1_to_128_operand"
+ (match_code "const_int")
+{
+ unsigned int log = exact_log2 (INTVAL (op));
+ return log <= 7;
+})
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; Match exactly one bit in 16-bit mask.
+(define_predicate "const_pow2_1_to_32768_operand"
+ (match_code "const_int")
+{
+ unsigned int log = exact_log2 (INTVAL (op));
+ return log <= 15;
+})
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; True if this is a constant appropriate for an increment or decrement.
+(define_predicate "incdec_operand"
+ (match_code "const_int")
+{
+ /* On Pentium4, the inc and dec operations cause an extra dependency
+ on the flags register, since the carry flag is not set. */
+ if (!TARGET_USE_INCDEC && !optimize_size)
+ return 0;
+ return op == const1_rtx || op == constm1_rtx;
+})
+
+;; True for registers, or 1 or -1. Used to optimize double-word shifts.
+(define_predicate "reg_or_pm1_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_int")
+ (match_test "op == const1_rtx || op == constm1_rtx"))))
+
+;; True if OP is acceptable as operand of DImode shift expander.
+(define_predicate "shiftdi_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (match_operand 0 "nonimmediate_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "ashldi_input_operand"
+ (if_then_else (match_test "TARGET_64BIT")
+ (match_operand 0 "nonimmediate_operand")
+ (match_operand 0 "reg_or_pm1_operand")))
+
+;; Return true if OP is a vector load from the constant pool with just
+;; the first element nonzero.
+(define_predicate "zero_extended_scalar_load_operand"
+ (match_code "mem")
+{
+ unsigned n_elts;
+ op = maybe_get_pool_constant (op);
+ if (!op)
+ return 0;
+ if (GET_CODE (op) != CONST_VECTOR)
+ return 0;
+ n_elts =
+ (GET_MODE_SIZE (GET_MODE (op)) /
+ GET_MODE_SIZE (GET_MODE_INNER (GET_MODE (op))));
+ for (n_elts--; n_elts > 0; n_elts--)
+ {
+ rtx elt = CONST_VECTOR_ELT (op, n_elts);
+ if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
+ return 0;
+ }
+ return 1;
+})
+
+;; Return true if the operand is a vector constant that is all ones.
+(define_predicate "vector_all_ones_operand"
+ (match_code "const_vector")
+{
+ int nunits = GET_MODE_NUNITS (mode);
+
+ if (GET_CODE (op) == CONST_VECTOR
+ && CONST_VECTOR_NUNITS (op) == nunits)
+ {
+ int i;
+ for (i = 0; i < nunits; ++i)
+ {
+ rtx x = CONST_VECTOR_ELT (op, i);
+ if (x != constm1_rtx)
+ return 0;
+ }
+ return 1;
+ }
+
+ return 0;
+})
+
+;; Return 1 when OP is an operand acceptable for a standard SSE move.
+(define_predicate "vector_move_operand"
+ (ior (match_operand 0 "nonimmediate_operand")
+ (match_operand 0 "const0_operand")))
+
+;; Return 1 when OP is a nonimmediate operand or a standard SSE constant.
+(define_predicate "nonimmediate_or_sse_const_operand"
+ (match_operand 0 "general_operand")
+{
+ if (nonimmediate_operand (op, mode))
+ return 1;
+ if (standard_sse_constant_p (op) > 0)
+ return 1;
+ return 0;
+})
+
+;; APPLE LOCAL begin mainline
+/* MERGE FIXME: was this replaced by reg_or_0_operand below? */
+;; Return true if OP is a nonimmediate operand or zero.
+(define_predicate "nonimmediate_or_0_operand"
+ (ior (match_operand 0 "nonimmediate_operand")
+ (match_operand 0 "const0_operand")))
+;; APPLE LOCAL end mainline
+
+;; Return true if OP is a register or a zero.
+(define_predicate "reg_or_0_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const0_operand")))
+
+;; Return true if op is a valid address that does not contain a
+;; segment override.
+(define_special_predicate "no_seg_address_operand"
+ (match_operand 0 "address_operand")
+{
+ struct ix86_address parts;
+ int ok;
+
+ ok = ix86_decompose_address (op, &parts);
+ gcc_assert (ok);
+ return parts.seg == SEG_DEFAULT;
+})
+
+;; Return nonzero if the rtx is known to be at least 32 bits aligned.
+(define_predicate "aligned_operand"
+ (match_operand 0 "general_operand")
+{
+ struct ix86_address parts;
+ int ok;
+
+ /* Registers and immediate operands are always "aligned". */
+ if (GET_CODE (op) != MEM)
+ return 1;
+
+ /* All patterns that use aligned_operand on memory operands end up
+ promoting the memory operand to 64 bits, thus causing a memory
+ mismatch stall. */
+ if (TARGET_MEMORY_MISMATCH_STALL && !optimize_size)
+ return 0;
+
+ /* Don't even try to do any aligned optimizations with volatiles. */
+ if (MEM_VOLATILE_P (op))
+ return 0;
+
+ if (MEM_ALIGN (op) >= 32)
+ return 1;
+
+ op = XEXP (op, 0);
+
+ /* Pushes and pops are only valid on the stack pointer. */
+ if (GET_CODE (op) == PRE_DEC
+ || GET_CODE (op) == POST_INC)
+ return 1;
+
+ /* Decode the address. */
+ ok = ix86_decompose_address (op, &parts);
+ gcc_assert (ok);
+
+ /* Look for some component that isn't known to be aligned. */
+ if (parts.index)
+ {
+ if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
+ return 0;
+ }
+ if (parts.base)
+ {
+ if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
+ return 0;
+ }
+ if (parts.disp)
+ {
+ if (GET_CODE (parts.disp) != CONST_INT
+ || (INTVAL (parts.disp) & 3) != 0)
+ return 0;
+ }
+
+ /* Didn't find one -- this must be an aligned address. */
+ return 1;
+})
+
+;; Returns 1 if OP is a memory operand with a displacement.
+(define_predicate "memory_displacement_operand"
+ (match_operand 0 "memory_operand")
+{
+ struct ix86_address parts;
+ int ok;
+
+ ok = ix86_decompose_address (XEXP (op, 0), &parts);
+ gcc_assert (ok);
+ return parts.disp != NULL_RTX;
+})
+
+;; Returns 1 if OP is a memory operand with a displacement only.
+(define_predicate "memory_displacement_only_operand"
+ (match_operand 0 "memory_operand")
+{
+ struct ix86_address parts;
+ int ok;
+
+ ok = ix86_decompose_address (XEXP (op, 0), &parts);
+ gcc_assert (ok);
+
+ if (parts.base || parts.index)
+ return 0;
+
+ return parts.disp != NULL_RTX;
+})
+
+;; Returns 1 if OP is a memory operand that cannot be represented
+;; by the modRM byte alone.
+(define_predicate "long_memory_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_test "memory_address_length (op) != 0")))
+
+;; Return 1 if OP is a comparison operator that can be issued by fcmov.
+(define_predicate "fcmov_comparison_operator"
+ (match_operand 0 "comparison_operator")
+{
+ enum machine_mode inmode = GET_MODE (XEXP (op, 0));
+ enum rtx_code code = GET_CODE (op);
+
+ if (inmode == CCFPmode || inmode == CCFPUmode)
+ {
+ enum rtx_code second_code, bypass_code;
+ ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
+ if (bypass_code != UNKNOWN || second_code != UNKNOWN)
+ return 0;
+ code = ix86_fp_compare_code_to_integer (code);
+ }
+ /* The i387 supports only a limited set of condition codes. */
+ switch (code)
+ {
+ case LTU: case GTU: case LEU: case GEU:
+ if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode)
+ return 1;
+ return 0;
+ case ORDERED: case UNORDERED:
+ case EQ: case NE:
+ return 1;
+ default:
+ return 0;
+ }
+})
+
+;; Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS
+;; insns. The first set are supported directly; the second set cannot
+;; be done with full IEEE support, i.e. with NaNs handled correctly.
+;;
+;; ??? It would seem that we have a lot of uses of this predicate that pass
+;; it the wrong mode. We got away with this because the old function didn't
+;; check the mode at all. Mirror that for now by calling this a special
+;; predicate.
+
+(define_special_predicate "sse_comparison_operator"
+ (match_code "eq,lt,le,unordered,ne,unge,ungt,ordered"))
+
+;; Return 1 if OP is a valid comparison operator in valid mode.
+(define_predicate "ix86_comparison_operator"
+ (match_operand 0 "comparison_operator")
+{
+ enum machine_mode inmode = GET_MODE (XEXP (op, 0));
+ enum rtx_code code = GET_CODE (op);
+
+ if (inmode == CCFPmode || inmode == CCFPUmode)
+ {
+ enum rtx_code second_code, bypass_code;
+ ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
+ return (bypass_code == UNKNOWN && second_code == UNKNOWN);
+ }
+ switch (code)
+ {
+ case EQ: case NE:
+ return 1;
+ case LT: case GE:
+ if (inmode == CCmode || inmode == CCGCmode
+ || inmode == CCGOCmode || inmode == CCNOmode)
+ return 1;
+ return 0;
+ case LTU: case GTU: case LEU: case ORDERED: case UNORDERED: case GEU:
+ if (inmode == CCmode)
+ return 1;
+ return 0;
+ case GT: case LE:
+ if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
+ return 1;
+ return 0;
+ default:
+ return 0;
+ }
+})
+
+;; Return 1 if OP is a valid comparison operator testing carry flag to be set.
+(define_predicate "ix86_carry_flag_operator"
+ (match_code "ltu,lt,unlt,gt,ungt,le,unle,ge,unge,ltgt,uneq")
+{
+ enum machine_mode inmode = GET_MODE (XEXP (op, 0));
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_CODE (XEXP (op, 0)) != REG
+ || REGNO (XEXP (op, 0)) != FLAGS_REG
+ || XEXP (op, 1) != const0_rtx)
+ return 0;
+
+ if (inmode == CCFPmode || inmode == CCFPUmode)
+ {
+ enum rtx_code second_code, bypass_code;
+ ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
+ if (bypass_code != UNKNOWN || second_code != UNKNOWN)
+ return 0;
+ code = ix86_fp_compare_code_to_integer (code);
+ }
+ else if (inmode != CCmode)
+ return 0;
+
+ return code == LTU;
+})
+
+;; Nearly general operand, but accept any const_double, since we wish
+;; to be able to drop them into memory rather than have them get pulled
+;; into registers.
+(define_predicate "cmp_fp_expander_operand"
+ (ior (match_code "const_double")
+ (match_operand 0 "general_operand")))
+
+;; Return true if this is a valid binary floating-point operation.
+(define_predicate "binary_fp_operator"
+ (match_code "plus,minus,mult,div"))
+
+;; Return true if this is a multiply operation.
+(define_predicate "mult_operator"
+ (match_code "mult"))
+
+;; Return true if this is a division operation.
+(define_predicate "div_operator"
+ (match_code "div"))
+
+;; Return true if this is a float extend operation.
+(define_predicate "float_operator"
+ (match_code "float"))
+
+;; Return true for ARITHMETIC_P.
+(define_predicate "arith_or_logical_operator"
+ (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
+ mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))
+
+;; Return 1 if OP is a binary operator that can be promoted to wider mode.
+;; Modern CPUs have the same latency for HImode and SImode multiply,
+;; but the 386 and 486 do HImode multiply faster.
+(define_predicate "promotable_binary_operator"
+ (ior (match_code "plus,and,ior,xor,ashift")
+ (and (match_code "mult")
+ (match_test "ix86_tune > PROCESSOR_I486"))))
+
+;; To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0,
+;; re-recognize the operand to avoid a copy_to_mode_reg that will fail.
+;;
+;; ??? It seems likely that this will only work because cmpsi is an
+;; expander, and no actual insns use this.
+
+(define_predicate "cmpsi_operand"
+ (ior (match_operand 0 "nonimmediate_operand")
+ (and (match_code "and")
+ (match_code "zero_extract" "0")
+ (match_code "const_int" "1")
+ (match_code "const_int" "01")
+ (match_code "const_int" "02")
+ (match_test "INTVAL (XEXP (XEXP (op, 0), 1)) == 8")
+ (match_test "INTVAL (XEXP (XEXP (op, 0), 2)) == 8")
+ )))
+
+(define_predicate "compare_operator"
+ (match_code "compare"))
+
+(define_predicate "absneg_operator"
+ (match_code "abs,neg"))
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/smmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/smmintrin.h
new file mode 100644
index 000000000..2da9a7460
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/smmintrin.h
@@ -0,0 +1,836 @@
+/* APPLE LOCAL file 5612787 mainline sse4 */
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 10.0. */
+
+#ifndef _SMMINTRIN_H_INCLUDED
+#define _SMMINTRIN_H_INCLUDED
+
+#ifndef __SSE4_1__
+# error "SSE4.1 instruction set not enabled"
+#else
+
+/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
+ files. */
+#include <tmmintrin.h>
+
+/* SSE4.1 */
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT \
+ (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR \
+ (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL \
+ (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC \
+ (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT \
+ (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT \
+ (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
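+
+/* For example, _MM_FROUND_FLOOR expands to
+ (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC), i.e. 0x01: round
+ toward negative infinity without suppressing the precision
+ exception. These values are meant to be passed as the rounding
+ control immediate of the _mm_round_ps/_mm_round_pd family defined
+ later in this header. */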
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* Integer blend instructions - select data from 2 sources using
+ constant/variable mask. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M)
+{
+ return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X,
+ (__v8hi)__Y,
+ __M);
+}
+#else
+#define _mm_blend_epi16(X, Y, M) \
+ ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(X), (__v8hi)(Y), (M)))
+#endif
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M)
+{
+ return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ (__v16qi)__M);
+}
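+
+/* For example, since each result byte comes from __Y wherever the
+ corresponding mask byte has its most significant bit set, a per-byte
+ signed minimum can be written as (an illustrative sketch, not an API
+ provided by this header):
+
+ static __inline __m128i
+ min_epi8 (__m128i __a, __m128i __b)
+ {
+ return _mm_blendv_epi8 (__a, __b, _mm_cmpgt_epi8 (__a, __b));
+ }
+*/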
+
+/* Single precision floating point blend instructions - select data
+ from 2 sources using constant/variable mask. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_blend_ps (__m128 __X, __m128 __Y, const int __M)
+{
+ return (__m128) __builtin_ia32_blendps ((__v4sf)__X,
+ (__v4sf)__Y,
+ __M);
+}
+#else
+#define _mm_blend_ps(X, Y, M) \
+ ((__m128) __builtin_ia32_blendps ((__v4sf)(X), (__v4sf)(Y), (M)))
+#endif
+
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M)
+{
+ return (__m128) __builtin_ia32_blendvps ((__v4sf)__X,
+ (__v4sf)__Y,
+ (__v4sf)__M);
+}
+
+/* Double precision floating point blend instructions - select data
+ from 2 sources using constant/variable mask. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+_mm_blend_pd (__m128d __X, __m128d __Y, const int __M)
+{
+ return (__m128d) __builtin_ia32_blendpd ((__v2df)__X,
+ (__v2df)__Y,
+ __M);
+}
+#else
+#define _mm_blend_pd(X, Y, M) \
+ ((__m128d) __builtin_ia32_blendpd ((__v2df)(X), (__v2df)(Y), (M)))
+#endif
+
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M)
+{
+ return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X,
+ (__v2df)__Y,
+ (__v2df)__M);
+}
+
+/* Dot product instructions with mask-defined summing and zeroing of
+ parts of the result. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_dp_ps (__m128 __X, __m128 __Y, const int __M)
+{
+ return (__m128) __builtin_ia32_dpps ((__v4sf)__X,
+ (__v4sf)__Y,
+ __M);
+}
+
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+_mm_dp_pd (__m128d __X, __m128d __Y, const int __M)
+{
+ return (__m128d) __builtin_ia32_dppd ((__v2df)__X,
+ (__v2df)__Y,
+ __M);
+}
+#else
+#define _mm_dp_ps(X, Y, M) \
+ ((__m128) __builtin_ia32_dpps ((__v4sf)(X), (__v4sf)(Y), (M)))
+
+#define _mm_dp_pd(X, Y, M) \
+ ((__m128d) __builtin_ia32_dppd ((__v2df)(X), (__v2df)(Y), (M)))
+#endif
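+
+/* In the mask __M, the high four bits select which element products
+ are summed and the low four bits select which result elements
+ receive the sum (the rest are zeroed). For example,
+ _mm_dp_ps (__X, __Y, 0xf1) places the full four-element dot product
+ in element 0 and zeroes elements 1-3. */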
+
+/* Packed integer 64-bit comparison, zeroing or filling with ones the
+ corresponding parts of the result. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cmpeq_epi64 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pcmpeqq ((__v2di)__X, (__v2di)__Y);
+}
+
+/* Min/max packed integer instructions. */
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_min_epi8 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_max_epi8 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_min_epu16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_max_epu16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_min_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_max_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_min_epu32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_max_epu32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* Packed integer 32-bit multiplication with truncation of the upper
+ halves of the results. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_mullo_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* Packed integer 32-bit multiplication of two pairs of operands,
+ producing two 64-bit results. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_mul_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* Packed integer 128-bit bitwise comparison. Return 1 if
+ (__V & __M) == 0. */
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_testz_si128 (__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
+}
+
+/* Packed integer 128-bit bitwise comparison. Return 1 if
+ (__V & ~__M) == 0. */
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_testc_si128 (__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
+}
+
+/* Packed integer 128-bit bitwise comparison. Return 1 if
+ (__V & __M) != 0 && (__V & ~__M) != 0. */
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_testnzc_si128 (__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
+}
+
+/* Macros for packed integer 128-bit comparison intrinsics. */
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+
+#define _mm_test_all_ones(V) \
+ _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))
+
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))
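+
+/* For example, _mm_testz_si128 (__V, __V) is nonzero exactly when __V
+ is all zeros, and _mm_test_all_ones compares a vector with the
+ all-ones mask obtained from _mm_cmpeq_epi32 ((V), (V)). */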
+
+/* Insert single precision float into packed single precision array
+ element selected by index N. Bits [7-6] of N select the S element,
+ bits [5-4] select the D element, and bits [3-0] give the zeroing
+ mask for D. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_insert_ps (__m128 __D, __m128 __S, const int __N)
+{
+ return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D,
+ (__v4sf)__S,
+ __N);
+}
+#else
+#define _mm_insert_ps(D, S, N) \
+ ((__m128) __builtin_ia32_insertps128 ((__v4sf)(D), (__v4sf)(S), (N)))
+#endif
+
+/* Helper macro to create the N value for _mm_insert_ps. */
+#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M))
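+
+/* For example, _MM_MK_INSERTPS_NDX (2, 0, 0) == 0x80, so
+ _mm_insert_ps (d, s, _MM_MK_INSERTPS_NDX (2, 0, 0)) copies element
+ 2 of S into element 0 of D and zeroes nothing. */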
+
+/* Extract the binary representation of the single precision float in
+ the packed single precision array element of X selected by index
+ N. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_extract_ps (__m128 __X, const int __N)
+{
+ union { int i; float f; } __tmp;
+ __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N);
+ return __tmp.i;
+}
+#else
+#define _mm_extract_ps(X, N) \
+ (__extension__ \
+ ({ \
+ union { int i; float f; } __tmp; \
+ __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(X), (N)); \
+ __tmp.i; \
+ }) \
+ )
+#endif
+
+/* Extract the single precision float element of S selected by index
+ N into D. */
+#define _MM_EXTRACT_FLOAT(D, S, N) \
+ { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); }
+
+/* Extract specified single precision float element into the lower
+ part of __m128. */
+#define _MM_PICK_OUT_PS(X, N) \
+ _mm_insert_ps (_mm_setzero_ps (), (X), \
+ _MM_MK_INSERTPS_NDX ((N), 0, 0x0e))
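+
+/* For example, _MM_EXTRACT_FLOAT (f, V, 2) stores the value of
+ element 2 of V into the float f, while _mm_extract_ps (V, 2)
+ yields the same 32 bits reinterpreted as an int. */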
+
+/* Insert the integer S into the packed integer array element of D
+ selected by index N. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_insert_epi8 (__m128i __D, int __S, const int __N)
+{
+ return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D,
+ __S, __N);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_insert_epi32 (__m128i __D, int __S, const int __N)
+{
+ return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D,
+ __S, __N);
+}
+
+#ifdef __x86_64__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_insert_epi64 (__m128i __D, long long __S, const int __N)
+{
+ return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D,
+ __S, __N);
+}
+#endif
+#else
+#define _mm_insert_epi8(D, S, N) \
+ ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(D), (S), (N)))
+
+#define _mm_insert_epi32(D, S, N) \
+ ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(D), (S), (N)))
+
+#ifdef __x86_64__
+#define _mm_insert_epi64(D, S, N) \
+ ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(D), (S), (N)))
+#endif
+#endif
+
+/* Extract integer from packed integer array element of X selected by
+ index N. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_extract_epi8 (__m128i __X, const int __N)
+{
+ return __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_extract_epi32 (__m128i __X, const int __N)
+{
+ return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N);
+}
+
+#ifdef __x86_64__
+__STATIC_INLINE long long __attribute__((__always_inline__))
+_mm_extract_epi64 (__m128i __X, const int __N)
+{
+ return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N);
+}
+#endif
+#else
+#define _mm_extract_epi8(X, N) \
+ __builtin_ia32_vec_ext_v16qi ((__v16qi)(X), (N))
+#define _mm_extract_epi32(X, N) \
+ __builtin_ia32_vec_ext_v4si ((__v4si)(X), (N))
+
+#ifdef __x86_64__
+#define _mm_extract_epi64(X, N) \
+ ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(X), (N)))
+#endif
+#endif
+
+/* Return horizontal packed word minimum and its index in bits [15:0]
+ and bits [18:16] respectively. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_minpos_epu16 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
+}
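+
+/* For example, the result can be unpacked with the SSE2 intrinsic
+ _mm_extract_epi16:
+
+ __m128i r = _mm_minpos_epu16 (x);
+ int min = _mm_extract_epi16 (r, 0);
+ int idx = _mm_extract_epi16 (r, 1) & 0x7;
+*/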
+
+/* Packed/scalar double precision floating point rounding. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+_mm_round_pd (__m128d __V, const int __M)
+{
+ return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
+}
+
+__STATIC_INLINE __m128d __attribute__((__always_inline__))
+_mm_round_sd(__m128d __D, __m128d __V, const int __M)
+{
+ return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
+ (__v2df)__V,
+ __M);
+}
+#else
+#define _mm_round_pd(V, M) \
+ ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))
+
+#define _mm_round_sd(D, V, M) \
+ ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
+#endif
+
+/* Packed/scalar single precision floating point rounding. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_round_ps (__m128 __V, const int __M)
+{
+ return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
+}
+
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+_mm_round_ss (__m128 __D, __m128 __V, const int __M)
+{
+ return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
+ (__v4sf)__V,
+ __M);
+}
+#else
+#define _mm_round_ps(V, M) \
+ ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))
+
+#define _mm_round_ss(D, V, M) \
+ ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
+#endif
+
+/* Macros for ceil/floor intrinsics. */
+#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)
+
+#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)
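+
+/* For example, _mm_round_ps (V, _MM_FROUND_TO_NEAREST_INT
+ | _MM_FROUND_NO_EXC) rounds to nearest without raising a precision
+ exception, composing the _MM_FROUND_* constants defined earlier in
+ this header. */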
+
+/* Packed integer sign-extension. */
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi8_epi32 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi16_epi32 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi8_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi32_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi16_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepi8_epi16 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X);
+}
+
+/* Packed integer zero-extension. */
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu8_epi32 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu16_epi32 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu8_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu32_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu16_epi64 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cvtepu8_epi16 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X);
+}
+
+/* Pack 8 double words from 2 operands into the 8 words of the result
+ with unsigned saturation. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_packus_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* Sum absolute 8-bit integer differences of adjacent groups of 4
+ byte integers in the first 2 operands. Starting offsets within
+ the operands are determined by the 3rd mask operand. */
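+/* (Bits [1:0] of the mask give the 4-byte-aligned offset of the
+ group taken from the second operand; bit [2] gives the 4-byte
+ starting offset within the first operand.) */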
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M)
+{
+ return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X,
+ (__v16qi)__Y, __M);
+}
+#else
+#define _mm_mpsadbw_epu8(X, Y, M) \
+ ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(X), (__v16qi)(Y), (M)))
+#endif
+
+/* Load double quadword using non-temporal aligned hint. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_stream_load_si128 (__m128i *__X)
+{
+ return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X);
+}
+
+#ifdef __SSE4_2__
+
+/* These macros specify the source data format. */
+#define SIDD_UBYTE_OPS 0x00
+#define SIDD_UWORD_OPS 0x01
+#define SIDD_SBYTE_OPS 0x02
+#define SIDD_SWORD_OPS 0x03
+
+/* These macros specify the comparison operation. */
+#define SIDD_CMP_EQUAL_ANY 0x00
+#define SIDD_CMP_RANGES 0x04
+#define SIDD_CMP_EQUAL_EACH 0x08
+#define SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity. */
+#define SIDD_POSITIVE_POLARITY 0x00
+#define SIDD_NEGATIVE_POLARITY 0x10
+#define SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros specify the output selection in _mm_cmpXstri (). */
+#define SIDD_LEAST_SIGNIFICANT 0x00
+#define SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros specify the output selection in _mm_cmpXstrm (). */
+#define SIDD_BIT_MASK 0x00
+#define SIDD_UNIT_MASK 0x40
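+
+/* The four groups above occupy disjoint bit fields of the 8-bit mode
+ operand, so one value from each applicable group is OR'ed together.
+ For example, SIDD_UBYTE_OPS | SIDD_CMP_RANGES
+ | SIDD_NEGATIVE_POLARITY (0x14) searches for the first byte falling
+ outside the given ranges. */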
+
+/* Intrinsics for text/string processing. */
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M)
+{
+ return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistri (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistri128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+#else
+#define _mm_cmpistrm(X, Y, M) \
+ ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(X), (__v16qi)(Y), (M)))
+#define _mm_cmpistri(X, Y, M) \
+ __builtin_ia32_pcmpistri128 ((__v16qi)(X), (__v16qi)(Y), (M))
+
+#define _mm_cmpestrm(X, LX, Y, LY, M) \
+ ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M)))
+#define _mm_cmpestri(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestri128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#endif
+
+/* Intrinsics for text/string processing and reading values of
+ EFlags. */
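+/* The trailing letter names the flag read: 'c' reads CF, 'z' reads
+ ZF, 's' reads SF, 'o' reads OF, and 'a' returns 1 only when both
+ CF and ZF are clear. */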
+
+#ifdef __OPTIMIZE__
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistra (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistria128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistric128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistro (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistrio128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistris128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M)
+{
+ return __builtin_ia32_pcmpistriz128 ((__v16qi)__X,
+ (__v16qi)__Y,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M)
+{
+ return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX,
+ (__v16qi)__Y, __LY,
+ __M);
+}
+#else
+#define _mm_cmpistra(X, Y, M) \
+ __builtin_ia32_pcmpistria128 ((__v16qi)(X), (__v16qi)(Y), (M))
+#define _mm_cmpistrc(X, Y, M) \
+ __builtin_ia32_pcmpistric128 ((__v16qi)(X), (__v16qi)(Y), (M))
+#define _mm_cmpistro(X, Y, M) \
+ __builtin_ia32_pcmpistrio128 ((__v16qi)(X), (__v16qi)(Y), (M))
+#define _mm_cmpistrs(X, Y, M) \
+ __builtin_ia32_pcmpistris128 ((__v16qi)(X), (__v16qi)(Y), (M))
+#define _mm_cmpistrz(X, Y, M) \
+ __builtin_ia32_pcmpistriz128 ((__v16qi)(X), (__v16qi)(Y), (M))
+
+#define _mm_cmpestra(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestria128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpestrc(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestric128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpestro(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestrio128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpestrs(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestris128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#define _mm_cmpestrz(X, LX, Y, LY, M) \
+ __builtin_ia32_pcmpestriz128 ((__v16qi)(X), (int)(LX), \
+ (__v16qi)(Y), (int)(LY), (M))
+#endif
+
+/* Packed integer 64-bit greater-than comparison, zeroing or filling
+ with ones the corresponding parts of the result. */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+_mm_cmpgt_epi64 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pcmpgtq ((__v2di)__X, (__v2di)__Y);
+}
+
+/* Calculate the number of bits set to 1. */
+__STATIC_INLINE int __attribute__((__always_inline__))
+_mm_popcnt_u32 (unsigned int __X)
+{
+ return __builtin_popcount (__X);
+}
+
+#ifdef __x86_64__
+__STATIC_INLINE long long __attribute__((__always_inline__))
+_mm_popcnt_u64 (unsigned long long __X)
+{
+ return __builtin_popcountll (__X);
+}
+#endif
+
+/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+_mm_crc32_u8 (unsigned int __C, unsigned char __V)
+{
+ return __builtin_ia32_crc32qi (__C, __V);
+}
+
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+_mm_crc32_u16 (unsigned int __C, unsigned short __V)
+{
+ return __builtin_ia32_crc32hi (__C, __V);
+}
+
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+_mm_crc32_u32 (unsigned int __C, unsigned int __V)
+{
+ return __builtin_ia32_crc32si (__C, __V);
+}
+
+#ifdef __x86_64__
+__STATIC_INLINE unsigned long long __attribute__((__always_inline__))
+_mm_crc32_u64 (unsigned long long __C, unsigned long long __V)
+{
+ return __builtin_ia32_crc32di (__C, __V);
+}
+#endif
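+
+/* For example, a byte-at-a-time CRC32C over a buffer (hypothetical
+ helper, for illustration only):
+
+ static unsigned int
+ crc32c_bytes (unsigned int crc, const unsigned char *p, int n)
+ {
+ while (n-- > 0)
+ crc = _mm_crc32_u8 (crc, *p++);
+ return crc;
+ }
+*/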
+
+#endif /* __SSE4_2__ */
+
+#endif /* __SSE4_1__ */
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+#endif /* _SMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/sse.md b/gcc-4.2.1-5666.3/gcc/config/i386/sse.md
new file mode 100644
index 000000000..40318d83a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/sse.md
@@ -0,0 +1,6218 @@
+;; GCC machine description for SSE instructions
+;; Copyright (C) 2005, 2006
+;; Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+
+;; 16 byte integral modes handled by SSE, minus TImode, which gets
+;; special-cased for TARGET_64BIT.
+(define_mode_macro SSEMODEI [V16QI V8HI V4SI V2DI])
+
+;; All 16-byte vector modes handled by SSE
+(define_mode_macro SSEMODE [V16QI V8HI V4SI V2DI V4SF V2DF])
+
+;; Mix-n-match
+(define_mode_macro SSEMODE12 [V16QI V8HI])
+(define_mode_macro SSEMODE24 [V8HI V4SI])
+(define_mode_macro SSEMODE14 [V16QI V4SI])
+(define_mode_macro SSEMODE124 [V16QI V8HI V4SI])
+(define_mode_macro SSEMODE248 [V8HI V4SI V2DI])
+
+;; Mapping from integer vector mode to mnemonic suffix
+(define_mode_attr ssevecsize [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")])
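+
+;; For example, "mov<mode>" below generates one pattern per mode in
+;; SSEMODEI (movv16qi, movv8hi, movv4si, movv2di), and <ssevecsize>
+;; lets a template pick the matching mnemonic letter, e.g. V4SI -> "d".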
+
+;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Move patterns
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; All of these patterns are enabled for SSE1 as well as SSE2.
+;; This is essential for maintaining stable calling conventions.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:SSEMODEI 0 "nonimmediate_operand" "")
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_move (<MODE>mode, operands);
+ DONE;
+})
+
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:SSEMODEI 0 "nonimmediate_operand" "=x,x ,m")
+ (match_operand:SSEMODEI 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return standard_sse_constant_opcode (insn, operands[1]);
+ case 1:
+ case 2:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movdqa\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "sselog1,ssemov,ssemov")
+ (set (attr "mode")
+ (if_then_else
+ (ior (ior (ne (symbol_ref "optimize_size") (const_int 0))
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (and (eq_attr "alternative" "2")
+ (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
+ (const_int 0))))
+ (const_string "V4SF")
+ (const_string "TI")))])
+
+(define_expand "movv4sf"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
+ (match_operand:V4SF 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_move (V4SFmode, operands);
+ DONE;
+})
+
+(define_insn "*movv4sf_internal"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
+ (match_operand:V4SF 1 "nonimmediate_or_sse_const_operand" "C,xm,x"))]
+ "TARGET_SSE"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return standard_sse_constant_opcode (insn, operands[1]);
+ case 1:
+ case 2:
+ return "movaps\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "sselog1,ssemov,ssemov")
+ (set_attr "mode" "V4SF")])
+
+(define_split
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "zero_extended_scalar_load_operand" ""))]
+ "TARGET_SSE && reload_completed"
+ [(set (match_dup 0)
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF (match_dup 1))
+ (match_dup 2)
+ (const_int 1)))]
+{
+ operands[1] = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0);
+ operands[2] = CONST0_RTX (V4SFmode);
+})
+
+(define_expand "movv2df"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "")
+ (match_operand:V2DF 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_move (V2DFmode, operands);
+ DONE;
+})
+
+(define_insn "*movv2df_internal"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
+ (match_operand:V2DF 1 "nonimmediate_or_sse_const_operand" "C,xm,x"))]
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return standard_sse_constant_opcode (insn, operands[1]);
+ case 1:
+ case 2:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movapd\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "sselog1,ssemov,ssemov")
+ (set (attr "mode")
+ (if_then_else
+ (ior (ior (ne (symbol_ref "optimize_size") (const_int 0))
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (and (eq_attr "alternative" "2")
+ (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
+ (const_int 0))))
+ (const_string "V4SF")
+ (const_string "V2DF")))])
+
+(define_split
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (match_operand:V2DF 1 "zero_extended_scalar_load_operand" ""))]
+ "TARGET_SSE2 && reload_completed"
+ [(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))]
+{
+ operands[1] = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0);
+ operands[2] = CONST0_RTX (DFmode);
+})
+
+(define_expand "push<mode>1"
+ [(match_operand:SSEMODE 0 "register_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_push (<MODE>mode, operands[0]);
+ DONE;
+})
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:SSEMODE 0 "nonimmediate_operand" "")
+ (match_operand:SSEMODE 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_move_misalign (<MODE>mode, operands);
+ DONE;
+})
+
+(define_insn "sse_movups"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,m")
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,x")]
+ UNSPEC_MOVU))]
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "movups\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_movupd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,m")
+ (unspec:V2DF [(match_operand:V2DF 1 "nonimmediate_operand" "xm,x")]
+ UNSPEC_MOVU))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "movupd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_movdqu"
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,m")
+ (unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "xm,x")]
+ UNSPEC_MOVU))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "movdqu\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse_movntv4sf"
+ [(set (match_operand:V4SF 0 "memory_operand" "=m")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE"
+ "movntps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse2_movntv2df"
+ [(set (match_operand:V2DF 0 "memory_operand" "=m")
+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE2"
+ "movntpd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_movntv2di"
+ [(set (match_operand:V2DI 0 "memory_operand" "=m")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE2"
+ "movntdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_movntsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE2"
+ "movnti\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+;; APPLE LOCAL begin 4099020, 4121692
+(define_insn "sse_loadqv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=rx")
+ (unspec:V4SI [(zero_extend:V4SI (match_operand:V2SI 1 "memory_operand" "m"))] UNSPEC_LDQ))]
+ "TARGET_SSE"
+ "movq\t{%1, %0|%0, %1}")
+
+;; APPLE LOCAL begin 4279065
+(define_insn "sse_storeqv4si"
+ [(set (match_operand:V2SI 0 "memory_operand" "=m")
+ (unspec:V2SI [(match_operand:V4SI 1 "register_operand" "x")] UNSPEC_STOQ))]
+ "TARGET_SSE"
+ "movq\t{%1, %0|%0, %1}")
+;; APPLE LOCAL end 4279065
+
+(define_insn "sse_movqv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (unspec:V4SI [(zero_extend:V4SI (subreg:V2SI
+ (match_operand:V4SI 1 "register_operand" "x") 0))] UNSPEC_MOVQ))]
+ "TARGET_SSE"
+ "movq\t{%1, %0|%0, %1}")
+;; APPLE LOCAL end 4099020, 4121692
+
+(define_insn "sse3_lddqu"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "m")]
+ UNSPEC_LDQQU))]
+ "TARGET_SSE3"
+ "lddqu\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point arithmetic
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "negv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (neg:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_expand_fp_absneg_operator (NEG, V4SFmode, operands); DONE;")
+
+(define_expand "absv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (abs:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_expand_fp_absneg_operator (ABS, V4SFmode, operands); DONE;")
+
+(define_expand "addv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (plus:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (PLUS, V4SFmode, operands);")
+
+(define_insn "*addv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (plus:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && ix86_binary_operator_ok (PLUS, V4SFmode, operands)"
+ "addps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmaddv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (plus:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE && ix86_binary_operator_ok (PLUS, V4SFmode, operands)"
+ "addss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_expand "subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (MINUS, V4SFmode, operands);")
+
+(define_insn "*subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "subps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmsubv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "subss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "SF")])
+
+(define_expand "mulv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (mult:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (MULT, V4SFmode, operands);")
+
+(define_insn "*mulv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (mult:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && ix86_binary_operator_ok (MULT, V4SFmode, operands)"
+ "mulps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmmulv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (mult:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE && ix86_binary_operator_ok (MULT, V4SFmode, operands)"
+ "mulss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "SF")])
+
+(define_expand "divv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (div:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (DIV, V4SFmode, operands);")
+
+(define_insn "*divv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (div:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "divps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssediv")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmdivv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (div:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "divss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssediv")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_rcpv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF
+ [(match_operand:V4SF 1 "nonimmediate_operand" "xm")] UNSPEC_RCP))]
+ "TARGET_SSE"
+ "rcpps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmrcpv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_RCP)
+ (match_operand:V4SF 2 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "rcpss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_rsqrtv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF
+ [(match_operand:V4SF 1 "nonimmediate_operand" "xm")] UNSPEC_RSQRT))]
+ "TARGET_SSE"
+ "rsqrtps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmrsqrtv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_RSQRT)
+ (match_operand:V4SF 2 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "rsqrtss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")])
+
+(define_insn "sqrtv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (sqrt:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "sqrtps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmsqrtv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (sqrt:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "xm"))
+ (match_operand:V4SF 2 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "sqrtss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")])
+
+;; ??? For !flag_finite_math_only, the representation with SMIN/SMAX
+;; isn't really correct, as those rtl operators aren't defined when
+;; applied to NaNs. Hopefully the optimizers won't get too smart on us.
+
+(define_expand "smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (smax:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+{
+ if (!flag_finite_math_only)
+ operands[1] = force_reg (V4SFmode, operands[1]);
+ ix86_fixup_binary_operands_no_copy (SMAX, V4SFmode, operands);
+})
+
+(define_insn "*smaxv4sf3_finite"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (smax:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && flag_finite_math_only
+ && ix86_binary_operator_ok (SMAX, V4SFmode, operands)"
+ "maxps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "maxps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmsmaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "maxss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")])
+
+(define_expand "sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (smin:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+{
+ if (!flag_finite_math_only)
+ operands[1] = force_reg (V4SFmode, operands[1]);
+ ix86_fixup_binary_operands_no_copy (SMIN, V4SFmode, operands);
+})
+
+(define_insn "*sminv4sf3_finite"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (smin:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && flag_finite_math_only
+ && ix86_binary_operator_ok (SMIN, V4SFmode, operands)"
+ "minps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "minps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmsminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "minss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "SF")])
+
+;; These versions of the min/max patterns implement exactly the operations
+;; min = (op1 < op2 ? op1 : op2)
+;; max = (!(op1 < op2) ? op1 : op2)
+;; These operations are not commutative, and thus they may be used in the
+;; presence of -0.0 and NaN.
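+;; For example, with minps (op1 in %0, op2 in %2): min (x, NaN) = NaN
+;; and min (NaN, x) = x, since the compare is false and op2 is
+;; returned in both cases; likewise min (-0.0, +0.0) = +0.0.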
+
+(define_insn "*ieee_sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MIN))]
+ "TARGET_SSE"
+ "minps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*ieee_smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MAX))]
+ "TARGET_SSE"
+ "maxps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*ieee_sminv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MIN))]
+ "TARGET_SSE2"
+ "minpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*ieee_smaxv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (unspec:V2DF [(match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_IEEE_MAX))]
+ "TARGET_SSE2"
+ "maxpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse3_addsubv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (plus:V4SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (minus:V4SF (match_dup 1) (match_dup 2))
+ (const_int 5)))]
+ "TARGET_SSE3"
+ "addsubps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse3_haddv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_concat:V4SF
+ (vec_concat:V2SF
+ (plus:SF
+ (vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
+ (plus:SF
+ (vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SF
+ (plus:SF
+ (vec_select:SF
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
+ (plus:SF
+ (vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSE3"
+ "haddps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse3_hsubv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_concat:V4SF
+ (vec_concat:V2SF
+ (minus:SF
+ (vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
+ (minus:SF
+ (vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SF
+ (minus:SF
+ (vec_select:SF
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
+ (minus:SF
+ (vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSE3"
+ "hsubps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V4SF")])
+
+(define_expand "reduc_splus_v4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")]
+ "TARGET_SSE"
+{
+ if (TARGET_SSE3)
+ {
+ rtx tmp = gen_reg_rtx (V4SFmode);
+ emit_insn (gen_sse3_haddv4sf3 (tmp, operands[1], operands[1]));
+ emit_insn (gen_sse3_haddv4sf3 (operands[0], tmp, tmp));
+ }
+ else
+ ix86_expand_reduc_v4sf (gen_addv4sf3, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "reduc_smax_v4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_reduc_v4sf (gen_smaxv4sf3, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "reduc_smin_v4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_reduc_v4sf (gen_sminv4sf3, operands[0], operands[1]);
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point comparisons
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse_maskcmpv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (match_operator:V4SF 3 "sse_comparison_operator"
+ [(match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")]))]
+ "TARGET_SSE"
+ "cmp%D3ps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_vmmaskcmpv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (match_operator:V4SF 3 "sse_comparison_operator"
+ [(match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "register_operand" "x")])
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "cmp%D3ss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_comi"
+ [(set (reg:CCFP FLAGS_REG)
+ (compare:CCFP
+ (vec_select:SF
+ (match_operand:V4SF 0 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE"
+ "comiss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecomi")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_ucomi"
+ [(set (reg:CCFPU FLAGS_REG)
+ (compare:CCFPU
+ (vec_select:SF
+ (match_operand:V4SF 0 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE"
+ "ucomiss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecomi")
+ (set_attr "mode" "SF")])
+
+(define_expand "vcondv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (if_then_else:V4SF
+ (match_operator 3 ""
+ [(match_operand:V4SF 4 "nonimmediate_operand" "")
+ (match_operand:V4SF 5 "nonimmediate_operand" "")])
+ (match_operand:V4SF 1 "general_operand" "")
+ (match_operand:V4SF 2 "general_operand" "")))]
+ "TARGET_SSE"
+{
+ if (ix86_expand_fp_vcond (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point logical operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "andv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (and:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (AND, V4SFmode, operands);")
+
+(define_insn "*andv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && ix86_binary_operator_ok (AND, V4SFmode, operands)"
+ "andps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_nandv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (not:V4SF (match_operand:V4SF 1 "register_operand" "0"))
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE"
+ "andnps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_expand "iorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (ior:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (IOR, V4SFmode, operands);")
+
+(define_insn "*iorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (ior:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && ix86_binary_operator_ok (IOR, V4SFmode, operands)"
+ "orps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_expand "xorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (xor:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+ "ix86_fixup_binary_operands_no_copy (XOR, V4SFmode, operands);")
+
+(define_insn "*xorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (xor:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE && ix86_binary_operator_ok (XOR, V4SFmode, operands)"
+ "xorps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+;; Also define scalar versions. These are used for abs, neg, and
+;; conditional move. Using subregs into vector modes causes register
+;; allocation lossage. These patterns do not allow memory operands
+;; because the native instructions read the full 128-bits.
+
+(define_insn "*andsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (and:SF (match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "register_operand" "x")))]
+ "TARGET_SSE"
+ "andps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*nandsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (and:SF (not:SF (match_operand:SF 1 "register_operand" "0"))
+ (match_operand:SF 2 "register_operand" "x")))]
+ "TARGET_SSE"
+ "andnps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*iorsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (ior:SF (match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "register_operand" "x")))]
+ "TARGET_SSE"
+ "orps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "*xorsf3"
+ [(set (match_operand:SF 0 "register_operand" "=x")
+ (xor:SF (match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "register_operand" "x")))]
+ "TARGET_SSE"
+ "xorps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point conversion operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse_cvtpi2ps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (float:V2SF (match_operand:V2SI 2 "nonimmediate_operand" "ym")))
+ (match_operand:V4SF 1 "register_operand" "0")
+ (const_int 3)))]
+ "TARGET_SSE"
+ "cvtpi2ps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_cvtps2pi"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_select:V2SI
+ (unspec:V4SI [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FIX_NOTRUNC)
+ (parallel [(const_int 0) (const_int 1)])))]
+ "TARGET_SSE"
+ "cvtps2pi\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "unit" "mmx")
+ (set_attr "mode" "DI")])
+
+(define_insn "sse_cvttps2pi"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_select:V2SI
+ (fix:V4SI (match_operand:V4SF 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 1)])))]
+ "TARGET_SSE"
+ "cvttps2pi\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "unit" "mmx")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_cvtsi2ss"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (float:SF (match_operand:SI 2 "nonimmediate_operand" "r,m")))
+ (match_operand:V4SF 1 "register_operand" "0,0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "cvtsi2ss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "vector,double")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_cvtsi2ssq"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (float:SF (match_operand:DI 2 "nonimmediate_operand" "r,rm")))
+ (match_operand:V4SF 1 "register_operand" "0,0")
+ (const_int 1)))]
+ "TARGET_SSE && TARGET_64BIT"
+ "cvtsi2ssq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "vector,double")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse_cvtss2si"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI
+ [(vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE"
+ "cvtss2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "SI")])
+
+(define_insn "sse_cvtss2siq"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (unspec:DI
+ [(vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE && TARGET_64BIT"
+ "cvtss2siq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "DI")])
+
+(define_insn "sse_cvttss2si"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI
+ (vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE"
+ "cvttss2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "SI")])
+
+(define_insn "sse_cvttss2siq"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (fix:DI
+ (vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE && TARGET_64BIT"
+ "cvttss2siq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "DI")])
+
+(define_insn "sse2_cvtdq2ps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (float:V4SF (match_operand:V4SI 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "cvtdq2ps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_cvtps2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (unspec:V4SI [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2"
+ "cvtps2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_cvttps2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (fix:V4SI (match_operand:V4SF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "cvttps2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel single-precision floating point element swizzling
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse_movhlps"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "nonimmediate_operand" " 0,0,0")
+ (match_operand:V4SF 2 "nonimmediate_operand" " x,o,x"))
+ (parallel [(const_int 6)
+ (const_int 7)
+ (const_int 2)
+ (const_int 3)])))]
+ "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ movhlps\t{%2, %0|%0, %2}
+ movlps\t{%H2, %0|%0, %H2}
+ movhps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V4SF,V2SF,V2SF")])
+
+; APPLE LOCAL begin radar 4099352
+(define_insn "sse_movlhps"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,x,o")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "nonimmediate_operand" " 0,0,0,0")
+ /* APPLE LOCAL mainline */
+ (match_operand:V4SF 2 "nonimmediate_or_0_operand" " C,x,m,x"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 4)
+ (const_int 5)])))]
+ "TARGET_SSE && ix86_binary_operator_ok (UNKNOWN, V4SFmode, operands)"
+ "@
+ xorps\t%0, %0
+ movlhps\t{%2, %0|%0, %2}
+ movhps\t{%2, %0|%0, %2}
+ movlps\t{%2, %H0|%H0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V4SF,V4SF,V2SF,V2SF")])
+; APPLE LOCAL end radar 4099352
+
+(define_insn "sse_unpckhps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_SSE"
+ "unpckhps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_unpcklps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_SSE"
+ "unpcklps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+;; These are modeled with the same vec_concat as the others so that we
+;; capture users of shufps that can use the new instructions.
+(define_insn "sse3_movshdup"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (match_dup 1))
+ (parallel [(const_int 1)
+ (const_int 1)
+ (const_int 7)
+ (const_int 7)])))]
+ "TARGET_SSE3"
+ "movshdup\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse3_movsldup"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (match_dup 1))
+ (parallel [(const_int 0)
+ (const_int 0)
+ (const_int 6)
+ (const_int 6)])))]
+ "TARGET_SSE3"
+ "movsldup\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V4SF")])
+
+(define_expand "sse_shufps"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ int mask = INTVAL (operands[3]);
+ emit_insn (gen_sse_shufps_1 (operands[0], operands[1], operands[2],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT (((mask >> 4) & 3) + 4),
+ GEN_INT (((mask >> 6) & 3) + 4)));
+ DONE;
+})
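+
+;; For example, mask 0xe4 (binary 11 10 01 00) decomposes to selectors
+;; 0, 1, 6, 7, i.e. the result keeps elements 0-1 of operand 1 and
+;; takes elements 2-3 of operand 2; sse_loadlps below uses exactly
+;; this $0xe4 form.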
+
+(define_insn "sse_shufps_1"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_select:V4SF
+ (vec_concat:V8SF
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm"))
+ (parallel [(match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_4_to_7_operand" "")
+ (match_operand 6 "const_4_to_7_operand" "")])))]
+ "TARGET_SSE"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[3]) << 0;
+ mask |= INTVAL (operands[4]) << 2;
+ mask |= (INTVAL (operands[5]) - 4) << 4;
+ mask |= (INTVAL (operands[6]) - 4) << 6;
+ operands[3] = GEN_INT (mask);
+
+ return "shufps\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse_storehps"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x,o")
+ (parallel [(const_int 2) (const_int 3)])))]
+ "TARGET_SSE"
+ "@
+ movhps\t{%1, %0|%0, %1}
+ movhlps\t{%1, %0|%0, %1}
+ movlps\t{%H1, %0|%0, %H1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "sse_loadhps"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,o")
+ (vec_concat:V4SF
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "0,0,0")
+ (parallel [(const_int 0) (const_int 1)]))
+ (match_operand:V2SF 2 "nonimmediate_operand" "m,x,x")))]
+ "TARGET_SSE"
+ "@
+ movhps\t{%2, %0|%0, %2}
+ movlhps\t{%2, %0|%0, %2}
+ movlps\t{%2, %H0|%H0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "sse_storelps"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x,m")
+ (parallel [(const_int 0) (const_int 1)])))]
+ "TARGET_SSE"
+ "@
+ movlps\t{%1, %0|%0, %1}
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "sse_loadlps"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
+ (vec_concat:V4SF
+ (match_operand:V2SF 2 "nonimmediate_operand" "0,m,x")
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,0,0")
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_SSE"
+ "@
+ shufps\t{$0xe4, %1, %0|%0, %1, 0xe4}
+ movlps\t{%2, %0|%0, %2}
+ movlps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "mode" "V4SF,V2SF,V2SF")])
+
+(define_insn "sse_movss"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (match_operand:V4SF 2 "register_operand" "x")
+ (match_operand:V4SF 1 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "movss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "SF")])
+
+(define_insn "*vec_dupv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_duplicate:V4SF
+ (match_operand:SF 1 "register_operand" "0")))]
+ "TARGET_SSE"
+ "shufps\t{$0, %0, %0|%0, %0, 0}"
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "V4SF")])
+
+;; ??? In theory we can match memory for the MMX alternative, but allowing
+;; nonimmediate_operand for operand 2 and *not* allowing memory for the SSE
+;; alternatives pretty much forces the MMX alternative to be chosen.
+(define_insn "*sse_concatv2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=x,x,*y,*y")
+ (vec_concat:V2SF
+ (match_operand:SF 1 "nonimmediate_operand" " 0,m, 0, m")
+ (match_operand:SF 2 "reg_or_0_operand" " x,C,*y, C")))]
+ "TARGET_SSE"
+ "@
+ unpcklps\t{%2, %0|%0, %2}
+ movss\t{%1, %0|%0, %1}
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "mode" "V4SF,SF,DI,DI")])
+
+(define_insn "*sse_concatv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_concat:V4SF
+ (match_operand:V2SF 1 "register_operand" " 0,0")
+ (match_operand:V2SF 2 "nonimmediate_operand" " x,m")))]
+ "TARGET_SSE"
+ "@
+ movlhps\t{%2, %0|%0, %2}
+ movhps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V4SF,V2SF")])
+
+(define_expand "vec_initv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "*vec_setv4sf_0"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,Yt,m")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (match_operand:SF 2 "general_operand" " x,m,*r,x*rfF"))
+ /* APPLE LOCAL mainline */
+ (match_operand:V4SF 1 "nonimmediate_or_0_operand" " 0,C,C ,0")
+ (const_int 1)))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ movss\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}
+ movd\t{%2, %0|%0, %2}
+ #"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "SF")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; A subset of this pattern is exposed as vec_setv4sf.
+(define_insn "*vec_setv4sf_sse4_1"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (match_operand:SF 2 "nonimmediate_operand" "xm"))
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_8_operand" "n")))]
+ "TARGET_SSE4_1"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])) << 4);
+ return "insertps\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
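+;; The merge mask is a power of 2 naming the destination lane, so
+;; exact_log2 recovers the lane index and the shift places it in bits
+;; [5:4] (COUNT_D) of the INSERTPS immediate.  For example, a mask of
+;; (const_int 4) selects lane 2 and yields the immediate 0x20.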
+
+(define_insn "sse4_1_insertps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 2 "register_operand" "x")
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")]
+ UNSPEC_INSERTPS))]
+ "TARGET_SSE4_1"
+  "insertps\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_split
+ [(set (match_operand:V4SF 0 "memory_operand" "")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (match_operand:SF 1 "nonmemory_operand" ""))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_SSE && reload_completed"
+ [(const_int 0)]
+{
+ emit_move_insn (adjust_address (operands[0], SFmode, 0), operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_insn_and_split "*vec_extractv4sf_0"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=x,m,fr")
+ (vec_select:SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm,x,m")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx op1 = operands[1];
+ if (REG_P (op1))
+ op1 = gen_rtx_REG (SFmode, REGNO (op1));
+ else
+ op1 = gen_lowpart (SFmode, op1);
+ emit_move_insn (operands[0], op1);
+ DONE;
+})
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "*sse4_1_extractps"
+  [(set (match_operand:SF 0 "nonimmediate_operand" "=rm")
+ (vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n")])))]
+ "TARGET_SSE4_1"
+ "extractps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "vec_extractv4sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel double-precision floating point arithmetic
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "negv2df2"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (neg:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_expand_fp_absneg_operator (NEG, V2DFmode, operands); DONE;")
+
+(define_expand "absv2df2"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (abs:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_expand_fp_absneg_operator (ABS, V2DFmode, operands); DONE;")
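+;; ix86_expand_fp_absneg_operator lowers both to sign-bit arithmetic:
+;; NEG becomes an XOR with a vector of sign-bit masks and ABS an AND
+;; with their complement, via the logical patterns further below.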
+
+(define_expand "addv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (plus:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (PLUS, V2DFmode, operands);")
+
+(define_insn "*addv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (plus:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (PLUS, V2DFmode, operands)"
+ "addpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmaddv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (plus:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+  "TARGET_SSE2 && ix86_binary_operator_ok (PLUS, V2DFmode, operands)"
+ "addsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_expand "subv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (minus:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MINUS, V2DFmode, operands);")
+
+(define_insn "*subv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (minus:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "subpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmsubv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (minus:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "subsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_expand "mulv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (mult:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V2DFmode, operands);")
+
+(define_insn "*mulv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (mult:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V2DFmode, operands)"
+ "mulpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmmulv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (mult:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V2DFmode, operands)"
+ "mulsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemul")
+ (set_attr "mode" "DF")])
+
+(define_expand "divv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (div:V2DF (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (DIV, V2DFmode, operands);")
+
+(define_insn "*divv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (div:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "divpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssediv")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmdivv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (div:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "divsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssediv")
+ (set_attr "mode" "DF")])
+
+(define_insn "sqrtv2df2"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (sqrt:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "sqrtpd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmsqrtv2df2"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+	  (sqrt:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "xm"))
+ (match_operand:V2DF 2 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "sqrtsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sse")
+ (set_attr "mode" "DF")])
+
+;; ??? For !flag_finite_math_only, the representation with SMIN/SMAX
+;; isn't really correct, as those rtl operators aren't defined when
+;; applied to NaNs. Hopefully the optimizers won't get too smart on us.
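+;; (The underlying MAXPD/MINPD instructions are likewise not
+;; commutative for NaN inputs: per element, maxpd computes
+;; dst = src1 > src2 ? src1 : src2, so the second source operand is
+;; returned whenever the comparison is unordered.)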
+
+(define_expand "smaxv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (smax:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (!flag_finite_math_only)
+ operands[1] = force_reg (V2DFmode, operands[1]);
+ ix86_fixup_binary_operands_no_copy (SMAX, V2DFmode, operands);
+})
+
+(define_insn "*smaxv2df3_finite"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (smax:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && flag_finite_math_only
+ && ix86_binary_operator_ok (SMAX, V2DFmode, operands)"
+ "maxpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*smaxv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (smax:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "maxpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmsmaxv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (smax:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "maxsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_expand "sminv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (smin:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (!flag_finite_math_only)
+ operands[1] = force_reg (V2DFmode, operands[1]);
+ ix86_fixup_binary_operands_no_copy (SMIN, V2DFmode, operands);
+})
+
+(define_insn "*sminv2df3_finite"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (smin:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && flag_finite_math_only
+ && ix86_binary_operator_ok (SMIN, V2DFmode, operands)"
+ "minpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*sminv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (smin:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "minpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmsminv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (smin:V2DF (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "minsd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "DF")])
+
+(define_insn "sse3_addsubv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (plus:V2DF
+ (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (minus:V2DF (match_dup 1) (match_dup 2))
+ (const_int 1)))]
+ "TARGET_SSE3"
+ "addsubpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse3_haddv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_concat:V2DF
+ (plus:DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
+ (plus:DF
+ (vec_select:DF
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_SSE3"
+ "haddpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse3_hsubv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_concat:V2DF
+ (minus:DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
+ (minus:DF
+ (vec_select:DF
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_SSE3"
+ "hsubpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseadd")
+ (set_attr "mode" "V2DF")])
+
+(define_expand "reduc_splus_v2df"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:V2DF 1 "register_operand" "")]
+ "TARGET_SSE3"
+{
+ emit_insn (gen_sse3_haddv2df3 (operands[0], operands[1], operands[1]));
+ DONE;
+})
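+;; With both inputs equal to {a0, a1}, HADDPD produces {a0+a1, a0+a1},
+;; so the scalar sum can be read from either lane of the result.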
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel double-precision floating point comparisons
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_maskcmpv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (match_operator:V2DF 3 "sse_comparison_operator"
+ [(match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")]))]
+ "TARGET_SSE2"
+ "cmp%D3pd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_vmmaskcmpv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (match_operator:V2DF 3 "sse_comparison_operator"
+ [(match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")])
+ (match_dup 1)
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "cmp%D3sd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "DF")])
+
+(define_insn "sse2_comi"
+ [(set (reg:CCFP FLAGS_REG)
+ (compare:CCFP
+ (vec_select:DF
+ (match_operand:V2DF 0 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE2"
+ "comisd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecomi")
+ (set_attr "mode" "DF")])
+
+(define_insn "sse2_ucomi"
+ [(set (reg:CCFPU FLAGS_REG)
+ (compare:CCFPU
+ (vec_select:DF
+ (match_operand:V2DF 0 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE2"
+ "ucomisd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecomi")
+ (set_attr "mode" "DF")])
+
+(define_expand "vcondv2df"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (if_then_else:V2DF
+ (match_operator 3 ""
+ [(match_operand:V2DF 4 "nonimmediate_operand" "")
+ (match_operand:V2DF 5 "nonimmediate_operand" "")])
+ (match_operand:V2DF 1 "general_operand" "")
+ (match_operand:V2DF 2 "general_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (ix86_expand_fp_vcond (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel double-precision floating point logical operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "andv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (and:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (AND, V2DFmode, operands);")
+
+(define_insn "*andv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (AND, V2DFmode, operands)"
+ "andpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_nandv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (not:V2DF (match_operand:V2DF 1 "register_operand" "0"))
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "andnpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_expand "iorv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (ior:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (IOR, V2DFmode, operands);")
+
+(define_insn "*iorv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (ior:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (IOR, V2DFmode, operands)"
+ "orpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_expand "xorv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (XOR, V2DFmode, operands);")
+
+(define_insn "*xorv2df3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (XOR, V2DFmode, operands)"
+ "xorpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+;; Also define scalar versions. These are used for abs, neg, and
+;; conditional move. Using subregs into vector modes causes register
+;; allocation lossage. These patterns do not allow memory operands
+;; because the native instructions read the full 128 bits.
+
+(define_insn "*anddf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (and:DF (match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "register_operand" "x")))]
+ "TARGET_SSE2"
+ "andpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*nanddf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (and:DF (not:DF (match_operand:DF 1 "register_operand" "0"))
+ (match_operand:DF 2 "register_operand" "x")))]
+ "TARGET_SSE2"
+ "andnpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*iordf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (ior:DF (match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "register_operand" "x")))]
+ "TARGET_SSE2"
+ "orpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "*xordf3"
+ [(set (match_operand:DF 0 "register_operand" "=x")
+ (xor:DF (match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "register_operand" "x")))]
+ "TARGET_SSE2"
+ "xorpd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel double-precision floating point conversion operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_cvtpi2pd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (float:V2DF (match_operand:V2SI 1 "nonimmediate_operand" "y,m")))]
+ "TARGET_SSE2"
+ "cvtpi2pd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "unit" "mmx,*")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_cvtpd2pi"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2"
+ "cvtpd2pi\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "unit" "mmx")
+ (set_attr "mode" "DI")])
+
+(define_insn "sse2_cvttpd2pi"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "cvttpd2pi\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "unit" "mmx")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_cvtsi2sd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (vec_merge:V2DF
+ (vec_duplicate:V2DF
+ (float:DF (match_operand:SI 2 "nonimmediate_operand" "r,m")))
+ (match_operand:V2DF 1 "register_operand" "0,0")
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "cvtsi2sd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,direct")])
+
+(define_insn "sse2_cvtsi2sdq"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (vec_merge:V2DF
+ (vec_duplicate:V2DF
+ (float:DF (match_operand:DI 2 "nonimmediate_operand" "r,m")))
+ (match_operand:V2DF 1 "register_operand" "0,0")
+ (const_int 1)))]
+ "TARGET_SSE2 && TARGET_64BIT"
+ "cvtsi2sdq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DF")
+ (set_attr "athlon_decode" "double,direct")])
+
+(define_insn "sse2_cvtsd2si"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI
+ [(vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2"
+ "cvtsd2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "SI")])
+
+(define_insn "sse2_cvtsd2siq"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (unspec:DI
+ [(vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_SSE2 && TARGET_64BIT"
+ "cvtsd2siq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "athlon_decode" "double,vector")
+ (set_attr "mode" "DI")])
+
+(define_insn "sse2_cvttsd2si"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE2"
+ "cvttsd2si\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "SI")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "sse2_cvttsd2siq"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (fix:DI
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,m")
+ (parallel [(const_int 0)]))))]
+ "TARGET_SSE2 && TARGET_64BIT"
+ "cvttsd2siq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sseicvt")
+ (set_attr "mode" "DI")
+ (set_attr "athlon_decode" "double,vector")])
+
+(define_insn "sse2_cvtdq2pd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (float:V2DF
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_SSE2"
+ "cvtdq2pd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+(define_expand "sse2_cvtpd2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (vec_concat:V4SI
+ (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "")]
+ UNSPEC_FIX_NOTRUNC)
+ (match_dup 2)))]
+ "TARGET_SSE2"
+ "operands[2] = CONST0_RTX (V2SImode);")
+
+(define_insn "*sse2_cvtpd2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_concat:V4SI
+ (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FIX_NOTRUNC)
+ (match_operand:V2SI 2 "const0_operand" "")))]
+ "TARGET_SSE2"
+ "cvtpd2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_expand "sse2_cvttpd2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (vec_concat:V4SI
+ (fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" ""))
+ (match_dup 2)))]
+ "TARGET_SSE2"
+ "operands[2] = CONST0_RTX (V2SImode);")
+
+(define_insn "*sse2_cvttpd2dq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_concat:V4SI
+ (fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "xm"))
+ (match_operand:V2SI 2 "const0_operand" "")))]
+ "TARGET_SSE2"
+ "cvttpd2dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_cvtsd2ss"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_merge:V4SF
+ (vec_duplicate:V4SF
+ (float_truncate:V2SF
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,m")))
+ (match_operand:V4SF 1 "register_operand" "0,0")
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "cvtsd2ss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "athlon_decode" "vector,double")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse2_cvtss2sd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (float_extend:V2DF
+ (vec_select:V2SF
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 1)])))
+ (match_operand:V2DF 1 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "cvtss2sd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "DF")])
+
+(define_expand "sse2_cvtpd2ps"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (vec_concat:V4SF
+ (float_truncate:V2SF
+ (match_operand:V2DF 1 "nonimmediate_operand" "xm"))
+ (match_dup 2)))]
+ "TARGET_SSE2"
+ "operands[2] = CONST0_RTX (V2SFmode);")
+
+(define_insn "*sse2_cvtpd2ps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_concat:V4SF
+ (float_truncate:V2SF
+ (match_operand:V2DF 1 "nonimmediate_operand" "xm"))
+ (match_operand:V2SF 2 "const0_operand" "")))]
+ "TARGET_SSE2"
+ "cvtpd2ps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse2_cvtps2pd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (float_extend:V2DF
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_SSE2"
+ "cvtps2pd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel double-precision floating point element swizzling
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_unpckhpd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
+ (vec_select:V2DF
+ (vec_concat:V4DF
+ (match_operand:V2DF 1 "nonimmediate_operand" " 0,o,x")
+ (match_operand:V2DF 2 "nonimmediate_operand" " x,0,0"))
+ (parallel [(const_int 1)
+ (const_int 3)])))]
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ unpckhpd\t{%2, %0|%0, %2}
+ movlpd\t{%H1, %0|%0, %H1}
+ movhpd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "mode" "V2DF,V1DF,V1DF")])
+
+(define_insn "*sse3_movddup"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,o")
+ (vec_select:V2DF
+ (vec_concat:V4DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "xm,x")
+ (match_dup 1))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_SSE3 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movddup\t{%1, %0|%0, %1}
+ #"
+ [(set_attr "type" "sselog1,ssemov")
+ (set_attr "mode" "V2DF")])
+
+(define_split
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (vec_select:V2DF
+ (vec_concat:V4DF
+ (match_operand:V2DF 1 "register_operand" "")
+ (match_dup 1))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_SSE3 && reload_completed"
+ [(const_int 0)]
+{
+ rtx low = gen_rtx_REG (DFmode, REGNO (operands[1]));
+ emit_move_insn (adjust_address (operands[0], DFmode, 0), low);
+ emit_move_insn (adjust_address (operands[0], DFmode, 8), low);
+ DONE;
+})
+
+(define_insn "sse2_unpcklpd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,o")
+ (vec_select:V2DF
+ (vec_concat:V4DF
+ (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,0")
+ (match_operand:V2DF 2 "nonimmediate_operand" " x,m,x"))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ unpcklpd\t{%2, %0|%0, %2}
+ movhpd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %H0|%H0, %2}"
+ [(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "mode" "V2DF,V1DF,V1DF")])
+
+(define_expand "sse2_shufpd"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ int mask = INTVAL (operands[3]);
+ emit_insn (gen_sse2_shufpd_1 (operands[0], operands[1], operands[2],
+ GEN_INT (mask & 1),
+ GEN_INT (mask & 2 ? 3 : 2)));
+ DONE;
+})
+
+(define_insn "sse2_shufpd_1"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_select:V2DF
+ (vec_concat:V4DF
+ (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm"))
+ (parallel [(match_operand 3 "const_0_to_1_operand" "")
+ (match_operand 4 "const_2_to_3_operand" "")])))]
+ "TARGET_SSE2"
+{
+ int mask;
+ mask = INTVAL (operands[3]);
+ mask |= (INTVAL (operands[4]) - 2) << 1;
+ operands[3] = GEN_INT (mask);
+
+ return "shufpd\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "V2DF")])
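+;; Illustrative semantics for v = {a0, a1} and w = {b0, b1}:
+;;   dst[0] = (mask & 1) ? a1 : a0;  dst[1] = (mask & 2) ? b1 : b0;
+;; matching the two-bit SHUFPD immediate rebuilt above.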
+
+(define_insn "sse2_storehpd"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x*fr")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" " x,0,o")
+ (parallel [(const_int 1)])))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movhpd\t{%1, %0|%0, %1}
+ unpckhpd\t%0, %0
+ #"
+ [(set_attr "type" "ssemov,sselog1,ssemov")
+ (set_attr "mode" "V1DF,V2DF,DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (vec_select:DF
+ (match_operand:V2DF 1 "memory_operand" "")
+ (parallel [(const_int 1)])))]
+ "TARGET_SSE2 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[1] = adjust_address (operands[1], DFmode, 8);
+})
+
+(define_insn "sse2_storelpd"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x*fr")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" " x,x,m")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movlpd\t{%1, %0|%0, %1}
+ #
+ #"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V1DF,DF,DF")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE2 && reload_completed"
+ [(const_int 0)]
+{
+ rtx op1 = operands[1];
+ if (REG_P (op1))
+ op1 = gen_rtx_REG (DFmode, REGNO (op1));
+ else
+ op1 = gen_lowpart (DFmode, op1);
+ emit_move_insn (operands[0], op1);
+ DONE;
+})
+
+(define_insn "sse2_loadhpd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,o")
+ (vec_concat:V2DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,x,0")
+ (parallel [(const_int 0)]))
+ (match_operand:DF 2 "nonimmediate_operand" " m,x,0,x*fr")))]
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ movhpd\t{%2, %0|%0, %2}
+ unpcklpd\t{%2, %0|%0, %2}
+ shufpd\t{$1, %1, %0|%0, %1, 1}
+ #"
+ [(set_attr "type" "ssemov,sselog,sselog,other")
+ (set_attr "mode" "V1DF,V2DF,V2DF,DF")])
+
+(define_split
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (vec_concat:V2DF
+ (vec_select:DF (match_dup 0) (parallel [(const_int 0)]))
+ (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_SSE2 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_address (operands[0], DFmode, 8);
+})
+
+(define_insn "sse2_loadlpd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,x,x,m")
+ (vec_concat:V2DF
+ (match_operand:DF 2 "nonimmediate_operand" " m,m,x,0,0,x*fr")
+ (vec_select:DF
+ /* APPLE LOCAL mainline */
+ (match_operand:V2DF 1 "nonimmediate_or_0_operand" " C,0,0,x,o,0")
+ (parallel [(const_int 1)]))))]
+ "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
+ "@
+ movsd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %0|%0, %2}
+ movsd\t{%2, %0|%0, %2}
+ shufpd\t{$2, %2, %0|%0, %2, 2}
+ movhpd\t{%H1, %0|%0, %H1}
+ #"
+ [(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,other")
+ (set_attr "mode" "DF,V1DF,V1DF,V2DF,V1DF,DF")])
+
+(define_split
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (vec_concat:V2DF
+ (match_operand:DF 1 "register_operand" "")
+ (vec_select:DF (match_dup 0) (parallel [(const_int 1)]))))]
+ "TARGET_SSE2 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[0] = adjust_address (operands[0], DFmode, 8);
+})
+
+;; Not sure these two are ever used, but it doesn't hurt to have
+;; them. -aoliva
+(define_insn "*vec_extractv2df_1_sse"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x,o")
+ (parallel [(const_int 1)])))]
+ "!TARGET_SSE2 && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movhps\t{%1, %0|%0, %1}
+ movhlps\t{%1, %0|%0, %1}
+ movlps\t{%H1, %0|%0, %H1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "*vec_extractv2df_0_sse"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x,m")
+ (parallel [(const_int 0)])))]
+ "!TARGET_SSE2 && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movlps\t{%1, %0|%0, %1}
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "sse2_movsd"
+ [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m,x,x,o")
+ (vec_merge:V2DF
+ (match_operand:V2DF 2 "nonimmediate_operand" " x,m,x,0,0,0")
+ (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,0,x,o,x")
+ (const_int 1)))]
+ "TARGET_SSE2"
+ "@
+ movsd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %0|%0, %2}
+ movlpd\t{%2, %0|%0, %2}
+ shufpd\t{$2, %2, %0|%0, %2, 2}
+ movhps\t{%H1, %0|%0, %H1}
+ movhps\t{%1, %H0|%H0, %1}"
+ [(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
+ (set_attr "mode" "DF,V1DF,V1DF,V2DF,V1DF,V1DF")])
+
+(define_insn "*vec_dupv2df_sse3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_duplicate:V2DF
+ (match_operand:DF 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE3"
+ "movddup\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "DF")])
+
+(define_insn "*vec_dupv2df"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_duplicate:V2DF
+ (match_operand:DF 1 "register_operand" "0")))]
+ "TARGET_SSE2"
+ "unpcklpd\t%0, %0"
+ [(set_attr "type" "sselog1")
+   (set_attr "mode" "V2DF")])
+
+(define_insn "*vec_concatv2df_sse3"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_concat:V2DF
+ (match_operand:DF 1 "nonimmediate_operand" "xm")
+ (match_dup 1)))]
+ "TARGET_SSE3"
+ "movddup\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "DF")])
+
+(define_insn "*vec_concatv2df"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V2DF 0 "register_operand" "=Yt,Yt,Yt,x,x")
+ (vec_concat:V2DF
+ (match_operand:DF 1 "nonimmediate_operand" " 0 ,0 ,m ,0,0")
+ /* APPLE LOCAL mainline */
+ (match_operand:DF 2 "nonimmediate_or_0_operand" " Yt,m,C,x,m")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ unpcklpd\t{%2, %0|%0, %2}
+ movhpd\t{%2, %0|%0, %2}
+ movsd\t{%1, %0|%0, %1}
+ movlhps\t{%2, %0|%0, %2}
+ movhps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov")
+ (set_attr "mode" "V2DF,V1DF,DF,V4SF,V2SF")])
+
+(define_expand "vec_setv2df"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv2df"
+ [(match_operand:DF 0 "register_operand" "")
+ (match_operand:V2DF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv2df"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral arithmetic
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "neg<mode>2"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (minus:SSEMODEI
+ (match_dup 2)
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "operands[2] = force_reg (<MODE>mode, CONST0_RTX (<MODE>mode));")
+
+(define_expand "add<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (plus:SSEMODEI (match_operand:SSEMODEI 1 "nonimmediate_operand" "")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (PLUS, <MODE>mode, operands);")
+
+(define_insn "*add<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (plus:SSEMODEI
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (PLUS, <MODE>mode, operands)"
+ "padd<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_ssadd<mode>3"
+ [(set (match_operand:SSEMODE12 0 "register_operand" "=x")
+ (ss_plus:SSEMODE12
+ (match_operand:SSEMODE12 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE12 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (SS_PLUS, <MODE>mode, operands)"
+ "padds<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_usadd<mode>3"
+ [(set (match_operand:SSEMODE12 0 "register_operand" "=x")
+ (us_plus:SSEMODE12
+ (match_operand:SSEMODE12 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE12 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (US_PLUS, <MODE>mode, operands)"
+ "paddus<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (minus:SSEMODEI (match_operand:SSEMODEI 1 "register_operand" "")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MINUS, <MODE>mode, operands);")
+
+(define_insn "*sub<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (minus:SSEMODEI
+ (match_operand:SSEMODEI 1 "register_operand" "0")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "psub<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_sssub<mode>3"
+ [(set (match_operand:SSEMODE12 0 "register_operand" "=x")
+ (ss_minus:SSEMODE12
+ (match_operand:SSEMODE12 1 "register_operand" "0")
+ (match_operand:SSEMODE12 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "psubs<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_ussub<mode>3"
+ [(set (match_operand:SSEMODE12 0 "register_operand" "=x")
+ (us_minus:SSEMODE12
+ (match_operand:SSEMODE12 1 "register_operand" "0")
+ (match_operand:SSEMODE12 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "psubus<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "mulv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (mult:V16QI (match_operand:V16QI 1 "register_operand" "")
+ (match_operand:V16QI 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx t[12], op0;
+ int i;
+
+ for (i = 0; i < 12; ++i)
+ t[i] = gen_reg_rtx (V16QImode);
+
+ /* Unpack data such that we've got a source byte in each low byte of
+ each word. We don't care what goes into the high byte of each word.
+     Rather than trying to get zero in there, it is most convenient to
+     let it be a copy of the low byte.  */
+ emit_insn (gen_sse2_punpckhbw (t[0], operands[1], operands[1]));
+ emit_insn (gen_sse2_punpckhbw (t[1], operands[2], operands[2]));
+ emit_insn (gen_sse2_punpcklbw (t[2], operands[1], operands[1]));
+ emit_insn (gen_sse2_punpcklbw (t[3], operands[2], operands[2]));
+
+ /* Multiply words. The end-of-line annotations here give a picture of what
+ the output of that instruction looks like. Dot means don't care; the
+ letters are the bytes of the result with A being the most significant. */
+ emit_insn (gen_mulv8hi3 (gen_lowpart (V8HImode, t[4]), /* .A.B.C.D.E.F.G.H */
+ gen_lowpart (V8HImode, t[0]),
+ gen_lowpart (V8HImode, t[1])));
+ emit_insn (gen_mulv8hi3 (gen_lowpart (V8HImode, t[5]), /* .I.J.K.L.M.N.O.P */
+ gen_lowpart (V8HImode, t[2]),
+ gen_lowpart (V8HImode, t[3])));
+
+ /* Extract the relevant bytes and merge them back together. */
+ emit_insn (gen_sse2_punpckhbw (t[6], t[5], t[4])); /* ..AI..BJ..CK..DL */
+ emit_insn (gen_sse2_punpcklbw (t[7], t[5], t[4])); /* ..EM..FN..GO..HP */
+ emit_insn (gen_sse2_punpckhbw (t[8], t[7], t[6])); /* ....AEIM....BFJN */
+ emit_insn (gen_sse2_punpcklbw (t[9], t[7], t[6])); /* ....CGKO....DHLP */
+ emit_insn (gen_sse2_punpckhbw (t[10], t[9], t[8])); /* ........ACEGIKMO */
+ emit_insn (gen_sse2_punpcklbw (t[11], t[9], t[8])); /* ........BDFHJLNP */
+
+ op0 = operands[0];
+ emit_insn (gen_sse2_punpcklbw (op0, t[11], t[10])); /* ABCDEFGHIJKLMNOP */
+ DONE;
+})
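+;; The net effect (illustrative C) is a plain byte-wise multiply,
+;;   for (i = 0; i < 16; i++)
+;;     dst[i] = (unsigned char) (src1[i] * src2[i]);
+;; the unpack/repack dance exists only because SSE2 lacks a
+;; byte-multiply instruction.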
+
+(define_expand "mulv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:V8HI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+
+(define_insn "*mulv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "pmullw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_expand "smulv8hi3_highpart"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (mult:V8SI
+ (sign_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" ""))
+ (sign_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "")))
+ (const_int 16))))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+
+(define_insn "*smulv8hi3_highpart"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (mult:V8SI
+ (sign_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))
+ (const_int 16))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "pmulhw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+
+(define_expand "umulv8hi3_highpart"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (mult:V8SI
+ (zero_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" ""))
+ (zero_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "")))
+ (const_int 16))))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+
+(define_insn "*umulv8hi3_highpart"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (mult:V8SI
+ (zero_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))
+ (const_int 16))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "pmulhuw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_umulv2siv2di3"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (mult:V2DI
+ (zero_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "%0")
+ (parallel [(const_int 0) (const_int 2)])))
+ (zero_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 2)])))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+ "pmuludq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_mulv2siv2di3"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (mult:V2DI
+ (sign_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "%0")
+ (parallel [(const_int 0) (const_int 2)])))
+ (sign_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 2)])))))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+ "pmuldq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_insn "sse2_pmaddwd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (plus:V4SI
+ (mult:V4SI
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 1 "nonimmediate_operand" "%0")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)])))
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)]))))
+ (mult:V4SI
+ (sign_extend:V4SI
+ (vec_select:V4HI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)])))
+ (sign_extend:V4SI
+ (vec_select:V4HI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)]))))))]
+ "TARGET_SSE2"
+ "pmaddwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
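+;; In scalar terms (illustrative), PMADDWD computes, for j = 0..3,
+;;   dst[j] = (int) a[2*j] * b[2*j] + (int) a[2*j+1] * b[2*j+1]
+;; with the signed 16-bit inputs widened to 32 bits before the add.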
+
+(define_expand "mulv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (mult:V4SI (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx t1, t2, t3, t4, t5, t6, thirtytwo;
+ rtx op0, op1, op2;
+
+ op0 = operands[0];
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+ t3 = gen_reg_rtx (V4SImode);
+ t4 = gen_reg_rtx (V4SImode);
+ t5 = gen_reg_rtx (V4SImode);
+ t6 = gen_reg_rtx (V4SImode);
+ thirtytwo = GEN_INT (32);
+
+ /* Multiply elements 2 and 0. */
+ emit_insn (gen_sse2_umulv2siv2di3 (gen_lowpart (V2DImode, t1), op1, op2));
+
+ /* Shift both input vectors down one element, so that elements 3 and 1
+ are now in the slots for elements 2 and 0. For K8, at least, this is
+ faster than using a shuffle. */
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t2),
+ gen_lowpart (TImode, op1), thirtytwo));
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t3),
+ gen_lowpart (TImode, op2), thirtytwo));
+
+ /* Multiply elements 3 and 1. */
+ emit_insn (gen_sse2_umulv2siv2di3 (gen_lowpart (V2DImode, t4), t2, t3));
+
+ /* Move the results in element 2 down to element 1; we don't care what
+ goes in elements 2 and 3. */
+ emit_insn (gen_sse2_pshufd_1 (t5, t1, const0_rtx, const2_rtx,
+ const0_rtx, const0_rtx));
+ emit_insn (gen_sse2_pshufd_1 (t6, t4, const0_rtx, const2_rtx,
+ const0_rtx, const0_rtx));
+
+ /* Merge the parts back together. */
+ emit_insn (gen_sse2_punpckldq (op0, t5, t6));
+ DONE;
+})
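+;; Scalar equivalent (illustrative):
+;;   for (i = 0; i < 4; i++)
+;;     dst[i] = src1[i] * src2[i];
+;; PMULUDQ multiplies only the even elements, hence the shift-down of
+;; the odd elements, the second multiply, and the PSHUFD/PUNPCKLDQ
+;; recombination above.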
+
+(define_expand "mulv2di3"
+ [(set (match_operand:V2DI 0 "register_operand" "")
+ (mult:V2DI (match_operand:V2DI 1 "register_operand" "")
+ (match_operand:V2DI 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx t1, t2, t3, t4, t5, t6, thirtytwo;
+ rtx op0, op1, op2;
+
+ op0 = operands[0];
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V2DImode);
+ t2 = gen_reg_rtx (V2DImode);
+ t3 = gen_reg_rtx (V2DImode);
+ t4 = gen_reg_rtx (V2DImode);
+ t5 = gen_reg_rtx (V2DImode);
+ t6 = gen_reg_rtx (V2DImode);
+ thirtytwo = GEN_INT (32);
+
+ /* Multiply low parts. */
+ emit_insn (gen_sse2_umulv2siv2di3 (t1, gen_lowpart (V4SImode, op1),
+ gen_lowpart (V4SImode, op2)));
+
+  /* Shift input vectors right 32 bits so we can multiply high parts.  */
+ emit_insn (gen_lshrv2di3 (t2, op1, thirtytwo));
+ emit_insn (gen_lshrv2di3 (t3, op2, thirtytwo));
+
+ /* Multiply high parts by low parts. */
+ emit_insn (gen_sse2_umulv2siv2di3 (t4, gen_lowpart (V4SImode, op1),
+ gen_lowpart (V4SImode, t3)));
+ emit_insn (gen_sse2_umulv2siv2di3 (t5, gen_lowpart (V4SImode, op2),
+ gen_lowpart (V4SImode, t2)));
+
+ /* Shift them back. */
+ emit_insn (gen_ashlv2di3 (t4, t4, thirtytwo));
+ emit_insn (gen_ashlv2di3 (t5, t5, thirtytwo));
+
+ /* Add the three parts together. */
+ emit_insn (gen_addv2di3 (t6, t1, t4));
+ emit_insn (gen_addv2di3 (op0, t6, t5));
+ DONE;
+})
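+;; This implements, with all arithmetic taken modulo 2^64,
+;;   a * b = a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 32)
+;; for a = (a_hi << 32) + a_lo and likewise b; the a_hi * b_hi term
+;; would contribute only above bit 63 and is dropped.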
+
+(define_expand "sdot_prodv8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:V8HI 2 "nonimmediate_operand" "")
+ (match_operand:V4SI 3 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx t = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_pmaddwd (t, operands[1], operands[2]));
+ emit_insn (gen_addv4si3 (operands[0], operands[3], t));
+ DONE;
+})
+
+(define_expand "udot_prodv4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")
+ (match_operand:V2DI 3 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx t1, t2, t3, t4;
+
+ t1 = gen_reg_rtx (V2DImode);
+ emit_insn (gen_sse2_umulv2siv2di3 (t1, operands[1], operands[2]));
+ emit_insn (gen_addv2di3 (t1, t1, operands[3]));
+
+ t2 = gen_reg_rtx (V4SImode);
+ t3 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t2),
+ gen_lowpart (TImode, operands[1]),
+ GEN_INT (32)));
+ emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, t3),
+ gen_lowpart (TImode, operands[2]),
+ GEN_INT (32)));
+
+ t4 = gen_reg_rtx (V2DImode);
+ emit_insn (gen_sse2_umulv2siv2di3 (t4, t2, t3));
+
+ emit_insn (gen_addv2di3 (operands[0], t1, t4));
+ DONE;
+})
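+;; The result matches the dot-product semantics (illustrative, with
+;; the products zero-extended to 64 bits):
+;;   dst[j] = acc[j] + a[2*j] * b[2*j] + a[2*j+1] * b[2*j+1]
+;; for j = 0, 1, using one PMULUDQ for the even pairs and one for the
+;; odd pairs.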
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "*sse4_1_mulv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+ "pmulld\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_expand "vec_widen_smult_hi_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2, dest;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V8HImode);
+ t2 = gen_reg_rtx (V8HImode);
+ dest = gen_lowpart (V8HImode, operands[0]);
+
+ emit_insn (gen_mulv8hi3 (t1, op1, op2));
+ emit_insn (gen_smulv8hi3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_highv8hi (dest, t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_smult_lo_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2, dest;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V8HImode);
+ t2 = gen_reg_rtx (V8HImode);
+ dest = gen_lowpart (V8HImode, operands[0]);
+
+ emit_insn (gen_mulv8hi3 (t1, op1, op2));
+ emit_insn (gen_smulv8hi3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_lowv8hi (dest, t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_umult_hi_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2, dest;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V8HImode);
+ t2 = gen_reg_rtx (V8HImode);
+ dest = gen_lowpart (V8HImode, operands[0]);
+
+ emit_insn (gen_mulv8hi3 (t1, op1, op2));
+ emit_insn (gen_umulv8hi3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_highv8hi (dest, t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_umult_lo_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2, dest;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V8HImode);
+ t2 = gen_reg_rtx (V8HImode);
+ dest = gen_lowpart (V8HImode, operands[0]);
+
+ emit_insn (gen_mulv8hi3 (t1, op1, op2));
+ emit_insn (gen_umulv8hi3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_lowv8hi (dest, t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_smult_hi_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_interleave_highv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_highv4si (t2, op2, op2));
+ emit_insn (gen_sse2_umulv2siv2di3 (operands[0], t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_smult_lo_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_interleave_lowv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_lowv4si (t2, op2, op2));
+ emit_insn (gen_sse2_umulv2siv2di3 (operands[0], t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_umult_hi_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_interleave_highv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_highv4si (t2, op2, op2));
+ emit_insn (gen_sse2_umulv2siv2di3 (operands[0], t1, t2));
+ DONE;
+})
+
+(define_expand "vec_widen_umult_lo_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx op1, op2, t1, t2;
+
+ op1 = operands[1];
+ op2 = operands[2];
+ t1 = gen_reg_rtx (V4SImode);
+ t2 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_interleave_lowv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_lowv4si (t2, op2, op2));
+ emit_insn (gen_sse2_umulv2siv2di3 (operands[0], t1, t2));
+ DONE;
+})
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_insn "ashr<mode>3"
+ [(set (match_operand:SSEMODE24 0 "register_operand" "=x")
+ (ashiftrt:SSEMODE24
+ (match_operand:SSEMODE24 1 "register_operand" "0")
+ (match_operand:TI 2 "nonmemory_operand" "xn")))]
+ "TARGET_SSE2"
+ "psra<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "TI")])
+
+(define_insn "lshr<mode>3"
+ [(set (match_operand:SSEMODE248 0 "register_operand" "=x")
+ (lshiftrt:SSEMODE248
+ (match_operand:SSEMODE248 1 "register_operand" "0")
+ (match_operand:TI 2 "nonmemory_operand" "xn")))]
+ "TARGET_SSE2"
+ "psrl<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "TI")])
+
+(define_insn "ashl<mode>3"
+ [(set (match_operand:SSEMODE248 0 "register_operand" "=x")
+ (ashift:SSEMODE248
+ (match_operand:SSEMODE248 1 "register_operand" "0")
+ (match_operand:TI 2 "nonmemory_operand" "xn")))]
+ "TARGET_SSE2"
+ "psll<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "TI")])
+
+/* APPLE LOCAL begin 6440204 */
+/* Moved to i386.md. */
+/* APPLE LOCAL end 6440204 */
+
+(define_expand "vec_shl_<mode>"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (ashift:TI (match_operand:SSEMODEI 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (!const_0_to_255_mul_8_operand (operands[2], SImode))
+ FAIL;
+ operands[0] = gen_lowpart (TImode, operands[0]);
+ operands[1] = gen_lowpart (TImode, operands[1]);
+})
+
+;; APPLE LOCAL begin mainline 5951842
+;; moved sse2_lshrti3 to i386.md
+;; APPLE LOCAL end mainline 5951842
+
+(define_expand "vec_shr_<mode>"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (lshiftrt:TI (match_operand:SSEMODEI 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (!const_0_to_255_mul_8_operand (operands[2], SImode))
+ FAIL;
+ operands[0] = gen_lowpart (TImode, operands[0]);
+ operands[1] = gen_lowpart (TImode, operands[1]);
+})
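+
+;; vec_shl_<mode> and vec_shr_<mode> map to pslldq/psrldq, which shift
+;; the whole register by bytes; the expanders therefore FAIL unless the
+;; bit count is a multiple of 8 in the 0..255 range
+;; (const_0_to_255_mul_8_operand), and the operands are punned to
+;; TImode so a single full-width shift applies.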
+
+(define_expand "umaxv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (umax:V16QI (match_operand:V16QI 1 "nonimmediate_operand" "")
+ (match_operand:V16QI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (UMAX, V16QImode, operands);")
+
+(define_insn "*umaxv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (umax:V16QI (match_operand:V16QI 1 "nonimmediate_operand" "%0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (UMAX, V16QImode, operands)"
+ "pmaxub\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "smaxv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (smax:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:V8HI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (SMAX, V8HImode, operands);")
+
+(define_insn "*smaxv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (smax:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (SMAX, V8HImode, operands)"
+ "pmaxsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "umaxv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (us_minus:V8HI (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))
+ (set (match_dup 3)
+ (plus:V8HI (match_dup 0) (match_dup 2)))]
+ "TARGET_SSE2"
+{
+ operands[3] = operands[0];
+ if (rtx_equal_p (operands[0], operands[2]))
+ operands[0] = gen_reg_rtx (V8HImode);
+})
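+
+;; There is no unsigned 16-bit max instruction before SSE4.1, so
+;; umaxv8hi3 relies on the identity umax (a, b) = us_minus (a, b) + b:
+;; psubusw clamps a - b to zero when a <= b, so adding b back yields
+;; the larger operand.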
+
+(define_expand "smax<mode>3"
+ [(set (match_operand:SSEMODE14 0 "register_operand" "")
+ (smax:SSEMODE14 (match_operand:SSEMODE14 1 "register_operand" "")
+ (match_operand:SSEMODE14 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+})
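+
+;; SSE2 likewise has no direct signed max at these element widths, so
+;; smax<mode>3 expands to the vector conditional a > b ? a : b, which
+;; ix86_expand_int_vcond implements with a pcmpgt mask followed by
+;; mask-and-merge logic.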
+
+(define_expand "umaxv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (umax:V4SI (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ if (TARGET_SSE4_1)
+ ix86_fixup_binary_operands_no_copy (UMAX, V4SImode, operands);
+ else
+ {
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+})
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "sse4_1_smax<mode>3"
+ [(set (match_operand:SSEMODE14 0 "register_operand" "=x")
+ (smax:SSEMODE14
+ (match_operand:SSEMODE14 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE14 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (SMAX, <MODE>mode, operands)"
+ "pmaxs<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_umax<mode>3"
+ [(set (match_operand:SSEMODE24 0 "register_operand" "=x")
+ (umax:SSEMODE24
+ (match_operand:SSEMODE24 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE24 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (UMAX, <MODE>mode, operands)"
+ "pmaxu<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "uminv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (umin:V16QI (match_operand:V16QI 1 "nonimmediate_operand" "")
+ (match_operand:V16QI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (UMIN, V16QImode, operands);")
+
+(define_insn "*uminv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (umin:V16QI (match_operand:V16QI 1 "nonimmediate_operand" "%0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (UMIN, V16QImode, operands)"
+ "pminub\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "sminv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (smin:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:V8HI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (SMIN, V8HImode, operands);")
+
+(define_insn "*sminv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (smin:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (SMIN, V8HImode, operands)"
+ "pminsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_expand "smin<mode>3"
+ [(set (match_operand:SSEMODE14 0 "register_operand" "")
+ (smin:SSEMODE14 (match_operand:SSEMODE14 1 "register_operand" "")
+ (match_operand:SSEMODE14 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+ xops[1] = operands[2];
+ xops[2] = operands[1];
+ xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+})
+
+(define_expand "umin<mode>3"
+ [(set (match_operand:SSEMODE24 0 "register_operand" "")
+ (umin:SSEMODE24 (match_operand:SSEMODE24 1 "register_operand" "")
+ (match_operand:SSEMODE24 2 "register_operand" "")))]
+ "TARGET_SSE2"
+{
+ /* APPLE LOCAL begin 5612787 mainline sse4 */
+ if (TARGET_SSE4_1)
+ ix86_fixup_binary_operands_no_copy (UMIN, <MODE>mode, operands);
+ else
+ {
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+ xops[1] = operands[2];
+ xops[2] = operands[1];
+ xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+ }
+ /* APPLE LOCAL end 5612787 mainline sse4 */
+})
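+
+;; The smin/umin expanders reuse the same a > b comparison but swap the
+;; value operands passed to ix86_expand_int_vcond, computing
+;; a > b ? b : a.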
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral comparisons
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_eq<mode>3"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "=x")
+ (eq:SSEMODE124
+ (match_operand:SSEMODE124 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE124 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "pcmpeq<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "sse4_1_eqv2di3"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (eq:V2DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (EQ, V2DImode, operands)"
+ "pcmpeqq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_insn "sse2_gt<mode>3"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "=x")
+ (gt:SSEMODE124
+ (match_operand:SSEMODE124 1 "register_operand" "0")
+ (match_operand:SSEMODE124 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "pcmpgt<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "sse4_2_gtv2di3"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (gt:V2DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_2"
+ "pcmpgtq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "vcond<mode>"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "")
+ (if_then_else:SSEMODE124
+ (match_operator 3 ""
+ [(match_operand:SSEMODE124 4 "nonimmediate_operand" "")
+ (match_operand:SSEMODE124 5 "nonimmediate_operand" "")])
+ (match_operand:SSEMODE124 1 "general_operand" "")
+ (match_operand:SSEMODE124 2 "general_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (ix86_expand_int_vcond (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "vcondu<mode>"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "")
+ (if_then_else:SSEMODE124
+ (match_operator 3 ""
+ [(match_operand:SSEMODE124 4 "nonimmediate_operand" "")
+ (match_operand:SSEMODE124 5 "nonimmediate_operand" "")])
+ (match_operand:SSEMODE124 1 "general_operand" "")
+ (match_operand:SSEMODE124 2 "general_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (ix86_expand_int_vcond (operands))
+ DONE;
+ else
+ FAIL;
+})
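+
+;; vcond/vcondu hand the whole if_then_else to ix86_expand_int_vcond;
+;; FAILing when it cannot handle the comparison lets callers of these
+;; named patterns fall back to other expansion strategies.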
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral logical operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "one_cmpl<mode>2"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (xor:SSEMODEI (match_operand:SSEMODEI 1 "nonimmediate_operand" "")
+ (match_dup 2)))]
+ "TARGET_SSE2"
+{
+ int i, n = GET_MODE_NUNITS (<MODE>mode);
+ rtvec v = rtvec_alloc (n);
+
+ for (i = 0; i < n; ++i)
+ RTVEC_ELT (v, i) = constm1_rtx;
+
+ operands[2] = force_reg (<MODE>mode, gen_rtx_CONST_VECTOR (<MODE>mode, v));
+})
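+
+;; One's complement is computed as x ^ -1: the expander materializes a
+;; constant vector of all-ones elements and lets the xor<mode>3 pattern
+;; below emit pxor.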
+
+(define_expand "and<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (and:SSEMODEI (match_operand:SSEMODEI 1 "nonimmediate_operand" "")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (AND, <MODE>mode, operands);")
+
+(define_insn "*and<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (and:SSEMODEI
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (AND, <MODE>mode, operands)"
+ "pand\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_nand<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (and:SSEMODEI
+ (not:SSEMODEI (match_operand:SSEMODEI 1 "register_operand" "0"))
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2"
+ "pandn\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_expand "ior<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (ior:SSEMODEI (match_operand:SSEMODEI 1 "nonimmediate_operand" "")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (IOR, <MODE>mode, operands);")
+
+(define_insn "*ior<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (ior:SSEMODEI
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (IOR, <MODE>mode, operands)"
+ "por\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_expand "xor<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "")
+ (xor:SSEMODEI (match_operand:SSEMODEI 1 "nonimmediate_operand" "")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (XOR, <MODE>mode, operands);")
+
+(define_insn "*xor<mode>3"
+ [(set (match_operand:SSEMODEI 0 "register_operand" "=x")
+ (xor:SSEMODEI
+ (match_operand:SSEMODEI 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODEI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (XOR, <MODE>mode, operands)"
+ "pxor\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "sse4_1_smin<mode>3"
+ [(set (match_operand:SSEMODE14 0 "register_operand" "=x")
+ (smin:SSEMODE14
+ (match_operand:SSEMODE14 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE14 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (SMIN, <MODE>mode, operands)"
+ "pmins<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_umin<mode>3"
+ [(set (match_operand:SSEMODE24 0 "register_operand" "=x")
+ (umin:SSEMODE24
+ (match_operand:SSEMODE24 1 "nonimmediate_operand" "%0")
+ (match_operand:SSEMODE24 2 "nonimmediate_operand" "xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (UMIN, <MODE>mode, operands)"
+ "pminu<ssevecsize>\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_expand "vec_interleave_highv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_select:V16QI
+ (vec_concat:V32QI
+ (match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 8) (const_int 24)
+ (const_int 9) (const_int 25)
+ (const_int 10) (const_int 26)
+ (const_int 11) (const_int 27)
+ (const_int 12) (const_int 28)
+ (const_int 13) (const_int 29)
+ (const_int 14) (const_int 30)
+ (const_int 15) (const_int 31)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpckhbw (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_lowv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_select:V16QI
+ (vec_concat:V32QI
+ (match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 16)
+ (const_int 1) (const_int 17)
+ (const_int 2) (const_int 18)
+ (const_int 3) (const_int 19)
+ (const_int 4) (const_int 20)
+ (const_int 5) (const_int 21)
+ (const_int 6) (const_int 22)
+ (const_int 7) (const_int 23)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpcklbw (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_highv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (vec_concat:V16HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpckhwd (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_lowv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (vec_concat:V16HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpcklwd (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_highv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_select:V4SI
+ (vec_concat:V8SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpckhdq (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_lowv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_select:V4SI
+ (vec_concat:V8SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpckldq (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_highv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_select:V2DI
+ (vec_concat:V4DI
+ (match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 1)
+ (const_int 3)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpckhqdq (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "vec_interleave_lowv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_select:V2DI
+ (vec_concat:V4DI
+ (match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_SSE2"
+{
+ emit_insn (gen_sse2_punpcklqdq (operands[0], operands[1], operands[2]));
+ DONE;
+})
+;; APPLE LOCAL end 5612787 mainline sse4
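+
+;; The vec_interleave_* expanders above simply forward to the matching
+;; sse2_punpck* insns defined under "Parallel integral element
+;; swizzling" below; they provide the gen_vec_interleave_* entry points
+;; used by the widening-multiply expanders earlier in this file.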
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Parallel integral element swizzling
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_packsswb"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_concat:V16QI
+ (ss_truncate:V8QI
+ (match_operand:V8HI 1 "register_operand" "0"))
+ (ss_truncate:V8QI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))))]
+ "TARGET_SSE2"
+ "packsswb\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_packssdw"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (ss_truncate:V4HI
+ (match_operand:V4SI 1 "register_operand" "0"))
+ (ss_truncate:V4HI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))))]
+ "TARGET_SSE2"
+ "packssdw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_packuswb"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_concat:V16QI
+ (us_truncate:V8QI
+ (match_operand:V8HI 1 "register_operand" "0"))
+ (us_truncate:V8QI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))))]
+ "TARGET_SSE2"
+ "packuswb\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpckhbw"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_select:V16QI
+ (vec_concat:V32QI
+ (match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 8) (const_int 24)
+ (const_int 9) (const_int 25)
+ (const_int 10) (const_int 26)
+ (const_int 11) (const_int 27)
+ (const_int 12) (const_int 28)
+ (const_int 13) (const_int 29)
+ (const_int 14) (const_int 30)
+ (const_int 15) (const_int 31)])))]
+ "TARGET_SSE2"
+ "punpckhbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpcklbw"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_select:V16QI
+ (vec_concat:V32QI
+ (match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 16)
+ (const_int 1) (const_int 17)
+ (const_int 2) (const_int 18)
+ (const_int 3) (const_int 19)
+ (const_int 4) (const_int 20)
+ (const_int 5) (const_int 21)
+ (const_int 6) (const_int 22)
+ (const_int 7) (const_int 23)])))]
+ "TARGET_SSE2"
+ "punpcklbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpckhwd"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (vec_concat:V16HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_SSE2"
+ "punpckhwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpcklwd"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (vec_concat:V16HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)])))]
+ "TARGET_SSE2"
+ "punpcklwd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpckhdq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_select:V4SI
+ (vec_concat:V8SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_SSE2"
+ "punpckhdq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpckldq"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_select:V4SI
+ (vec_concat:V8SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_SSE2"
+ "punpckldq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpckhqdq"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_select:V2DI
+ (vec_concat:V4DI
+ (match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 1)
+ (const_int 3)])))]
+ "TARGET_SSE2"
+ "punpckhqdq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_punpcklqdq"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_select:V2DI
+ (vec_concat:V4DI
+ (match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 2)])))]
+ "TARGET_SSE2"
+ "punpcklqdq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "*sse4_1_pinsrb"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (vec_merge:V16QI
+ (vec_duplicate:V16QI
+ (match_operand:QI 2 "nonimmediate_operand" "rm"))
+ (match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_32768_operand" "n")))]
+ "TARGET_SSE4_1"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
+ return "pinsrb\t{%3, %k2, %0|%0, %k2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "sse2_pinsrw"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (vec_merge:V8HI
+ (vec_duplicate:V8HI
+ (match_operand:SI 2 "nonimmediate_operand" ""))
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand:SI 3 "const_0_to_7_operand" "")))]
+ "TARGET_SSE2"
+{
+ operands[2] = gen_lowpart (HImode, operands[2]);
+ operands[3] = GEN_INT ((1 << INTVAL (operands[3])));
+})
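+
+;; vec_merge takes elements from the duplicated scalar wherever a mask
+;; bit is set, so the sse2_pinsrw expander converts the element index
+;; into the one-bit mask 1 << index; the *sse2_pinsrw insn recovers the
+;; pinsrw immediate with exact_log2 when printing.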
+
+(define_insn "*sse2_pinsrw"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_merge:V8HI
+ (vec_duplicate:V8HI
+ (match_operand:HI 2 "nonimmediate_operand" "rm"))
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_128_operand" "n")))]
+ "TARGET_SSE2"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
+ return "pinsrw\t{%3, %k2, %0|%0, %k2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;; This pattern must come before sse2_loadld since it is preferred.
+(define_insn "*sse4_1_pinsrd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 2 "nonimmediate_operand" "rm"))
+ (match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_8_operand" "n")))]
+ "TARGET_SSE4_1"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
+ return "pinsrd\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_pinsrq"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_merge:V2DI
+ (vec_duplicate:V2DI
+ (match_operand:DI 2 "nonimmediate_operand" "rm"))
+ (match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_pow2_1_to_2_operand" "n")))]
+ "TARGET_SSE4_1"
+{
+ operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
+ return "pinsrq\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_pextrb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (vec_select:QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_15_operand" "n")]))))]
+ "TARGET_SSE4_1"
+ "pextrb\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_pextrb_memory"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (vec_select:QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_15_operand" "n")])))]
+ "TARGET_SSE4_1"
+ "pextrb\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_insn "sse2_pextrw"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_7_operand" "n")]))))]
+ "TARGET_SSE2"
+ "pextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "*sse4_1_pextrw_memory"
+ [(set (match_operand:HI 0 "memory_operand" "=m")
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_7_operand" "n")])))]
+ "TARGET_SSE4_1"
+ "pextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_pextrd"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (vec_select:SI
+ (match_operand:V4SI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n")])))]
+ "TARGET_SSE4_1"
+ "pextrd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+;; This pattern must come before *vec_extractv2di_1_sse since it is preferred.
+(define_insn "*sse4_1_pextrq"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "x")
+ (parallel [(match_operand:SI 2 "const_0_to_1_operand" "n")])))]
+ "TARGET_SSE4_1 && TARGET_64BIT"
+ "pextrq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "sse2_pshufd"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V4SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_sse2_pshufd_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3)));
+ DONE;
+})
+
+(define_insn "sse2_pshufd_1"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_select:V4SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "xm")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")])))]
+ "TARGET_SSE2"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "pshufd\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "TI")])
+
+(define_expand "sse2_pshuflw"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_sse2_pshuflw_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3)));
+ DONE;
+})
+
+(define_insn "sse2_pshuflw_1"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)])))]
+ "TARGET_SSE2"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "pshuflw\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_expand "sse2_pshufhw"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")]
+ "TARGET_SSE2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_sse2_pshufhw_1 (operands[0], operands[1],
+ GEN_INT (((mask >> 0) & 3) + 4),
+ GEN_INT (((mask >> 2) & 3) + 4),
+ GEN_INT (((mask >> 4) & 3) + 4),
+ GEN_INT (((mask >> 6) & 3) + 4)));
+ DONE;
+})
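+
+;; pshufhw permutes only the high quadword, whose elements are 4..7, so
+;; the expander biases each 2-bit selector by +4 and sse2_pshufhw_1
+;; subtracts 4 again when rebuilding the immediate; pshuflw above is
+;; the mirror image for elements 0..3.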
+
+(define_insn "sse2_pshufhw_1"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_select:V8HI
+ (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (match_operand 2 "const_4_to_7_operand" "")
+ (match_operand 3 "const_4_to_7_operand" "")
+ (match_operand 4 "const_4_to_7_operand" "")
+ (match_operand 5 "const_4_to_7_operand" "")])))]
+ "TARGET_SSE2"
+{
+ int mask = 0;
+ mask |= (INTVAL (operands[2]) - 4) << 0;
+ mask |= (INTVAL (operands[3]) - 4) << 2;
+ mask |= (INTVAL (operands[4]) - 4) << 4;
+ mask |= (INTVAL (operands[5]) - 4) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "pshufhw\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "mode" "TI")])
+
+(define_expand "sse2_loadd"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 1 "nonimmediate_operand" ""))
+ (match_dup 2)
+ (const_int 1)))]
+ "TARGET_SSE"
+ "operands[2] = CONST0_RTX (V4SImode);")
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn "sse2_loadld"
+ [(set (match_operand:V4SI 0 "register_operand" "=Yt,Yi,x,x")
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 2 "nonimmediate_operand" "m ,r ,m,x"))
+ (match_operand:V4SI 1 "reg_or_0_operand" "C ,C ,C,0")
+ (const_int 1)))]
+ "TARGET_SSE"
+ "@
+ movd\t{%2, %0|%0, %2}
+ movd\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "TI,TI,V4SF,SF")])
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;; ??? The hardware supports more, but TARGET_INTER_UNIT_MOVES must
+;; be taken into account, and movdi isn't fully populated even without it.
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_insn_and_split "sse2_stored"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=mx,r")
+ (vec_select:SI
+ (match_operand:V4SI 1 "register_operand" "x,Yi")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE"
+ "#"
+ "&& reload_completed
+ && (TARGET_INTER_UNIT_MOVES
+ || MEM_P (operands [0])
+ || !GENERAL_REGNO_P (true_regnum (operands [0])))"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]));
+})
+;; APPLE LOCAL end 5612787 mainline sse4
+
+(define_expand "sse_storeq"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE"
+ "")
+
+;; ??? The hardware supports more, but TARGET_INTER_UNIT_MOVES must
+;; be taken into account, and movdi isn't fully populated even without it.
+(define_insn "*sse2_storeq"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=mx")
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "x")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE"
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "")
+ (parallel [(const_int 0)])))]
+ "TARGET_SSE && reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
+})
+
+(define_insn "*vec_extractv2di_1_sse2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "x,0,o")
+ (parallel [(const_int 1)])))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movhps\t{%1, %0|%0, %1}
+ psrldq\t{$8, %0|%0, 8}
+ movq\t{%H1, %0|%0, %H1}"
+ [(set_attr "type" "ssemov,sseishft,ssemov")
+ (set_attr "memory" "*,none,*")
+ (set_attr "mode" "V2SF,TI,TI")])
+
+;; Not sure this is ever used, but it doesn't hurt to have it. -aoliva
+(define_insn "*vec_extractv2di_1_sse"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "x,x,o")
+ (parallel [(const_int 1)])))]
+ "!TARGET_SSE2 && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movhps\t{%1, %0|%0, %1}
+ movhlps\t{%1, %0|%0, %1}
+ movlps\t{%H1, %0|%0, %H1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
+(define_insn "*vec_dupv4si"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V4SI 0 "register_operand" "=Yt,x")
+ (vec_duplicate:V4SI
+ (match_operand:SI 1 "register_operand" " Yt,0")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ pshufd\t{$0, %1, %0|%0, %1, 0}
+ shufps\t{$0, %0, %0|%0, %0, 0}"
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "TI,V4SF")])
+
+(define_insn "*vec_dupv2di"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V2DI 0 "register_operand" "=Yt,x")
+ (vec_duplicate:V2DI
+ (match_operand:DI 1 "register_operand" " 0 ,0")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ punpcklqdq\t%0, %0
+ movlhps\t%0, %0"
+ [(set_attr "type" "sselog1,ssemov")
+ (set_attr "mode" "TI,V4SF")])
+
+;; ??? In theory we can match memory for the MMX alternative, but allowing
+;; nonimmediate_operand for operand 2 and *not* allowing memory for the SSE
+;; alternatives pretty much forces the MMX alternative to be chosen.
+(define_insn "*sse2_concatv2si"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V2SI 0 "register_operand" "=Yt, Yt,*y,*y")
+ (vec_concat:V2SI
+ (match_operand:SI 1 "nonimmediate_operand" " 0 ,rm , 0,rm")
+ (match_operand:SI 2 "reg_or_0_operand" " Yt,C ,*y, C")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE2"
+ "@
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "mode" "TI,TI,DI,DI")])
+
+(define_insn "*sse1_concatv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=x,x,*y,*y")
+ (vec_concat:V2SI
+ (match_operand:SI 1 "nonimmediate_operand" " 0,m, 0,*rm")
+ (match_operand:SI 2 "reg_or_0_operand" " x,C,*y,C")))]
+ "TARGET_SSE"
+ "@
+ unpcklps\t{%2, %0|%0, %2}
+ movss\t{%1, %0|%0, %1}
+ punpckldq\t{%2, %0|%0, %2}
+ movd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "mode" "V4SF,V4SF,DI,DI")])
+
+(define_insn "*vec_concatv4si_1"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V4SI 0 "register_operand" "=Yt,x,x")
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" " 0 ,0,0")
+ (match_operand:V2SI 2 "nonimmediate_operand" " Yt,x,m")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ punpcklqdq\t{%2, %0|%0, %2}
+ movlhps\t{%2, %0|%0, %2}
+ movhps\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "mode" "TI,V4SF,V2SF")])
+
+(define_insn "*vec_concatv2di"
+;; APPLE LOCAL begin 5612787 mainline sse4
+ [(set (match_operand:V2DI 0 "register_operand" "=Yt,?Yt,Yt,x,x,x")
+ (vec_concat:V2DI
+ (match_operand:DI 1 "nonimmediate_operand" " m,*y ,0 ,0,0,m")
+ /* APPLE LOCAL mainline */
+ (match_operand:DI 2 "nonimmediate_or_0_operand" " C, C,Yt,x,m,0")))]
+;; APPLE LOCAL end 5612787 mainline sse4
+ "TARGET_SSE"
+ "@
+ movq\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ punpcklqdq\t{%2, %0|%0, %2}
+ movlhps\t{%2, %0|%0, %2}
+ movhps\t{%2, %0|%0, %2}
+ movlps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov,ssemov,sselog,ssemov,ssemov,ssemov")
+ (set_attr "mode" "TI,TI,TI,V4SF,V2SF,V2SF")])
+
+(define_expand "vec_setv2di"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv2di"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:V2DI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv2di"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv4si"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4si"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv4si"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv8hi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv8hi"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv8hi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv16qi"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_extract (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_initv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SSE"
+{
+ ix86_expand_vector_init (false, operands[0], operands[1]);
+ DONE;
+})
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+(define_expand "vec_unpacku_hi_v16qi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, true);
+ else
+ ix86_expand_sse_unpack (operands, true, true);
+ DONE;
+})
+
+(define_expand "vec_unpacks_hi_v16qi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, true);
+ else
+ ix86_expand_sse_unpack (operands, false, true);
+ DONE;
+})
+
+(define_expand "vec_unpacku_lo_v16qi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, false);
+ else
+ ix86_expand_sse_unpack (operands, true, false);
+ DONE;
+})
+
+(define_expand "vec_unpacks_lo_v16qi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, false);
+ else
+ ix86_expand_sse_unpack (operands, false, false);
+ DONE;
+})
+
+(define_expand "vec_unpacku_hi_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, true);
+ else
+ ix86_expand_sse_unpack (operands, true, true);
+ DONE;
+})
+
+(define_expand "vec_unpacks_hi_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, true);
+ else
+ ix86_expand_sse_unpack (operands, false, true);
+ DONE;
+})
+
+(define_expand "vec_unpacku_lo_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, false);
+ else
+ ix86_expand_sse_unpack (operands, true, false);
+ DONE;
+})
+
+(define_expand "vec_unpacks_lo_v8hi"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, false);
+ else
+ ix86_expand_sse_unpack (operands, false, false);
+ DONE;
+})
+
+(define_expand "vec_unpacku_hi_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, true);
+ else
+ ix86_expand_sse_unpack (operands, true, true);
+ DONE;
+})
+
+(define_expand "vec_unpacks_hi_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, true);
+ else
+ ix86_expand_sse_unpack (operands, false, true);
+ DONE;
+})
+
+(define_expand "vec_unpacku_lo_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, true, false);
+ else
+ ix86_expand_sse_unpack (operands, true, false);
+ DONE;
+})
+
+(define_expand "vec_unpacks_lo_v4si"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1)
+ ix86_expand_sse4_unpack (operands, false, false);
+ else
+ ix86_expand_sse_unpack (operands, false, false);
+ DONE;
+})
+;; APPLE LOCAL end 5612787 mainline sse4
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Miscellaneous
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse2_uavgv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (truncate:V16QI
+ (lshiftrt:V16HI
+ (plus:V16HI
+ (plus:V16HI
+ (zero_extend:V16HI
+ (match_operand:V16QI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V16HI
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")))
+ (const_vector:V16HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (PLUS, V16QImode, operands)"
+ "pavgb\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse2_uavgv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (plus:V8SI
+ (plus:V8SI
+ (zero_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" "%0"))
+ (zero_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))
+ (const_vector:V8SI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (PLUS, V8HImode, operands)"
+ "pavgw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+;; The correct representation for this is absolutely enormous, and
+;; surely not generally useful.
+(define_insn "sse2_psadbw"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")]
+ UNSPEC_PSADBW))]
+ "TARGET_SSE2"
+ "psadbw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse_movmskps"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V4SF 1 "register_operand" "x")]
+ UNSPEC_MOVMSK))]
+ "TARGET_SSE"
+ "movmskps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse2_movmskpd"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V2DF 1 "register_operand" "x")]
+ UNSPEC_MOVMSK))]
+ "TARGET_SSE2"
+ "movmskpd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse2_pmovmskb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V16QI 1 "register_operand" "x")]
+ UNSPEC_MOVMSK))]
+ "TARGET_SSE2"
+ "pmovmskb\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "V2DF")])
+
+(define_expand "sse2_maskmovdqu"
+ [(set (match_operand:V16QI 0 "memory_operand" "")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "x")
+ (match_operand:V16QI 2 "register_operand" "x")
+ (match_dup 0)]
+ UNSPEC_MASKMOV))]
+ "TARGET_SSE2"
+ "")
+
+(define_insn "*sse2_maskmovdqu"
+ [(set (mem:V16QI (match_operand:SI 0 "register_operand" "D"))
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "x")
+ (match_operand:V16QI 2 "register_operand" "x")
+ (mem:V16QI (match_dup 0))]
+ UNSPEC_MASKMOV))]
+ "TARGET_SSE2 && !TARGET_64BIT"
+ ;; @@@ check ordering of operands in Intel vs. AT&T syntax
+ "maskmovdqu\t{%2, %1|%1, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse2_maskmovdqu_rex64"
+ [(set (mem:V16QI (match_operand:DI 0 "register_operand" "D"))
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "x")
+ (match_operand:V16QI 2 "register_operand" "x")
+ (mem:V16QI (match_dup 0))]
+ UNSPEC_MASKMOV))]
+ "TARGET_SSE2 && TARGET_64BIT"
+ ;; @@@ check ordering of operands in Intel vs. AT&T syntax
+ "maskmovdqu\t{%2, %1|%1, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse_ldmxcsr"
+ [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")]
+ UNSPECV_LDMXCSR)]
+ "TARGET_SSE"
+ "ldmxcsr\t%0"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "load")])
+
+(define_insn "sse_stmxcsr"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_STMXCSR))]
+ "TARGET_SSE"
+ "stmxcsr\t%0"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "store")])
+
+(define_expand "sse_sfence"
+ [(set (match_dup 0)
+ (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*sse_sfence"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
+ "TARGET_SSE || TARGET_3DNOW_A"
+ "sfence"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "unknown")])
+
+(define_insn "sse2_clflush"
+ [(unspec_volatile [(match_operand 0 "address_operand" "p")]
+ UNSPECV_CLFLUSH)]
+ "TARGET_SSE2"
+ "clflush\t%a0"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "unknown")])
+
+(define_expand "sse2_mfence"
+ [(set (match_dup 0)
+ (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
+ "TARGET_SSE2"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*sse2_mfence"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
+ "TARGET_SSE2"
+ "mfence"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "unknown")])
+
+(define_expand "sse2_lfence"
+ [(set (match_dup 0)
+ (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
+ "TARGET_SSE2"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*sse2_lfence"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
+ "TARGET_SSE2"
+ "lfence"
+ [(set_attr "type" "sse")
+ (set_attr "memory" "unknown")])
+
+(define_insn "sse3_mwait"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "a")
+ (match_operand:SI 1 "register_operand" "c")]
+ UNSPECV_MWAIT)]
+ "TARGET_SSE3"
+;; The 64-bit version is "mwait %rax,%rcx", but only the lower 32 bits
+;; are used.  Since 32-bit register operands are implicitly zero-extended
+;; to 64 bits, we only need to set up the 32-bit registers.
+ "mwait"
+ [(set_attr "length" "3")])
+
+(define_insn "sse3_monitor"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "a")
+ (match_operand:SI 1 "register_operand" "c")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPECV_MONITOR)]
+ "TARGET_SSE3 && !TARGET_64BIT"
+ "monitor\t%0, %1, %2"
+ [(set_attr "length" "3")])
+; APPLE LOCAL begin mainline
+;; SSSE3
+(define_insn "ssse3_phaddwv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_SSSE3"
+ "phaddw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phaddwv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phaddw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_phadddv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))
+ (plus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phaddd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phadddv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V2SI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_SSSE3"
+ "phaddd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_phaddswv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_SSSE3"
+ "phaddsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phaddswv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phaddsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_phsubwv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_SSSE3"
+ "phsubw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phsubwv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phsubw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_phsubdv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V4SI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))
+ (minus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phsubd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phsubdv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=y")
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V2SI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V2SI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))))]
+ "TARGET_SSSE3"
+ "phsubd\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_phsubswv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V8HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_SSSE3"
+ "phsubsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_phsubswv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V4HI 1 "register_operand" "0")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))))]
+ "TARGET_SSSE3"
+ "phsubsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_pmaddubswv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (ss_plus:V8HI
+ (mult:V8HI
+ (zero_extend:V8HI
+ (vec_select:V8QI
+ ;; APPLE LOCAL 6025404
+ (match_operand:V16QI 1 "nonimmediate_operand" "0")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)])))
+ (sign_extend:V8HI
+ (vec_select:V8QI
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)]))))
+ (mult:V8HI
+ (zero_extend:V8HI
+ (vec_select:V8QI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)])))
+ (sign_extend:V8HI
+ (vec_select:V8QI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)]))))))]
+ "TARGET_SSSE3"
+ "pmaddubsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_pmaddubswv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (ss_plus:V4HI
+ (mult:V4HI
+ (zero_extend:V4HI
+ (vec_select:V4QI
+ ;; APPLE LOCAL 6025404
+ (match_operand:V8QI 1 "nonimmediate_operand" "0")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)])))
+ (sign_extend:V4HI
+ (vec_select:V4QI
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)]))))
+ (mult:V4HI
+ (zero_extend:V4HI
+ (vec_select:V4QI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)])))
+ (sign_extend:V4HI
+ (vec_select:V4QI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)]))))))]
+ "TARGET_SSSE3"
+ "pmaddubsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_pmulhrswv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (truncate:V8HI
+ (lshiftrt:V8SI
+ (plus:V8SI
+ (lshiftrt:V8SI
+ (mult:V8SI
+ (sign_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:V8SI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")))
+ (const_int 14))
+ (const_vector:V8SI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_SSSE3 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "pmulhrsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_pmulhrswv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=y")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (plus:V4SI
+ (lshiftrt:V4SI
+ (mult:V4SI
+ (sign_extend:V4SI
+ (match_operand:V4HI 1 "nonimmediate_operand" "%0"))
+ (sign_extend:V4SI
+ (match_operand:V4HI 2 "nonimmediate_operand" "ym")))
+ (const_int 14))
+ (const_vector:V4SI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_SSSE3 && ix86_binary_operator_ok (MULT, V4HImode, operands)"
+ "pmulhrsw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_pshufbv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")]
+ UNSPEC_PSHUFB))]
+ "TARGET_SSSE3"
+ "pshufb\t{%2, %0|%0, %2}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "TI")])
+
+(define_insn "ssse3_pshufbv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=y")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "0")
+ (match_operand:V8QI 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PSHUFB))]
+ "TARGET_SSSE3"
+ "pshufb\t{%2, %0|%0, %2}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "DI")])
+
+(define_insn "ssse3_psign<mode>3"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "=x")
+ (unspec:SSEMODE124 [(match_operand:SSEMODE124 1 "register_operand" "0")
+ (match_operand:SSEMODE124 2 "nonimmediate_operand" "xm")]
+ UNSPEC_PSIGN))]
+ "TARGET_SSSE3"
+ "psign<ssevecsize>\t{%2, %0|%0, %2}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "ssse3_psign<mode>3"
+ [(set (match_operand:MMXMODE124 0 "register_operand" "=y")
+ (unspec:MMXMODE124 [(match_operand:MMXMODE124 1 "register_operand" "0")
+ (match_operand:MMXMODE124 2 "nonimmediate_operand" "ym")]
+ UNSPEC_PSIGN))]
+ "TARGET_SSSE3"
+ "psign<mmxvecsize>\t{%2, %0|%0, %2}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "ssse3_palignrti"
+ [(set (match_operand:TI 0 "register_operand" "=x")
+ (unspec:TI [(match_operand:TI 1 "register_operand" "0")
+ (match_operand:TI 2 "nonimmediate_operand" "xm")
+ (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n")]
+ UNSPEC_PALIGNR))]
+ "TARGET_SSSE3"
+{
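+  /* The builtin encodes the shift count in bits; palignr takes a byte
+     count, so scale the immediate down before printing.  */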
+ operands[3] = GEN_INT (INTVAL (operands[3]) / 8);
+ return "palignr\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "ssse3_palignrv1di"
+ [(set (match_operand:V1DI 0 "register_operand" "=y")
+ (unspec:V1DI [(match_operand:V1DI 1 "register_operand" "0")
+ (match_operand:V1DI 2 "nonimmediate_operand" "ym")
+ (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n")]
+ UNSPEC_PALIGNR))]
+ "TARGET_SSSE3"
+{
+ operands[3] = GEN_INT (INTVAL (operands[3]) / 8);
+ return "palignr\t{%3, %2, %0|%0, %2, %3}";
+}
+ [(set_attr "type" "sseishft")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+
+(define_insn "ssse3_pabs<mode>2"
+ [(set (match_operand:SSEMODE124 0 "register_operand" "=x")
+ (abs:SSEMODE124 (match_operand:SSEMODE124 1 "nonimmediate_operand" "xm")))]
+ "TARGET_SSSE3"
+ "pabs<ssevecsize>\t{%1, %0|%0, %1}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "TI")])
+
+;; APPLE LOCAL begin 4656532 use V1DImode for _m64
+(define_insn "ssse3_pabs<mode>2"
+ [(set (match_operand:MMXMODE124 0 "register_operand" "=y")
+ (abs:MMXMODE124 (match_operand:MMXMODE124 1 "nonimmediate_operand" "ym")))]
+ "TARGET_SSSE3"
+ "pabs<mmxvecsize>\t{%1, %0|%0, %1}";
+ [(set_attr "type" "sselog1")
+ (set_attr "mode" "DI")])
+;; APPLE LOCAL end 4656532 use V1DImode for _m64
+;; APPLE LOCAL end mainline
+
+(define_insn "sse3_monitor64"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "a")
+ (match_operand:SI 1 "register_operand" "c")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPECV_MONITOR)]
+ "TARGET_SSE3 && TARGET_64BIT"
+;; The 64-bit form is "monitor %rax,%rcx,%rdx", but only the lower 32
+;; bits of RCX and RDX are used.  Since 32-bit register operands are
+;; implicitly zero-extended to 64 bits, we only need to set up 32-bit
+;; registers.
+ "monitor"
+ [(set_attr "length" "3")])
+
+;; APPLE LOCAL begin 5612787 mainline sse4
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; AMD SSE4A instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse4a_vmmovntv2df"
+ [(set (match_operand:DF 0 "memory_operand" "=m")
+ (unspec:DF [(vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "x")
+ (parallel [(const_int 0)]))]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE4A"
+ "movntsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "DF")])
+
+(define_insn "sse4a_movntdf"
+ [(set (match_operand:DF 0 "memory_operand" "=m")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE4A"
+ "movntsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "DF")])
+
+(define_insn "sse4a_vmmovntv4sf"
+ [(set (match_operand:SF 0 "memory_operand" "=m")
+ (unspec:SF [(vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "x")
+ (parallel [(const_int 0)]))]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE4A"
+ "movntss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse4a_movntsf"
+ [(set (match_operand:SF 0 "memory_operand" "=m")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE4A"
+ "movntss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "SF")])
+
+(define_insn "sse4a_extrqi"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand 2 "const_int_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_EXTRQI))]
+ "TARGET_SSE4A"
+ "extrq\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "sse")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4a_extrq"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "x")]
+ UNSPEC_EXTRQ))]
+ "TARGET_SSE4A"
+ "extrq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sse")
+ (set_attr "prefix_data16" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4a_insertqi"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "register_operand" "x")
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" "")]
+ UNSPEC_INSERTQI))]
+ "TARGET_SSE4A"
+ "insertq\t{%4, %3, %2, %0|%0, %2, %3, %4}"
+ [(set_attr "type" "sseins")
+ (set_attr "prefix_rep" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4a_insertq"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "register_operand" "x")]
+ UNSPEC_INSERTQ))]
+ "TARGET_SSE4A"
+ "insertq\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sseins")
+ (set_attr "prefix_rep" "1")
+ (set_attr "mode" "TI")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Intel SSE4.1 instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn "sse4_1_blendpd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")
+ (match_operand:V2DF 1 "register_operand" "0")
+ (match_operand:SI 3 "const_0_to_3_operand" "n")))]
+ "TARGET_SSE4_1"
+ "blendpd\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse4_1_blendps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")
+ (match_operand:V4SF 1 "register_operand" "0")
+ (match_operand:SI 3 "const_0_to_15_operand" "n")))]
+ "TARGET_SSE4_1"
+ "blendps\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse4_1_blendvpd"
+ [(set (match_operand:V2DF 0 "reg_not_xmm0_operand" "=x")
+ (unspec:V2DF [(match_operand:V2DF 1 "reg_not_xmm0_operand" "0")
+ (match_operand:V2DF 2 "nonimm_not_xmm0_operand" "xm")
+ (match_operand:V2DF 3 "register_operand" "Y0")]
+ UNSPEC_BLENDV))]
+ "TARGET_SSE4_1"
+ "blendvpd\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse4_1_blendvps"
+ [(set (match_operand:V4SF 0 "reg_not_xmm0_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 1 "reg_not_xmm0_operand" "0")
+ (match_operand:V4SF 2 "nonimm_not_xmm0_operand" "xm")
+ (match_operand:V4SF 3 "register_operand" "Y0")]
+ UNSPEC_BLENDV))]
+ "TARGET_SSE4_1"
+ "blendvps\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse4_1_dppd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (unspec:V2DF [(match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")]
+ UNSPEC_DP))]
+ "TARGET_SSE4_1"
+ "dppd\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse4_1_dpps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")]
+ UNSPEC_DP))]
+ "TARGET_SSE4_1"
+ "dpps\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse4_1_movntdqa"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (unspec:V2DI [(match_operand:V2DI 1 "memory_operand" "m")]
+ UNSPEC_MOVNTDQA))]
+ "TARGET_SSE4_1"
+ "movntdqa\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_mpsadbw"
+ [(set (match_operand:V16QI 0 "register_operand" "=x")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "nonimmediate_operand" "xm")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")]
+ UNSPEC_MPSADBW))]
+ "TARGET_SSE4_1"
+ "mpsadbw\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_packusdw"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_concat:V8HI
+ (us_truncate:V4HI
+ (match_operand:V4SI 1 "register_operand" "0"))
+ (us_truncate:V4HI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm"))))]
+ "TARGET_SSE4_1"
+ "packusdw\t{%2, %0|%0, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_pblendvb"
+ [(set (match_operand:V16QI 0 "reg_not_xmm0_operand" "=x")
+ (unspec:V16QI [(match_operand:V16QI 1 "reg_not_xmm0_operand" "0")
+ (match_operand:V16QI 2 "nonimm_not_xmm0_operand" "xm")
+ (match_operand:V16QI 3 "register_operand" "Y0")]
+ UNSPEC_BLENDV))]
+ "TARGET_SSE4_1"
+ "pblendvb\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_pblendw"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (vec_merge:V8HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm")
+ (match_operand:V8HI 1 "register_operand" "0")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")))]
+ "TARGET_SSE4_1"
+ "pblendw\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_phminposuw"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (unspec:V8HI [(match_operand:V8HI 1 "nonimmediate_operand" "xm")]
+ UNSPEC_PHMINPOSUW))]
+ "TARGET_SSE4_1"
+ "phminposuw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv8qiv8hi2"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (sign_extend:V8HI
+ (vec_select:V8QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv8qiv8hi2"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (sign_extend:V8HI
+ (vec_select:V8QI
+ (vec_duplicate:V16QI
+ (match_operand:V8QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv4qiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (sign_extend:V4SI
+ (vec_select:V4QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv4qiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (sign_extend:V4SI
+ (vec_select:V4QI
+ (vec_duplicate:V16QI
+ (match_operand:V4QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv2qiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv2qiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2QI
+ (vec_duplicate:V16QI
+ (match_operand:V2QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxbq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv4hiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxwd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv4hiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (vec_duplicate:V8HI
+ (match_operand:V4HI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxwd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv2hiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxwq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv2hiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2HI
+ (vec_duplicate:V8HI
+ (match_operand:V2HI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxwq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_extendv2siv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_extendv2siv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (sign_extend:V2DI
+ (vec_select:V2SI
+ (vec_duplicate:V4SI
+ (match_operand:V2SI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovsxdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv8qiv8hi2"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (zero_extend:V8HI
+ (vec_select:V8QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv8qiv8hi2"
+ [(set (match_operand:V8HI 0 "register_operand" "=x")
+ (zero_extend:V8HI
+ (vec_select:V8QI
+ (vec_duplicate:V16QI
+ (match_operand:V8QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv4qiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (zero_extend:V4SI
+ (vec_select:V4QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv4qiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (zero_extend:V4SI
+ (vec_select:V4QI
+ (vec_duplicate:V16QI
+ (match_operand:V4QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv2qiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2QI
+ (match_operand:V16QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv2qiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2QI
+ (vec_duplicate:V16QI
+ (match_operand:V2QI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxbq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv4hiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (zero_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxwd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv4hiv4si2"
+ [(set (match_operand:V4SI 0 "register_operand" "=x")
+ (zero_extend:V4SI
+ (vec_select:V4HI
+ (vec_duplicate:V8HI
+ (match_operand:V4HI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxwd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv2hiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2HI
+ (match_operand:V8HI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxwq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv2hiv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2HI
+ (vec_duplicate:V8HI
+ (match_operand:V2HI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxwq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_zero_extendv2siv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "*sse4_1_zero_extendv2siv2di2"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (zero_extend:V2DI
+ (vec_select:V2SI
+ (vec_duplicate:V4SI
+ (match_operand:V2SI 1 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 1)]))))]
+ "TARGET_SSE4_1"
+ "pmovzxdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+;; ptest sets FLAGS_REG much as comiss and ucomiss do, but it is not
+;; really a compare instruction.
+(define_insn "sse4_1_ptest"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC [(match_operand:V2DI 0 "register_operand" "x")
+ (match_operand:V2DI 1 "nonimmediate_operand" "xm")]
+ UNSPEC_PTEST))]
+ "TARGET_SSE4_1"
+ "ptest\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecomi")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_1_roundpd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (unspec:V2DF [(match_operand:V2DF 1 "nonimmediate_operand" "xm")
+ (match_operand:SI 2 "const_0_to_15_operand" "n")]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1"
+ "roundpd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse4_1_roundps"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm")
+ (match_operand:SI 2 "const_0_to_15_operand" "n")]
+ UNSPEC_ROUND))]
+ "TARGET_SSE4_1"
+ "roundps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "sse4_1_roundsd"
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (vec_merge:V2DF
+ (unspec:V2DF [(match_operand:V2DF 2 "register_operand" "x")
+ (match_operand:SI 3 "const_0_to_15_operand" "n")]
+ UNSPEC_ROUND)
+ (match_operand:V2DF 1 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE4_1"
+ "roundsd\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "sse4_1_roundss"
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (vec_merge:V4SF
+ (unspec:V4SF [(match_operand:V4SF 2 "register_operand" "x")
+ (match_operand:SI 3 "const_0_to_15_operand" "n")]
+ UNSPEC_ROUND)
+ (match_operand:V4SF 1 "register_operand" "0")
+ (const_int 1)))]
+ "TARGET_SSE4_1"
+ "roundss\t{%3, %2, %0|%0, %2, %3}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "mode" "V4SF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Intel SSE4.2 string/text processing instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn_and_split "sse4_2_pcmpestr"
+ [(set (match_operand:SI 0 "register_operand" "=c,c")
+ (unspec:SI
+ [(match_operand:V16QI 2 "register_operand" "x,x")
+ (match_operand:SI 3 "register_operand" "a,a")
+ (match_operand:V16QI 4 "nonimmediate_operand" "x,m")
+ (match_operand:SI 5 "register_operand" "d,d")
+ (match_operand:SI 6 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPESTR))
+ (set (match_operand:V16QI 1 "register_operand" "=Y0,Y0")
+ (unspec:V16QI
+ [(match_dup 2)
+ (match_dup 3)
+ (match_dup 4)
+ (match_dup 5)
+ (match_dup 6)]
+ UNSPEC_PCMPESTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 2)
+ (match_dup 3)
+ (match_dup 4)
+ (match_dup 5)
+ (match_dup 6)]
+ UNSPEC_PCMPESTR))]
+ "TARGET_SSE4_2
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
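+  /* Emit only the variants whose outputs are live; a REG_UNUSED note
+     on ecx, xmm0 or the flags means that result is dead.  */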
+ int ecx = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[0]));
+ int xmm0 = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[1]));
+ int flags = !find_regno_note (curr_insn, REG_UNUSED, FLAGS_REG);
+
+ if (ecx)
+ emit_insn (gen_sse4_2_pcmpestri (operands[0], operands[2],
+ operands[3], operands[4],
+ operands[5], operands[6]));
+ if (xmm0)
+ emit_insn (gen_sse4_2_pcmpestrm (operands[1], operands[2],
+ operands[3], operands[4],
+ operands[5], operands[6]));
+ if (flags && !(ecx || xmm0))
+ emit_insn (gen_sse4_2_pcmpestr_cconly (operands[2], operands[3],
+ operands[4], operands[5],
+ operands[6]));
+ DONE;
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpestri"
+ [(set (match_operand:SI 0 "register_operand" "=c,c")
+ (unspec:SI
+ [(match_operand:V16QI 1 "register_operand" "x,x")
+ (match_operand:SI 2 "register_operand" "a,a")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:SI 4 "register_operand" "d,d")
+ (match_operand:SI 5 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPESTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)
+ (match_dup 4)
+ (match_dup 5)]
+ UNSPEC_PCMPESTR))]
+ "TARGET_SSE4_2"
+ "pcmpestri\t{%5, %3, %1|%1, %3, %5}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpestrm"
+ [(set (match_operand:V16QI 0 "register_operand" "=Y0,Y0")
+ (unspec:V16QI
+ [(match_operand:V16QI 1 "register_operand" "x,x")
+ (match_operand:SI 2 "register_operand" "a,a")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:SI 4 "register_operand" "d,d")
+ (match_operand:SI 5 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPESTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)
+ (match_dup 4)
+ (match_dup 5)]
+ UNSPEC_PCMPESTR))]
+ "TARGET_SSE4_2"
+ "pcmpestrm\t{%5, %3, %1|%1, %3, %5}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpestr_cconly"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_operand:V16QI 0 "register_operand" "x,x,x,x")
+ (match_operand:SI 1 "register_operand" "a,a,a,a")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,m,x,m")
+ (match_operand:SI 3 "register_operand" "d,d,d,d")
+ (match_operand:SI 4 "const_0_to_255_operand" "n,n,n,n")]
+ UNSPEC_PCMPESTR))
+ (clobber (match_scratch:V16QI 5 "=Y0,Y0,X,X"))
+ (clobber (match_scratch:SI 6 "= X, X,c,c"))]
+ "TARGET_SSE4_2"
+ "@
+ pcmpestrm\t{%4, %2, %0|%0, %2, %4}
+ pcmpestrm\t{%4, %2, %0|%0, %2, %4}
+ pcmpestri\t{%4, %2, %0|%0, %2, %4}
+ pcmpestri\t{%4, %2, %0|%0, %2, %4}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load,none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn_and_split "sse4_2_pcmpistr"
+ [(set (match_operand:SI 0 "register_operand" "=c,c")
+ (unspec:SI
+ [(match_operand:V16QI 2 "register_operand" "x,x")
+ (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
+ (match_operand:SI 4 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPISTR))
+ (set (match_operand:V16QI 1 "register_operand" "=Y0,Y0")
+ (unspec:V16QI
+ [(match_dup 2)
+ (match_dup 3)
+ (match_dup 4)]
+ UNSPEC_PCMPISTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 2)
+ (match_dup 3)
+ (match_dup 4)]
+ UNSPEC_PCMPISTR))]
+ "TARGET_SSE4_2
+ && !(reload_completed || reload_in_progress)"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+{
+ int ecx = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[0]));
+ int xmm0 = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[1]));
+ int flags = !find_regno_note (curr_insn, REG_UNUSED, FLAGS_REG);
+
+ if (ecx)
+ emit_insn (gen_sse4_2_pcmpistri (operands[0], operands[2],
+ operands[3], operands[4]));
+ if (xmm0)
+ emit_insn (gen_sse4_2_pcmpistrm (operands[1], operands[2],
+ operands[3], operands[4]));
+ if (flags && !(ecx || xmm0))
+ emit_insn (gen_sse4_2_pcmpistr_cconly (operands[2], operands[3],
+ operands[4]));
+ DONE;
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpistri"
+ [(set (match_operand:SI 0 "register_operand" "=c,c")
+ (unspec:SI
+ [(match_operand:V16QI 1 "register_operand" "x,x")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
+ (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPISTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)]
+ UNSPEC_PCMPISTR))]
+ "TARGET_SSE4_2"
+ "pcmpistri\t{%3, %2, %1|%1, %2, %3}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpistrm"
+ [(set (match_operand:V16QI 0 "register_operand" "=Y0,Y0")
+ (unspec:V16QI
+ [(match_operand:V16QI 1 "register_operand" "x,x")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
+ (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
+ UNSPEC_PCMPISTR))
+ (set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)]
+ UNSPEC_PCMPISTR))]
+ "TARGET_SSE4_2"
+ "pcmpistrm\t{%3, %2, %1|%1, %2, %3}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load")
+ (set_attr "mode" "TI")])
+
+(define_insn "sse4_2_pcmpistr_cconly"
+ [(set (reg:CC FLAGS_REG)
+ (unspec:CC
+ [(match_operand:V16QI 0 "register_operand" "x,x,x,x")
+ (match_operand:V16QI 1 "nonimmediate_operand" "x,m,x,m")
+ (match_operand:SI 2 "const_0_to_255_operand" "n,n,n,n")]
+ UNSPEC_PCMPISTR))
+ (clobber (match_scratch:V16QI 3 "=Y0,Y0,X,X"))
+ (clobber (match_scratch:SI 4 "= X, X,c,c"))]
+ "TARGET_SSE4_2"
+ "@
+ pcmpistrm\t{%2, %1, %0|%0, %1, %2}
+ pcmpistrm\t{%2, %1, %0|%0, %1, %2}
+ pcmpistri\t{%2, %1, %0|%0, %1, %2}
+ pcmpistri\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "memory" "none,load,none,load")
+ (set_attr "mode" "TI")])
+;; APPLE LOCAL end 5612787 mainline sse4
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/sync.md b/gcc-4.2.1-5666.3/gcc/config/i386/sync.md
new file mode 100644
index 000000000..8c2fdb230
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/sync.md
@@ -0,0 +1,291 @@
+;; GCC machine description for i386 synchronization instructions.
+;; Copyright (C) 2005, 2006
+;; Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+(define_mode_macro IMODE [QI HI SI (DI "TARGET_64BIT")])
+(define_mode_attr modesuffix [(QI "b") (HI "w") (SI "l") (DI "q")])
+(define_mode_attr modeconstraint [(QI "q") (HI "r") (SI "r") (DI "r")])
+(define_mode_attr immconstraint [(QI "i") (HI "i") (SI "i") (DI "e")])
+
+(define_mode_macro CASMODE [QI HI SI (DI "TARGET_64BIT || TARGET_CMPXCHG8B")
+ (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
+(define_mode_macro DCASMODE
+ [(DI "!TARGET_64BIT && TARGET_CMPXCHG8B && !flag_pic")
+ (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
+(define_mode_attr doublemodesuffix [(DI "8") (TI "16")])
+(define_mode_attr DCASHMODE [(DI "SI") (TI "DI")])
+
+;; ??? It would be possible to use cmpxchg8b on pentium for DImode
+;; changes. It's complicated because the insn uses ecx:ebx as the
+;; new value; note that the registers are reversed from the order
+;; that they'd be in with (reg:DI 2 ecx). Similarly for TImode
+;; data in 64-bit mode.
+
+(define_expand "sync_compare_and_swap<mode>"
+ [(parallel
+ [(set (match_operand:CASMODE 0 "register_operand" "")
+ (match_operand:CASMODE 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec_volatile:CASMODE
+ [(match_dup 1)
+ (match_operand:CASMODE 2 "register_operand" "")
+ (match_operand:CASMODE 3 "register_operand" "")]
+ UNSPECV_CMPXCHG_1))
+ (clobber (reg:CC FLAGS_REG))])]
+ "TARGET_CMPXCHG"
+{
+ if ((<MODE>mode == DImode && !TARGET_64BIT) || <MODE>mode == TImode)
+ {
+ enum machine_mode hmode = <MODE>mode == DImode ? SImode : DImode;
+ rtx low = simplify_gen_subreg (hmode, operands[3], <MODE>mode, 0);
+ rtx high = simplify_gen_subreg (hmode, operands[3], <MODE>mode,
+ GET_MODE_SIZE (hmode));
+ low = force_reg (hmode, low);
+ high = force_reg (hmode, high);
+ if (<MODE>mode == DImode)
+ emit_insn (gen_sync_double_compare_and_swapdi
+ (operands[0], operands[1], operands[2], low, high));
+ else if (<MODE>mode == TImode)
+ emit_insn (gen_sync_double_compare_and_swapti
+ (operands[0], operands[1], operands[2], low, high));
+ else
+ gcc_unreachable ();
+ DONE;
+ }
+})
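+
+;; A C-level usage sketch (illustrative; "cas" is a hypothetical
+;; wrapper around GCC's __sync builtin, which expands through the
+;; expander above):
+;;
+;;   long cas (volatile long *p, long oldv, long newv)
+;;   {
+;;     return __sync_val_compare_and_swap (p, oldv, newv);
+;;   }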
+
+(define_insn "*sync_compare_and_swap<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=a")
+ (match_operand:IMODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:IMODE
+ [(match_dup 1)
+ (match_operand:IMODE 2 "register_operand" "a")
+ (match_operand:IMODE 3 "register_operand" "<modeconstraint>")]
+ UNSPECV_CMPXCHG_1))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_CMPXCHG"
+ "lock\;cmpxchg{<modesuffix>}\t{%3, %1|%1, %3}")
+
+(define_insn "sync_double_compare_and_swap<mode>"
+ [(set (match_operand:DCASMODE 0 "register_operand" "=A")
+ (match_operand:DCASMODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:DCASMODE
+ [(match_dup 1)
+ (match_operand:DCASMODE 2 "register_operand" "A")
+ (match_operand:<DCASHMODE> 3 "register_operand" "b")
+ (match_operand:<DCASHMODE> 4 "register_operand" "c")]
+ UNSPECV_CMPXCHG_1))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;cmpxchg<doublemodesuffix>b\t%1")
+
+;; Theoretically we'd like to use constraint "r" (any reg) for operand
+;; 3, but that includes ecx.  If operands 3 and 4 are the same (as they
+;; are when the input is -1LL), GCC might choose to allocate operand 3
+;; to ecx, just like operand 4.  That breaks: the xchg below would move
+;; the PIC register contents into %ecx and clobber it.  Operands 3 and 4
+;; really need to be different registers, which in this case means
+;; operand 3 must not be ecx.  Instead of playing tricks with fake early
+;; clobbers or the like, we simply enumerate all the registers that are
+;; possible here, which (as this is !TARGET_64BIT) are just esi and edi.
+(define_insn "*sync_double_compare_and_swapdi_pic"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (match_operand:DI 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:DI
+ [(match_dup 1)
+ (match_operand:DI 2 "register_operand" "A")
+ (match_operand:SI 3 "register_operand" "SD")
+ (match_operand:SI 4 "register_operand" "c")]
+ UNSPECV_CMPXCHG_1))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_64BIT && TARGET_CMPXCHG8B && flag_pic"
+ "xchg{l}\t%%ebx, %3\;lock\;cmpxchg8b\t%1\;xchg{l}\t%%ebx, %3")
+
+(define_expand "sync_compare_and_swap_cc<mode>"
+ [(parallel
+ [(set (match_operand:CASMODE 0 "register_operand" "")
+ (match_operand:CASMODE 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec_volatile:CASMODE
+ [(match_dup 1)
+ (match_operand:CASMODE 2 "register_operand" "")
+ (match_operand:CASMODE 3 "register_operand" "")]
+ UNSPECV_CMPXCHG_1))
+ (set (match_dup 4)
+ (compare:CCZ
+ (unspec_volatile:CASMODE
+ [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPECV_CMPXCHG_2)
+ (match_dup 2)))])]
+ "TARGET_CMPXCHG"
+{
+ operands[4] = gen_rtx_REG (CCZmode, FLAGS_REG);
+ ix86_compare_op0 = operands[3];
+ ix86_compare_op1 = NULL;
+ ix86_compare_emitted = operands[4];
+ if ((<MODE>mode == DImode && !TARGET_64BIT) || <MODE>mode == TImode)
+ {
+ enum machine_mode hmode = <MODE>mode == DImode ? SImode : DImode;
+ rtx low = simplify_gen_subreg (hmode, operands[3], <MODE>mode, 0);
+ rtx high = simplify_gen_subreg (hmode, operands[3], <MODE>mode,
+ GET_MODE_SIZE (hmode));
+ low = force_reg (hmode, low);
+ high = force_reg (hmode, high);
+ if (<MODE>mode == DImode)
+ emit_insn (gen_sync_double_compare_and_swap_ccdi
+ (operands[0], operands[1], operands[2], low, high));
+ else if (<MODE>mode == TImode)
+ emit_insn (gen_sync_double_compare_and_swap_ccti
+ (operands[0], operands[1], operands[2], low, high));
+ else
+ gcc_unreachable ();
+ DONE;
+ }
+})
+
+(define_insn "*sync_compare_and_swap_cc<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=a")
+ (match_operand:IMODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:IMODE
+ [(match_dup 1)
+ (match_operand:IMODE 2 "register_operand" "a")
+ (match_operand:IMODE 3 "register_operand" "<modeconstraint>")]
+ UNSPECV_CMPXCHG_1))
+ (set (reg:CCZ FLAGS_REG)
+ (compare:CCZ
+ (unspec_volatile:IMODE
+ [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPECV_CMPXCHG_2)
+ (match_dup 2)))]
+ "TARGET_CMPXCHG"
+ "lock\;cmpxchg{<modesuffix>}\t{%3, %1|%1, %3}")
+
+(define_insn "sync_double_compare_and_swap_cc<mode>"
+ [(set (match_operand:DCASMODE 0 "register_operand" "=A")
+ (match_operand:DCASMODE 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:DCASMODE
+ [(match_dup 1)
+ (match_operand:DCASMODE 2 "register_operand" "A")
+ (match_operand:<DCASHMODE> 3 "register_operand" "b")
+ (match_operand:<DCASHMODE> 4 "register_operand" "c")]
+ UNSPECV_CMPXCHG_1))
+ (set (reg:CCZ FLAGS_REG)
+ (compare:CCZ
+ (unspec_volatile:DCASMODE
+ [(match_dup 1) (match_dup 2) (match_dup 3) (match_dup 4)]
+ UNSPECV_CMPXCHG_2)
+ (match_dup 2)))]
+ ""
+ "lock\;cmpxchg<doublemodesuffix>b\t%1")
+
+;; See above for the explanation of using the constraint "SD" for
+;; operand 3.
+(define_insn "*sync_double_compare_and_swap_ccdi_pic"
+ [(set (match_operand:DI 0 "register_operand" "=A")
+ (match_operand:DI 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:DI
+ [(match_dup 1)
+ (match_operand:DI 2 "register_operand" "A")
+ (match_operand:SI 3 "register_operand" "SD")
+ (match_operand:SI 4 "register_operand" "c")]
+ UNSPECV_CMPXCHG_1))
+ (set (reg:CCZ FLAGS_REG)
+ (compare:CCZ
+ (unspec_volatile:DI
+ [(match_dup 1) (match_dup 2) (match_dup 3) (match_dup 4)]
+ UNSPECV_CMPXCHG_2)
+ (match_dup 2)))]
+ "!TARGET_64BIT && TARGET_CMPXCHG8B && flag_pic"
+ "xchg{l}\t%%ebx, %3\;lock\;cmpxchg8b\t%1\;xchg{l}\t%%ebx, %3")
+
+(define_insn "sync_old_add<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=<modeconstraint>")
+ (unspec_volatile:IMODE
+ [(match_operand:IMODE 1 "memory_operand" "+m")] UNSPECV_XCHG))
+ (set (match_dup 1)
+ (plus:IMODE (match_dup 1)
+ (match_operand:IMODE 2 "register_operand" "0")))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_XADD"
+ "lock\;xadd{<modesuffix>}\t{%0, %1|%1, %0}")
+
+;; Recall that xchg implicitly asserts LOCK#, so an explicit lock
+;; prefix here would just waste space.
+(define_insn "sync_lock_test_and_set<mode>"
+ [(set (match_operand:IMODE 0 "register_operand" "=<modeconstraint>")
+ (unspec_volatile:IMODE
+ [(match_operand:IMODE 1 "memory_operand" "+m")] UNSPECV_XCHG))
+ (set (match_dup 1)
+ (match_operand:IMODE 2 "register_operand" "0"))]
+ ""
+ "xchg{<modesuffix>}\t{%1, %0|%0, %1}")
+
+(define_insn "sync_add<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "+m")
+ (unspec_volatile:IMODE
+ [(plus:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "<modeconstraint><immconstraint>"))]
+ UNSPECV_LOCK))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;add{<modesuffix>}\t{%1, %0|%0, %1}")
+
+(define_insn "sync_sub<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "+m")
+ (unspec_volatile:IMODE
+ [(minus:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "<modeconstraint><immconstraint>"))]
+ UNSPECV_LOCK))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;sub{<modesuffix>}\t{%1, %0|%0, %1}")
+
+(define_insn "sync_ior<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "+m")
+ (unspec_volatile:IMODE
+ [(ior:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "<modeconstraint><immconstraint>"))]
+ UNSPECV_LOCK))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;or{<modesuffix>}\t{%1, %0|%0, %1}")
+
+(define_insn "sync_and<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "+m")
+ (unspec_volatile:IMODE
+ [(and:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "<modeconstraint><immconstraint>"))]
+ UNSPECV_LOCK))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;and{<modesuffix>}\t{%1, %0|%0, %1}")
+
+(define_insn "sync_xor<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "+m")
+ (unspec_volatile:IMODE
+ [(xor:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "<modeconstraint><immconstraint>"))]
+ UNSPECV_LOCK))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "lock\;xor{<modesuffix>}\t{%1, %0|%0, %1}")
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin b/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin
new file mode 100644
index 000000000..5e6df6912
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin
@@ -0,0 +1,22 @@
+SHLIB_VERPFX = $(srcdir)/config/i386/darwin-libgcc
+# APPLE LOCAL begin 4099000
+LIB1ASMSRC = i386/lib1funcs.asm
+LIB1ASMFUNCS = _get_pc_thunk_ax _get_pc_thunk_dx _get_pc_thunk_cx _get_pc_thunk_bx _get_pc_thunk_si _get_pc_thunk_di _get_pc_thunk_bp
+# APPLE LOCAL end 4099000
+# APPLE LOCAL avoid try fat on thin system
+ifneq ($(shell lipo -info /usr/lib/libSystem.B.dylib | grep x86_64),)
+MULTILIB_OPTIONS = m64
+MULTILIB_DIRNAMES = x86_64
+# APPLE LOCAL avoid try fat on thin system
+endif
+LIB2_SIDITI_CONV_FUNCS=yes
+LIB2FUNCS_EXTRA = $(srcdir)/config/darwin-64.c
+
+# APPLE LOCAL begin gcov 5573505
+# -pipe is used because of an assembler bug, 4077127, which makes the
+# assembler mishandle the first # directive; temporary file names then
+# end up in stabs and break the bootstrap.  Using -pipe works around
+# this by avoiding temporary files altogether.
+TARGET_LIBGCC2_CFLAGS = -fPIC -pipe
+TARGET_LIBGCC2_STATIC_CFLAGS = -mmacosx-version-min=10.4
+# APPLE LOCAL end gcov 5573505
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin64 b/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin64
new file mode 100644
index 000000000..3670a125b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/t-darwin64
@@ -0,0 +1,12 @@
+SHLIB_VERPFX = $(srcdir)/config/i386/darwin-libgcc
+LIB2_SIDITI_CONV_FUNCS=yes
+LIB2FUNCS_EXTRA = $(srcdir)/config/darwin-64.c
+
+# APPLE LOCAL begin gcov 5573505
+# -pipe is used because of an assembler bug, 4077127, which makes the
+# assembler mishandle the first # directive; temporary file names then
+# end up in stabs and break the bootstrap.  Using -pipe works around
+# this by avoiding temporary files altogether.
+TARGET_LIBGCC2_CFLAGS = -fPIC -pipe
+TARGET_LIBGCC2_STATIC_CFLAGS = -mmacosx-version-min=10.4
+# APPLE LOCAL end gcov 5573505
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/t-gmm_malloc b/gcc-4.2.1-5666.3/gcc/config/i386/t-gmm_malloc
new file mode 100644
index 000000000..c37f8a759
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/t-gmm_malloc
@@ -0,0 +1,6 @@
+# Install gmm_malloc.h as mm_malloc.h.
+
+EXTRA_HEADERS += mm_malloc.h
+mm_malloc.h: $(srcdir)/config/i386/gmm_malloc.h
+ rm -f $@
+ cat $^ > $@
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/tmmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/tmmintrin.h
new file mode 100644
index 000000000..1bb254bfe
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/tmmintrin.h
@@ -0,0 +1,304 @@
+/* APPLE LOCAL file ssse3 4424835 */
+/* Copyright (C) 2006 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.1. */
+
+#ifndef _TMMINTRIN_H_INCLUDED
+#define _TMMINTRIN_H_INCLUDED
+
+#ifdef __SSSE3__
+#include <pmmintrin.h>
+
+/* APPLE LOCAL begin nodebug inline */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y);
+}
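+
+/* Usage sketch (illustrative; "sum16" is a hypothetical helper that
+   reduces all eight 16-bit lanes with repeated horizontal adds):
+
+     static inline short sum16 (__m128i v)
+     {
+       v = _mm_hadd_epi16 (v, v);
+       v = _mm_hadd_epi16 (v, v);
+       v = _mm_hadd_epi16 (v, v);
+       return (short) _mm_cvtsi128_si32 (v);
+     }
+*/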
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadds_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadd_pi32 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hadds_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsubs_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsub_pi32 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_hsubs_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_maddubs_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_maddubs_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhrs_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhrs_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_shuffle_epi8 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_shuffle_pi8 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_epi8 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_epi16 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_epi32 (__m128i __X, __m128i __Y)
+{
+ return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_pi8 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_pi16 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sign_pi32 (__m64 __X, __m64 __Y)
+{
+ return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y);
+}
+
+/* APPLE LOCAL begin 5814283 */
+#define _mm_alignr_epi8(__X, __Y, __N) \
+ ((__m128i)__builtin_ia32_palignr128 ((__v2di)(__X), (__v2di)(__Y), (__N) * 8))
+/* APPLE LOCAL end 5814283 */
+
+#define _mm_alignr_pi8(__X, __Y, __N) \
+ ((__m64)__builtin_ia32_palignr ((long long) (__X), (long long) (__Y), (__N) * 8))
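+/* For example, a hypothetical call _mm_alignr_epi8 (__hi, __lo, 4)
+   concatenates the 32 bytes __hi:__lo (with __hi in the upper half)
+   and returns the 16 bytes starting at byte offset 4; the (__N) * 8
+   above converts the byte count to the bit count the palignr
+   builtins expect.  */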
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_epi8 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_epi16 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128i __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_epi32 (__m128i __X)
+{
+ return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_pi8 (__m64 __X)
+{
+ return (__m64) __builtin_ia32_pabsb ((__v8qi)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_pi16 (__m64 __X)
+{
+ return (__m64) __builtin_ia32_pabsw ((__v4hi)__X);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_abs_pi32 (__m64 __X)
+{
+ return (__m64) __builtin_ia32_pabsd ((__v2si)__X);
+}
+
+/* APPLE LOCAL begin nodebug inline */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline */
+
+#endif /* __SSSE3__ */
+
+#endif /* _TMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin b/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin
new file mode 100644
index 000000000..025c5f4fb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin
@@ -0,0 +1,8 @@
+host-i386-darwin.o : $(srcdir)/config/i386/host-i386-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) \
+ config/host-darwin.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+# APPLE LOCAL begin use -mdynamic-no-pic to build x86-hosted compilers
+
+XCFLAGS = -mdynamic-no-pic
+# APPLE LOCAL end use -mdynamic-no-pic to build x86-hosted compilers
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin-x86_64 b/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin-x86_64
new file mode 100644
index 000000000..58e9f6753
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/x-darwin-x86_64
@@ -0,0 +1,8 @@
+# APPLE LOCAL file mainline
+# This file should go away.
+host-x86_64-darwin.o : $(srcdir)/config/i386/host-x86_64-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) \
+ config/host-darwin.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+XCFLAGS =
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/x-i386 b/gcc-4.2.1-5666.3/gcc/config/i386/x-i386
new file mode 100644
index 000000000..e156bcde3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/x-i386
@@ -0,0 +1,3 @@
+driver-i386.o : $(srcdir)/config/i386/driver-i386.c \
+ $(CONFIG_H) $(SYSTEM_H) $(TM_H) coretypes.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
diff --git a/gcc-4.2.1-5666.3/gcc/config/i386/xmmintrin.h b/gcc-4.2.1-5666.3/gcc/config/i386/xmmintrin.h
new file mode 100644
index 000000000..ad805b866
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/i386/xmmintrin.h
@@ -0,0 +1,1582 @@
+/* APPLE LOCAL file mainline 2005-06-30 Radar 4131077 */
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+ User Guide and Reference, version 9.0. */
+
+#ifndef _XMMINTRIN_H_INCLUDED
+#define _XMMINTRIN_H_INCLUDED
+
+#ifndef __SSE__
+# error "SSE instruction set not enabled"
+#else
+
+/* We need type definitions from the MMX header file. */
+#include <mmintrin.h>
+
+/* Get _mm_malloc () and _mm_free (). */
+/* APPLE LOCAL begin xmmintrin.h for kernel 4123064 */
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+/* APPLE LOCAL end xmmintrin.h for kernel 4123064 */
+
+/* The Intel API is flexible enough that we must allow aliasing with other
+ vector types, and their scalar components. */
+typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
+
+/* Internal data types for implementing the intrinsics. */
+typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+
+/* Create a selector for use with the SHUFPS instruction. */
+#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
+ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
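+/* For example, _MM_SHUFFLE (3, 2, 1, 0) yields
+   (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4, the identity
+   selector.  */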
+
+/* Constants for use with _mm_prefetch. */
+enum _mm_hint
+{
+ _MM_HINT_T0 = 3,
+ _MM_HINT_T1 = 2,
+ _MM_HINT_T2 = 1,
+ _MM_HINT_NTA = 0
+};
+
+/* Bits in the MXCSR. */
+#define _MM_EXCEPT_MASK 0x003f
+#define _MM_EXCEPT_INVALID 0x0001
+#define _MM_EXCEPT_DENORM 0x0002
+#define _MM_EXCEPT_DIV_ZERO 0x0004
+#define _MM_EXCEPT_OVERFLOW 0x0008
+#define _MM_EXCEPT_UNDERFLOW 0x0010
+#define _MM_EXCEPT_INEXACT 0x0020
+
+#define _MM_MASK_MASK 0x1f80
+#define _MM_MASK_INVALID 0x0080
+#define _MM_MASK_DENORM 0x0100
+#define _MM_MASK_DIV_ZERO 0x0200
+#define _MM_MASK_OVERFLOW 0x0400
+#define _MM_MASK_UNDERFLOW 0x0800
+#define _MM_MASK_INEXACT 0x1000
+
+#define _MM_ROUND_MASK 0x6000
+#define _MM_ROUND_NEAREST 0x0000
+#define _MM_ROUND_DOWN 0x2000
+#define _MM_ROUND_UP 0x4000
+#define _MM_ROUND_TOWARD_ZERO 0x6000
+
+#define _MM_FLUSH_ZERO_MASK 0x8000
+#define _MM_FLUSH_ZERO_ON 0x8000
+#define _MM_FLUSH_ZERO_OFF 0x0000
+
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#define __always_inline__ __always_inline__, __nodebug__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* APPLE LOCAL begin radar 5618945 */
+#undef __STATIC_INLINE
+#ifdef __GNUC_STDC_INLINE__
+#define __STATIC_INLINE __inline
+#else
+#define __STATIC_INLINE static __inline
+#endif
+/* APPLE LOCAL end radar 5618945 */
+
+/* Create a vector of zeros. */
+/* APPLE LOCAL begin radar 4152603 */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setzero_ps (void)
+{
+ return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
+}
+
+/* Perform the respective operation on the lower SPFP (single-precision
+ floating-point) values of A and B; the upper three SPFP values are
+ passed through from A. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_div_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sqrt_ss (__m128 __A)
+{
+ return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_rcp_ss (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_rsqrt_ss (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Perform the respective operation on the four SPFP values in A and B. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_add_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sub_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mul_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_div_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sqrt_ps (__m128 __A)
+{
+ return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_rcp_ps (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_rsqrt_ps (__m128 __A)
+{
+ return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Perform logical bit-wise operations on 128-bit values. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_and_ps (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_andps (__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_andnot_ps (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_andnps (__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_or_ps (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_orps (__A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_xor_ps (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_xorps (__A, __B);
+}
+
+/* Perform a comparison on the lower SPFP values of A and B. If the
+ comparison is true, place a mask of all ones in the result, otherwise a
+ mask of zeros. The upper three SPFP values are passed through from A. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmple_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movss ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_cmpltss ((__v4sf) __B,
+ (__v4sf)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpge_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movss ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_cmpless ((__v4sf) __B,
+ (__v4sf)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpneq_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnlt_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnle_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpngt_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movss ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_cmpnltss ((__v4sf) __B,
+ (__v4sf)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnge_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movss ((__v4sf) __A,
+ (__v4sf)
+ __builtin_ia32_cmpnless ((__v4sf) __B,
+ (__v4sf)
+ __A));
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpord_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpunord_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Perform a comparison on the four SPFP values of A and B. For each
+ element, if the comparison is true, place a mask of all ones in the
+ result, otherwise a mask of zeros. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpeq_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmplt_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmple_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpgt_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpge_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpneq_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnlt_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnle_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpngt_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpnge_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpord_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cmpunord_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Compare the lower SPFP values of A and B and return 1 if true
+ and 0 if false. */
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comieq_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comilt_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comile_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comigt_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comige_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_comineq_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomieq_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomilt_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomile_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomigt_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomige_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_ucomineq_ss (__m128 __A, __m128 __B)
+{
+ return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Convert the lower SPFP value to a 32-bit integer according to the current
+ rounding mode. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtss_si32 (__m128 __A)
+{
+ return __builtin_ia32_cvtss2si ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvt_ss2si (__m128 __A)
+{
+ return _mm_cvtss_si32 (__A);
+}
+
+#ifdef __x86_64__
+/* Convert the lower SPFP value to a 64-bit integer according to the
+   current rounding mode. */
+
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtss_si64 (__m128 __A)
+{
+ return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtss_si64x (__m128 __A)
+{
+ return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
+}
+#endif
+
+/* Convert the two lower SPFP values to 32-bit integers according to the
+ current rounding mode. Return the integers in packed form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtps_pi32 (__m128 __A)
+{
+ return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvt_ps2pi (__m128 __A)
+{
+ return _mm_cvtps_pi32 (__A);
+}
+
+/* Truncate the lower SPFP value to a 32-bit integer. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttss_si32 (__m128 __A)
+{
+ return __builtin_ia32_cvttss2si ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtt_ss2si (__m128 __A)
+{
+ return _mm_cvttss_si32 (__A);
+}
+
+#ifdef __x86_64__
+/* Truncate the lower SPFP value to a 64-bit integer. */
+
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttss_si64 (__m128 __A)
+{
+ return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE long long __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttss_si64x (__m128 __A)
+{
+ return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
+}
+#endif
+
+/* Truncate the two lower SPFP values to 32-bit integers. Return the
+ integers in packed form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvttps_pi32 (__m128 __A)
+{
+ return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtt_ps2pi (__m128 __A)
+{
+ return _mm_cvttps_pi32 (__A);
+}
+
+/* Convert B to a SPFP value and insert it as element zero in A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi32_ss (__m128 __A, int __B)
+{
+ return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvt_si2ss (__m128 __A, int __B)
+{
+ return _mm_cvtsi32_ss (__A, __B);
+}
+
+#ifdef __x86_64__
+/* Convert B to a SPFP value and insert it as element zero in A. */
+
+/* Intel intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64_ss (__m128 __A, long long __B)
+{
+ return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
+}
+
+/* Microsoft intrinsic. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtsi64x_ss (__m128 __A, long long __B)
+{
+ return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
+}
+#endif
+
+/* Convert the two 32-bit values in B to SPFP form and insert them
+ as the two lower elements in A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpi32_ps (__m128 __A, __m64 __B)
+{
+ return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvt_pi2ps (__m128 __A, __m64 __B)
+{
+ return _mm_cvtpi32_ps (__A, __B);
+}
+
+/* Convert the four signed 16-bit values in A to SPFP form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpi16_ps (__m64 __A)
+{
+ __v4hi __sign;
+ __v2si __hisi, __losi;
+ __v4sf __r;
+
+ /* This comparison against zero gives us a mask that can be used to
+ fill in the missing sign bits in the unpack operations below, so
+ that we get signed values after unpacking. */
+ __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);
+
+ /* Convert the four words to doublewords. */
+ __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
+ __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
+
+ /* Convert the doublewords to floating point two at a time. */
+ __r = (__v4sf) _mm_setzero_ps ();
+ __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
+ __r = __builtin_ia32_movlhps (__r, __r);
+ __r = __builtin_ia32_cvtpi2ps (__r, __losi);
+
+ return (__m128) __r;
+}
+
+/* Convert the four unsigned 16-bit values in A to SPFP form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpu16_ps (__m64 __A)
+{
+ __v2si __hisi, __losi;
+ __v4sf __r;
+
+ /* Convert the four words to doublewords. */
+ __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
+ __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
+
+ /* Convert the doublewords to floating point two at a time. */
+ __r = (__v4sf) _mm_setzero_ps ();
+ __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
+ __r = __builtin_ia32_movlhps (__r, __r);
+ __r = __builtin_ia32_cvtpi2ps (__r, __losi);
+
+ return (__m128) __r;
+}
+
+/* Convert the low four signed 8-bit values in A to SPFP form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpi8_ps (__m64 __A)
+{
+ __v8qi __sign;
+
+ /* This comparison against zero gives us a mask that can be used to
+ fill in the missing sign bits in the unpack operations below, so
+ that we get signed values after unpacking. */
+ __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);
+
+ /* Convert the four low bytes to words. */
+ __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);
+
+ return _mm_cvtpi16_ps(__A);
+}
+
+/* Convert the low four unsigned 8-bit values in A to SPFP form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpu8_ps(__m64 __A)
+{
+ __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
+ return _mm_cvtpu16_ps(__A);
+}
+
+/* Convert the four signed 32-bit values in A and B to SPFP form. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
+{
+ __v4sf __zero = (__v4sf) _mm_setzero_ps ();
+ __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
+ __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
+ return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
+}
+
+/* Convert the four SPFP values in A to four signed 16-bit integers. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtps_pi16(__m128 __A)
+{
+ __v4sf __hisf = (__v4sf)__A;
+ __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
+ __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
+ __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
+ return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
+}
+
+/* Convert the four SPFP values in A to four signed 8-bit integers. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtps_pi8(__m128 __A)
+{
+ __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
+ return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
+}
+
+/* Selects four specific SPFP values from A and B based on MASK. */
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
+{
+ return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
+}
+#else
+#define _mm_shuffle_ps(A, B, MASK) \
+ ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
+#endif
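+/* The two low result elements are selected from A and the two high
+   ones from B; e.g. with a hypothetical operand __a,
+   _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (0, 1, 2, 3)) reverses the
+   four elements of __a, as _mm_loadr_ps below does.  */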
+
+
+/* Selects and interleaves the upper two SPFP values from A and B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpackhi_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Selects and interleaves the lower two SPFP values from A and B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_unpacklo_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Sets the upper two SPFP values with 64 bits of data loaded from P;
+ the lower two values are passed through from A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadh_pi (__m128 __A, __m64 const *__P)
+{
+ return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
+}
+
+/* Stores the upper two SPFP values of A into P. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storeh_pi (__m64 *__P, __m128 __A)
+{
+ __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
+}
+
+/* Moves the upper two values of B into the lower two values of A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movehl_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Moves the lower two values of B into the upper two values of A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movelh_ps (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Sets the lower two SPFP values with 64 bits of data loaded from P;
+ the upper two values are passed through from A. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadl_pi (__m128 __A, __m64 const *__P)
+{
+ return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
+}
+
+/* Stores the lower two SPFP values of A into P. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storel_pi (__m64 *__P, __m128 __A)
+{
+ __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
+}
+
+/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movemask_ps (__m128 __A)
+{
+ return __builtin_ia32_movmskps ((__v4sf)__A);
+}
+
+/* Return the contents of the control register. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_getcsr (void)
+{
+ return __builtin_ia32_stmxcsr ();
+}
+
+/* Read exception bits from the control register. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_GET_EXCEPTION_STATE (void)
+{
+ return _mm_getcsr() & _MM_EXCEPT_MASK;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_GET_EXCEPTION_MASK (void)
+{
+ return _mm_getcsr() & _MM_MASK_MASK;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_GET_ROUNDING_MODE (void)
+{
+ return _mm_getcsr() & _MM_ROUND_MASK;
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE unsigned int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_GET_FLUSH_ZERO_MODE (void)
+{
+ return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
+}
+
+/* Set the control register to I. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setcsr (unsigned int __I)
+{
+ __builtin_ia32_ldmxcsr (__I);
+}
+
+/* Set exception bits in the control register. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_SET_EXCEPTION_STATE(unsigned int __mask)
+{
+ _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_SET_EXCEPTION_MASK (unsigned int __mask)
+{
+ _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_SET_ROUNDING_MODE (unsigned int __mode)
+{
+ _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
+{
+ _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
+}
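+/* A typical use of these helpers, assuming truncating conversions are
+   wanted from here on:
+     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
+   Each setter rewrites only the field selected by the corresponding
+   mask and preserves the rest of the MXCSR.  */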
+
+/* Create a vector with element 0 as F and the rest zero. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_ss (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
+}
+
+/* Create a vector with all four elements equal to F. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set1_ps (float __F)
+{
+ return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_ps1 (float __F)
+{
+ return _mm_set1_ps (__F);
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_ss (float const *__P)
+{
+ return _mm_set_ss (*__P);
+}
+
+/* Create a vector with all four elements equal to *P. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load1_ps (float const *__P)
+{
+ return _mm_set1_ps (*__P);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_ps1 (float const *__P)
+{
+ return _mm_load1_ps (__P);
+}
+
+/* Load four SPFP values from P. The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_load_ps (float const *__P)
+{
+ return (__m128) *(__v4sf *)__P;
+}
+
+/* Load four SPFP values from P. The address need not be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadu_ps (float const *__P)
+{
+ return (__m128) __builtin_ia32_loadups (__P);
+}
+
+/* Load four SPFP values in reverse order. The address must be aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_loadr_ps (float const *__P)
+{
+ __v4sf __tmp = *(__v4sf *)__P;
+ return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
+}
+
+/* Create the vector [Z Y X W]. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
+}
+
+/* Create the vector [W X Y Z]. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_setr_ps (float __Z, float __Y, float __X, float __W)
+{
+ return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
+}
+
+/* Stores the lower SPFP value. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_ss (float *__P, __m128 __A)
+{
+ *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE float __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_cvtss_f32 (__m128 __A)
+{
+ return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
+}
+
+/* Store four SPFP values. The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_ps (float *__P, __m128 __A)
+{
+ *(__v4sf *)__P = (__v4sf)__A;
+}
+
+/* Store four SPFP values. The address need not be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storeu_ps (float *__P, __m128 __A)
+{
+ __builtin_ia32_storeups (__P, (__v4sf)__A);
+}
+
+/* Store the lower SPFP value across four words. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store1_ps (float *__P, __m128 __A)
+{
+ __v4sf __va = (__v4sf)__A;
+ __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
+ _mm_storeu_ps (__P, __tmp);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_store_ps1 (float *__P, __m128 __A)
+{
+ _mm_store1_ps (__P, __A);
+}
+
+/* Store four SPFP values in reverse order. The address must be aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_storer_ps (float *__P, __m128 __A)
+{
+ __v4sf __va = (__v4sf)__A;
+ __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
+ _mm_store_ps (__P, __tmp);
+}
+
+/* Sets the low SPFP value of A from the low value of B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m128 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_move_ss (__m128 __A, __m128 __B)
+{
+ return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
+}
+
+/* Extracts one of the four words of A. The selector N must be immediate. */
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_extract_pi16 (__m64 const __A, int const __N)
+{
+ return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pextrw (__m64 const __A, int const __N)
+{
+ return _mm_extract_pi16 (__A, __N);
+}
+#else
+#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
+#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
+#endif
+
+/* Inserts word D into one of four words of A. The selector N must be
+ immediate. */
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
+{
+ return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pinsrw (__m64 const __A, int const __D, int const __N)
+{
+ return _mm_insert_pi16 (__A, __D, __N);
+}
+#else
+#define _mm_insert_pi16(A, D, N) \
+ ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
+#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
+#endif
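+/* For example, with a hypothetical __m64 value __a,
+   _mm_insert_pi16 (__a, 7, 2) returns __a with word 2 replaced by 7,
+   and _mm_extract_pi16 on the result with N == 2 reads it back.  */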
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmaxsw (__m64 __A, __m64 __B)
+{
+ return _mm_max_pi16 (__A, __B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmaxub (__m64 __A, __m64 __B)
+{
+ return _mm_max_pu8 (__A, __B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pminsw (__m64 __A, __m64 __B)
+{
+ return _mm_min_pi16 (__A, __B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pminub (__m64 __A, __m64 __B)
+{
+ return _mm_min_pu8 (__A, __B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_movemask_pi8 (__m64 __A)
+{
+ return __builtin_ia32_pmovmskb ((__v8qi)__A);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE int __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmovmskb (__m64 __A)
+{
+ return _mm_movemask_pi8 (__A);
+}
+
+/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
+ in B and produce the high 16 bits of the 32-bit results. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_mulhi_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pmulhuw (__m64 __A, __m64 __B)
+{
+ return _mm_mulhi_pu16 (__A, __B);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_shuffle_pi16 (__m64 __A, int __N)
+{
+ return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pshufw (__m64 __A, int __N)
+{
+ return _mm_shuffle_pi16 (__A, __N);
+}
+#else
+#define _mm_shuffle_pi16(A, N) \
+ ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
+#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
+#endif
+
+/* Conditionally store byte elements of A into P. The high bit of each
+ byte in the selector N determines whether the corresponding byte from
+ A is stored. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
+{
+ __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
+}
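+/* For example, _mm_maskmove_si64 (__data, __mask, __p), with
+   hypothetical operands, writes byte i of __data to __p[i] exactly
+   when byte i of __mask has its most significant bit set; the other
+   bytes at __p are left untouched.  */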
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_maskmovq (__m64 __A, __m64 __N, char *__P)
+{
+ _mm_maskmove_si64 (__A, __N, __P);
+}
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pavgb (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu8 (__A, __B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_pavgw (__m64 __A, __m64 __B)
+{
+ return _mm_avg_pu16 (__A, __B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE __m64 __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_m_psadbw (__m64 __A, __m64 __B)
+{
+ return _mm_sad_pu8 (__A, __B);
+}
+
+/* Loads one cache line from address P to a location "closer" to the
+ processor. The selector I specifies the type of prefetch operation. */
+#if 0
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_prefetch (void *__P, enum _mm_hint __I)
+{
+ __builtin_prefetch (__P, 0, __I);
+}
+#else
+#define _mm_prefetch(P, I) \
+ __builtin_prefetch ((P), 0, (I))
+#endif
+
+/* Stores the data in A to the address P without polluting the caches. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_stream_pi (__m64 *__P, __m64 __A)
+{
+ /* APPLE LOCAL 4656532 use V1DImode for _m64 */
+ __builtin_ia32_movntq (__P, __A);
+}
+
+/* Likewise. The address must be 16-byte aligned. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_stream_ps (float *__P, __m128 __A)
+{
+ __builtin_ia32_movntps (__P, (__v4sf)__A);
+}
+
+/* Guarantees that every preceding store is globally visible before
+ any subsequent store. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_sfence (void)
+{
+ __builtin_ia32_sfence ();
+}
+
+/* The execution of the next instruction is delayed by an implementation
+ specific amount of time. The instruction does not modify the
+ architectural state. */
+/* APPLE LOCAL begin radar 5618945 */
+__STATIC_INLINE void __attribute__((__always_inline__))
+/* APPLE LOCAL end radar 5618945 */
+_mm_pause (void)
+{
+ __asm__ __volatile__ ("rep; nop" : : );
+}
+/* APPLE LOCAL end radar 4152603 */
+
+/* Transpose the 4x4 matrix composed of row[0-3]. */
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
+ __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
+ __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
+ __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
+ __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
+ (row0) = __builtin_ia32_movlhps (__t0, __t1); \
+ (row1) = __builtin_ia32_movhlps (__t1, __t0); \
+ (row2) = __builtin_ia32_movlhps (__t2, __t3); \
+ (row3) = __builtin_ia32_movhlps (__t3, __t2); \
+} while (0)
+
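The transpose works entirely in registers; a usage sketch (hypothetical
transpose4x4 helper, assuming each row is 16-byte aligned):

    void
    transpose4x4 (float m[4][4])
    {
      __m128 r0 = _mm_load_ps (m[0]);
      __m128 r1 = _mm_load_ps (m[1]);
      __m128 r2 = _mm_load_ps (m[2]);
      __m128 r3 = _mm_load_ps (m[3]);
      _MM_TRANSPOSE4_PS (r0, r1, r2, r3);
      _mm_store_ps (m[0], r0);
      _mm_store_ps (m[1], r1);
      _mm_store_ps (m[2], r2);
      _mm_store_ps (m[3], r3);
    }
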
+/* APPLE LOCAL begin nodebug inline 4152603 */
+#undef __always_inline__
+/* APPLE LOCAL end nodebug inline 4152603 */
+
+/* For backward source compatibility. */
+#include <emmintrin.h>
+
+#endif /* __SSE__ */
+#endif /* _XMMINTRIN_H_INCLUDED */
diff --git a/gcc-4.2.1-5666.3/gcc/config/interix.h b/gcc-4.2.1-5666.3/gcc/config/interix.h
new file mode 100644
index 000000000..0fa5dee87
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/interix.h
@@ -0,0 +1,121 @@
+/* Operating system specific defines to be used when targeting GCC for
+ Interix
+ Copyright (C) 1994, 1995, 1999, 2002, 2004 Free Software Foundation, Inc.
+ Donn Terry, Softway Systems, Inc. (donn@softway.com)
+ Modified from code
+ Contributed by Douglas B. Rupp (drupp@cs.washington.edu).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* POSIX/single-threaded only for now. See the winnt version
+ for Windows/multi-threaded support. */
+
+/* We need to list -lc and -lcpsx more than once because the two
+ libraries refer to each other; that should go away someday. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "\
+ %{!shared:%{!dynamic:-lc -lcpsx -lc -lcpsx %$INTERIX_ROOT/usr/lib/psxdll.a \
+ %$INTERIX_ROOT/usr/lib/psxdll2.a \
+ }} \
+ %{!G:%{!dynamic:-lc -lcpsx -lc -lcpsx %$INTERIX_ROOT/usr/lib/psxdll.a \
+ %$INTERIX_ROOT/usr/lib/psxdll2.a \
+ }} \
+ %{dynamic:-lc %$INTERIX_ROOT/usr/lib/psxdll.a \
+ %$INTERIX_ROOT/usr/lib/psxdll2.a \
+ } \
+ %{v}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{!shared:-stack 0x400000,0x10000} \
+ -subsystem posix \
+ %{g} \
+ %{dynamic:-Bdynamic} \
+ %{static:-Bstatic} \
+ %{shared:--shared -Bdynamic} \
+ %{G:--shared -Bdynamic} \
+ %{symbolic:--shared -Bsymbolic -Bdynamic} \
+ %{soname*:--soname %*} \
+ %{rpath*:--rpath %*} \
+ "
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}} %{shared:crti%O%s}"
+
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ ((DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp(STR, "rpath")) \
+ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \
+ && strcmp (STR, "Tbss"))
+
+
+#define STDC_0_IN_SYSTEM_HEADERS 1
+
+#define HANDLE_SYSV_PRAGMA 1
+#undef HANDLE_PRAGMA_WEAK /* until the link format can handle it */
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define DBX_DEBUGGING_INFO 1
+#define SDB_DEBUGGING_INFO 1
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+
+/* TARGET_DEFAULT from configure */
+
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+
+/* Our strategy for finding global constructors differs slightly from
+ the usual one. */
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ int i; \
+ unsigned long nptrs; \
+ func_ptr *p; \
+ asm( \
+ " .section .ctor_head, \"rw\"\n" \
+ "1:\n" \
+ " .text \n" \
+ ASM_LOAD_ADDR(1b,%0) \
+ : "=r" (p) : : "cc"); \
+ for (nptrs = 0; p[nptrs] != 0; nptrs++); \
+ for (i = nptrs-1; i >= 0; i--) \
+ p[i] (); \
+} while (0)
+
+#define DO_GLOBAL_DTORS_BODY \
+do { \
+ func_ptr *p; \
+ asm( \
+ " .section .dtor_head, \"rw\"\n" \
+ "1:\n" \
+ " .text \n" \
+ ASM_LOAD_ADDR(1b,%0) \
+ : "=r" (p) : : "cc"); \
+ while (*p) \
+ { \
+ p++; \
+ (*(p-1)) (); \
+ } \
+} while (0)
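Stripped of the section-placement asm, both bodies simply walk a
null-terminated table of function pointers; a standalone sketch of the
same logic (hypothetical run_ctors/run_dtors helpers, assuming the
table pointer has already been obtained):

    typedef void (*func_ptr) (void);

    static void
    run_ctors (func_ptr *table)
    {
      unsigned long n;
      for (n = 0; table[n] != 0; n++)
        ;                       /* count the entries */
      while (n-- > 0)
        table[n] ();            /* constructors run in reverse order */
    }

    static void
    run_dtors (func_ptr *table)
    {
      while (*table)
        (*table++) ();          /* destructors run in forward order */
    }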
diff --git a/gcc-4.2.1-5666.3/gcc/config/interix3.h b/gcc-4.2.1-5666.3/gcc/config/interix3.h
new file mode 100644
index 000000000..f7c3aa612
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/interix3.h
@@ -0,0 +1,35 @@
+/* Operating system specific defines to be used when targeting GCC for
+ Interix version 3.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Douglas B. Rupp (rupp@gnat.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Interix 3.x has a single rooted file system and properly named
+ libraries, so LIB_SPEC can be simplified */
+
+#undef LIB_SPEC
+#define LIB_SPEC "\
+ %{!shared:%{!dynamic:-lc -lpsxdll \
+ }} \
+ %{!G:%{!dynamic:-lc -lpsxdll \
+ }} \
+ %{dynamic:-lc -lpsxdll \
+ } \
+ %{v}"
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/kaos.h b/gcc-4.2.1-5666.3/gcc/config/kaos.h
new file mode 100644
index 000000000..2425fd4ea
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/kaos.h
@@ -0,0 +1,31 @@
+/* Definitions of target machine for GCC.
+ common kaOS definitions for all architectures.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Specify predefined symbols in preprocessor. */
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__kaOS__"); \
+ } while (0)
+
+
+/* do not link any library implicitly for kaOS target. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
diff --git a/gcc-4.2.1-5666.3/gcc/config/kfreebsd-gnu.h b/gcc-4.2.1-5666.3/gcc/config/kfreebsd-gnu.h
new file mode 100644
index 000000000..8c11ca501
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/kfreebsd-gnu.h
@@ -0,0 +1,38 @@
+/* Definitions for kFreeBSD-based GNU systems with ELF format
+ Copyright (C) 2004, 2006
+ Free Software Foundation, Inc.
+ Contributed by Robert Millan.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef LINUX_TARGET_OS_CPP_BUILTINS
+#define LINUX_TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__FreeBSD_kernel__"); \
+ builtin_define ("__GLIBC__"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } \
+ while (0)
+
+#ifdef GLIBC_DYNAMIC_LINKER
+#undef GLIBC_DYNAMIC_LINKER
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/knetbsd-gnu.h b/gcc-4.2.1-5666.3/gcc/config/knetbsd-gnu.h
new file mode 100644
index 000000000..e1ff325b8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/knetbsd-gnu.h
@@ -0,0 +1,39 @@
+/* Definitions for kNetBSD-based GNU systems with ELF format
+ Copyright (C) 2004, 2006
+ Free Software Foundation, Inc.
+ Contributed by Robert Millan.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef LINUX_TARGET_OS_CPP_BUILTINS
+#define LINUX_TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__NetBSD_kernel__"); \
+ builtin_define ("__GLIBC__"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } \
+ while (0)
+
+
+#ifdef GLIBC_DYNAMIC_LINKER
+#undef GLIBC_DYNAMIC_LINKER
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/libgcc-glibc.ver b/gcc-4.2.1-5666.3/gcc/config/libgcc-glibc.ver
new file mode 100644
index 000000000..837c1a7ee
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/libgcc-glibc.ver
@@ -0,0 +1,23 @@
+# In order to work around the very problems that force us to now generally
+# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
+# By choosing the same version tags for these specific routines now, we
+# maintain enough binary compatibility to allow future versions of glibc
+# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
+
+%inherit GCC_3.0 GLIBC_2.0
+GLIBC_2.0 {
+ # Sampling of DImode arithmetic used by (at least) i386 and m68k.
+ __divdi3
+ __moddi3
+ __udivdi3
+ __umoddi3
+
+ # Exception handling support functions used by almost everyone.
+ __register_frame
+ __register_frame_table
+ __deregister_frame
+ __register_frame_info
+ __deregister_frame_info
+ __frame_state_for
+ __register_frame_info_table
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/libgloss.h b/gcc-4.2.1-5666.3/gcc/config/libgloss.h
new file mode 100644
index 000000000..82de626c3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/libgloss.h
@@ -0,0 +1,38 @@
+/* libgloss.h -- operating system specific defines to be used when
+ targeting GCC for Libgloss supported targets.
+ Copyright (C) 1996, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This file should not be used for ELF targets, as this definition of
+ STARTFILE_SPEC is all wrong. */
+
+/* By libgloss convention, the crt0 file name is derived from the
+ command-line option in use. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:%{pg:pgcrt0%O%s}%{!pg:%{p:pcrt0%O%s}%{!p:crt0%O%s}}}"
+
+/* This file used to force LINK_SPEC to be the null string, but that is not
+ correct. LINK_SPEC is used to pass machine specific arguments to the
+ linker and hence cannot be redefined here. LINK_SPEC is never used to
+ specify startup files or libraries, so it should never conflict with
+ libgloss. */
+
+/* Don't set the target flags; this is done by the linker script. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
diff --git a/gcc-4.2.1-5666.3/gcc/config/linux.h b/gcc-4.2.1-5666.3/gcc/config/linux.h
new file mode 100644
index 000000000..59e3e8538
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/linux.h
@@ -0,0 +1,129 @@
+/* Definitions for Linux-based GNU systems with ELF format
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by Eric Youngdale.
+ Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#if defined HAVE_LD_PIE
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#else
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p|profile:gcrt1.o%s;:crt1.o%s}} \
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#endif
+
+/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+
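Taken together with the STARTFILE_SPEC above, an ordinary link (no
-shared, -pie, -p or -pg, on a toolchain configured with HAVE_LD_PIE)
brackets the user's objects like this, while -pie swaps in Scrt1.o,
crtbeginS.o and crtendS.o:

    crt1.o crti.o crtbegin.o <objects, libraries> crtend.o crtn.o
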
+/* This is for -profile to use -lc_p instead of -lc. */
+#ifndef CC1_SPEC
+#define CC1_SPEC "%{profile:-p}"
+#endif
+
+/* The GNU C++ standard library requires that these macros be defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{pthread:-lpthread} \
+ %{shared:-lc} \
+ %{!shared:%{mieee-fp:-lieee} %{profile:-lc_p}%{!profile:-lc}}"
+
+#define LINUX_TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__gnu_linux__"); \
+ builtin_define_std ("linux"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=linux"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } while (0)
+
+#if defined(HAVE_LD_EH_FRAME_HDR)
+#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#endif
+
+/* Define this so we can compile MS code for use with WINE. */
+#define HANDLE_PRAGMA_PACK_PUSH_POP
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
+/* Determine which dynamic linker to use depending on whether GLIBC or
+ uClibc is the default C library and whether -muclibc or -mglibc has
+ been passed to change the default. */
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+
+/* For most targets the following definitions suffice;
+ GLIBC_DYNAMIC_LINKER must be defined for each target using them, or
+ GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets
+ supporting both 32-bit and 64-bit compilation. */
+#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0"
+#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0"
+#define LINUX_DYNAMIC_LINKER \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER)
+#define LINUX_DYNAMIC_LINKER32 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER32, UCLIBC_DYNAMIC_LINKER32)
+#define LINUX_DYNAMIC_LINKER64 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER64, UCLIBC_DYNAMIC_LINKER64)
+
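As a worked example, on a glibc-default configuration whose target
defines GLIBC_DYNAMIC_LINKER as /lib/ld-linux.so.2 (the i386 value),
LINUX_DYNAMIC_LINKER pastes together into:

    %{muclibc:%{mglibc:%e-mglibc and -muclibc used together}/lib/ld-uClibc.so.0;:/lib/ld-linux.so.2}

so -muclibc selects the uClibc loader (and combining it with -mglibc is
rejected), while the default case falls through to the glibc loader.
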
+/* Determine whether the entire c99 runtime
+ is present in the runtime library. */
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
+
+#define TARGET_POSIX_IO
diff --git a/gcc-4.2.1-5666.3/gcc/config/linux.opt b/gcc-4.2.1-5666.3/gcc/config/linux.opt
new file mode 100644
index 000000000..3f615bb51
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/linux.opt
@@ -0,0 +1,29 @@
+; Processor-independent options for GNU/Linux.
+;
+; Copyright (C) 2006 Free Software Foundation, Inc.
+; Contributed by CodeSourcery.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+muclibc
+Target RejectNegative Report Mask(UCLIBC) Var(linux_uclibc) Init(UCLIBC_DEFAULT ? OPTION_MASK_UCLIBC : 0)
+Use uClibc instead of GNU libc
+
+mglibc
+Target RejectNegative Report InverseMask(UCLIBC, GLIBC) Var(linux_uclibc) VarExists
+Use GNU libc instead of uClibc
diff --git a/gcc-4.2.1-5666.3/gcc/config/lynx.h b/gcc-4.2.1-5666.3/gcc/config/lynx.h
new file mode 100644
index 000000000..271fb626d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/lynx.h
@@ -0,0 +1,178 @@
+/* Target independent definitions for LynxOS.
+ Copyright (C) 1993, 1994, 1995, 1996, 1999, 2000, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* In this file we set up defaults that can be chosen by
+ <target>/lynx.h files. A target-specific lynx.h file can decide
+ either to define and override these definitions or to use them by
+ ensuring they are undefined at this point. If we were to #undef
+ them here we might accidentally disable some target-specific
+ defines. */
+
+#ifndef EXTRA_OS_LYNX_TARGET_SPECS
+# define EXTRA_OS_LYNX_TARGET_SPECS
+#endif
+
+#ifndef EXTRA_OS_LYNX_SPECS
+# define EXTRA_OS_LYNX_SPECS \
+ { "cpp_os_lynx", CPP_OS_LYNX_SPEC }, \
+ { "lib_os_lynx", LIB_OS_LYNX_SPEC }, \
+ { "link_os_lynx", LINK_OS_LYNX_SPEC }, \
+ { "startfile_os_lynx", STARTFILE_OS_LYNX_SPEC }, \
+ { "endfile_os_lynx", ENDFILE_OS_LYNX_SPEC }, \
+ EXTRA_OS_LYNX_TARGET_SPECS
+#endif
+
+#ifndef SUBTARGET_EXTRA_SPECS
+# define SUBTARGET_EXTRA_SPECS EXTRA_OS_LYNX_SPECS
+#endif
+
+#ifndef CPP_SPEC
+# define CPP_SPEC "%(cpp_cpu) %(cpp_os_lynx)"
+#endif
+
+#ifndef LIB_SPEC
+# define LIB_SPEC "%(lib_os_lynx)"
+#endif
+
+#ifndef LINK_SPEC
+# define LINK_SPEC "%(link_os_lynx)"
+#endif
+
+#ifndef STARTFILE_SPEC
+# define STARTFILE_SPEC "%(startfile_os_lynx)"
+#endif
+
+#ifndef ENDFILE_SPEC
+# define ENDFILE_SPEC "%(endfile_os_lynx)"
+#endif
+
+#ifndef CPP_OS_LYNX_SPEC
+# define CPP_OS_LYNX_SPEC \
+"%{mthreads: \
+ %{mlegacy-threads: \
+ %ecannot use mthreads and mlegacy-threads together}} \
+ %{mthreads: -D_MULTITHREADED} \
+ %{mlegacy-threads: -D_THREADS_POSIX4ad4} \
+ -Asystem=lynx -Asystem=unix -D__Lynx__ -D__unix__"
+#endif
+
+#ifndef LIB_OS_LYNX_SPEC
+# define LIB_OS_LYNX_SPEC \
+"%{mlegacy-threads:-lposix-pre1c} -lm -lc"
+#endif
+
+/* We link static executables for LynxOS by default unless -mshared is
+ used when linking an executable. Along the same line, we link to
+ shared libraries when linking a shared object by default unless
+ -static is used.
+
+ We have to pass in our -L options here otherwise the translated
+ startfile directories (%D) will take priority over this.
+ Furthermore since we have to pass in -L options here we have to
+ make sure that -L options provided by the user take priority over
+ everything we specify. */
+
+#ifndef LINK_OS_LYNX_SPEC
+# define LINK_OS_LYNX_SPEC \
+"%{shared} %{static} \
+ %{mshared: %{static: %ecannot use mshared and static together}} \
+ %{!mshared: %{!shared: %{!static: -static}}} \
+ %{L*} \
+ %{mthreads: \
+ %{mshared: -L/lib/thread/shlib -rpath /lib/thread/shlib} \
+ %{shared: \
+ %{!static: -L/lib/thread/shlib -rpath /lib/thread/shlib} \
+ %{!mshared: -L/lib/thread}} \
+ %{shared: %{static: -L/lib/thread}}} \
+ %{!mthreads: \
+ %{mshared: -L/lib/shlib -rpath /lib/shlib} \
+ %{shared: -L/lib/shlib -rpath /lib/shlib}} \
+ %{mlegacy-threads:-lposix-pre1c} -lm -lc"
+#endif
+
+#ifndef STARTFILE_OS_LYNX_SPEC
+# define STARTFILE_OS_LYNX_SPEC \
+"%{!shared: \
+ %{!mthreads: \
+ %{p:gcrt1.o%s} %{pg:gcrt1.o%s} \
+ %{!p:%{!pg:crt1.o%s}}} \
+ %{mthreads: \
+ %{p:thread/gcrt1.o%s} %{pg:thread/gcrt1.o%s} \
+ %{!p:%{!pg:thread/crt1.o%s }}}}\
+ %{mthreads: thread/crti.o%s} %{!mthreads: crti.o%s} \
+ %{!shared: crtbegin.o%s} \
+ %{shared: crtbeginS.o%s}"
+#endif
+
+#ifndef ENDFILE_OS_LYNX_SPEC
+# define ENDFILE_OS_LYNX_SPEC \
+"%{!shared: crtend.o%s} \
+ %{shared: crtendS.o%s} \
+ %{mthreads: thread/crtn.o%s} %{!mthreads: crtn.o%s}"
+#endif
+
+/* Define the actual types of some ANSI-mandated types. */
+
+#ifndef SIZE_TYPE
+# define SIZE_TYPE "unsigned int"
+#endif
+
+#ifndef PTRDIFF_TYPE
+# define PTRDIFF_TYPE "int"
+#endif
+
+#ifndef WCHAR_TYPE
+# define WCHAR_TYPE "long int"
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+# define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+
+/* Define ASM_OUTPUT_ALIGN to use the .balign directive rather that
+ the .align directive with GAS. */
+
+#ifndef ASM_OUTPUT_ALIGN
+# define ASM_OUTPUT_ALIGN(FILE, LOG) \
+ do \
+ { \
+ if ((LOG) != 0) \
+ fprintf ((FILE), "\t.balign %d\n", 1 << (LOG)); \
+ } \
+ while (0)
+#endif
+
+/* Keep the *_DEBUGGING_INFO defines from elfos.h except that stabs is
+ the default on LynxOS. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+# define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+#endif
+
+/* We have C++ support in our system headers. */
+
+#ifndef NO_IMPLICIT_EXTERN_C
+# define NO_IMPLICIT_EXTERN_C
+#endif
+
+#ifndef TARGET_POSIX_IO
+# define TARGET_POSIX_IO
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/lynx.opt b/gcc-4.2.1-5666.3/gcc/config/lynx.opt
new file mode 100644
index 000000000..d576ef84f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/lynx.opt
@@ -0,0 +1,32 @@
+; Processor-independent options for LynxOS.
+
+; Copyright (C) 2005 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mlegacy-threads
+Target RejectNegative
+Support legacy multi-threading
+
+mshared
+Target RejectNegative
+Use shared libraries
+
+mthreads
+Target RejectNegative
+Support multi-threading
diff --git a/gcc-4.2.1-5666.3/gcc/config/memcmp.c b/gcc-4.2.1-5666.3/gcc/config/memcmp.c
new file mode 100644
index 000000000..2348afe1d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/memcmp.c
@@ -0,0 +1,16 @@
+/* Public domain. */
+#include <stddef.h>
+
+int
+memcmp (const void *str1, const void *str2, size_t count)
+{
+ const unsigned char *s1 = str1;
+ const unsigned char *s2 = str2;
+
+ while (count-- > 0)
+ {
+ if (*s1++ != *s2++)
+ return s1[-1] < s2[-1] ? -1 : 1;
+ }
+ return 0;
+}
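The return value follows the usual libc sign convention; a tiny sketch
(hypothetical sign_example, assuming this memcmp is the one linked in):

    #include <string.h>

    int
    sign_example (void)
    {
      return memcmp ("abc", "abd", 3);  /* -1, since 'c' < 'd' */
    }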
diff --git a/gcc-4.2.1-5666.3/gcc/config/memcpy.c b/gcc-4.2.1-5666.3/gcc/config/memcpy.c
new file mode 100644
index 000000000..58b1e4056
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/memcpy.c
@@ -0,0 +1,12 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memcpy (void *dest, const void *src, size_t len)
+{
+ char *d = dest;
+ const char *s = src;
+ while (len--)
+ *d++ = *s++;
+ return dest;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/memmove.c b/gcc-4.2.1-5666.3/gcc/config/memmove.c
new file mode 100644
index 000000000..13b340af6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/memmove.c
@@ -0,0 +1,20 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memmove (void *dest, const void *src, size_t len)
+{
+ char *d = dest;
+ const char *s = src;
+ if (d < s)
+ while (len--)
+ *d++ = *s++;
+ else
+ {
+ const char *lasts = s + (len-1);
+ char *lastd = d + (len-1);
+ while (len--)
+ *lastd-- = *lasts--;
+ }
+ return dest;
+}
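A quick check of the overlap handling (hosted sketch, assuming this
memmove is the one linked in): when the destination overlaps the source
from above, the copy runs backwards so no source byte is clobbered
before it is read.

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      char buf[8] = "abcdef";
      memmove (buf + 1, buf, 5);  /* dest > src: copies backwards */
      printf ("%s\n", buf);       /* prints "aabcde" */
      return 0;
    }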
diff --git a/gcc-4.2.1-5666.3/gcc/config/memset.c b/gcc-4.2.1-5666.3/gcc/config/memset.c
new file mode 100644
index 000000000..3e7025ee3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/memset.c
@@ -0,0 +1,11 @@
+/* Public domain. */
+#include <stddef.h>
+
+void *
+memset (void *dest, int val, size_t len)
+{
+ unsigned char *ptr = dest;
+ while (len-- > 0)
+ *ptr++ = val;
+ return dest;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/netbsd-aout.h b/gcc-4.2.1-5666.3/gcc/config/netbsd-aout.h
new file mode 100644
index 000000000..fe5b3ec5f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/netbsd-aout.h
@@ -0,0 +1,206 @@
+/* Common configuration file for NetBSD a.out targets.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* TARGET_OS_CPP_BUILTINS() common to all NetBSD a.out targets. */
+#define NETBSD_OS_CPP_BUILTINS_AOUT() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_COMMON(); \
+ } \
+ while (0)
+
+/* This defines which switch letters take arguments. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'R')
+
+
+/* Provide an ASM_SPEC appropriate for NetBSD. Currently we only deal
+ with the options for generating PIC code. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
+
+#define AS_NEEDS_DASH_FOR_PIPED_INPUT
+
+
+/* Provide a STARTFILE_SPEC appropriate for NetBSD a.out. Here we
+ provide support for the special GCC option -static. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt0%O%s} \
+ %{!pg: \
+ %{p:mcrt0%O%s} \
+ %{!p: \
+ %{!static:crt0%O%s} \
+ %{static:scrt0%O%s}}}}"
+
+/* Provide a LINK_SPEC appropriate for NetBSD a.out. Here we provide
+ support for the special GCC options -static, -assert, and -nostdlib. */
+
+#undef NETBSD_LINK_SPEC_AOUT
+#define NETBSD_LINK_SPEC_AOUT \
+ "%{nostdlib:-nostdlib} \
+ %{!shared: \
+ %{!nostdlib: \
+ %{!r*: \
+ %{!e*:-e start}}} \
+ -dc -dp \
+ %{static:-Bstatic}} \
+ %{shared:-Bshareable} \
+ %{R*} \
+ %{assert*}"
+
+/* Default LINK_SPEC. */
+#undef LINK_SPEC
+#define LINK_SPEC NETBSD_LINK_SPEC_AOUT
+
+/* Some imports from svr4.h in support of shared libraries. */
+
+/* Define the strings used for the .type, .size, and .set directives.
+ These strings generally do not vary from one system running NetBSD
+ to another, but if a given system needs to use different pseudo-op
+ names for these, they may be overridden in the file included after
+ this one. */
+
+#undef TYPE_ASM_OP
+#undef SIZE_ASM_OP
+#undef SET_ASM_OP
+#define TYPE_ASM_OP "\t.type\t"
+#define SIZE_ASM_OP "\t.size\t"
+#define SET_ASM_OP "\t.set\t"
+
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#undef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do \
+ { \
+ fputs ("\t.globl\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various forms of this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending on the particulars of your assembler). */
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4 (and a.out on NetBSD).
+ These macros also output the starting labels for the relevant
+ functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers also need to have something extra said about the
+ function's return value. We allow for that here. */
+
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ HOST_WIDE_INT size; \
+ \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive \
+ && (DECL) && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
+ } \
+ \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } \
+ while (0)
+
+
+/* This is how to declare the size of a function. */
+
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
diff --git a/gcc-4.2.1-5666.3/gcc/config/netbsd-elf.h b/gcc-4.2.1-5666.3/gcc/config/netbsd-elf.h
new file mode 100644
index 000000000..9a56de23f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/netbsd-elf.h
@@ -0,0 +1,94 @@
+/* Common configuration file for NetBSD ELF targets.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* TARGET_OS_CPP_BUILTINS() common to all NetBSD ELF targets. */
+#define NETBSD_OS_CPP_BUILTINS_ELF() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_COMMON(); \
+ } \
+ while (0)
+
+/* This defines which switch letters take arguments. On NetBSD, most
+ of the normal cases (defined by gcc.c) apply, and we also have -h*
+ and -z* options for the linker (coming from SVR4). */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z' \
+ || (CHAR) == 'R')
+
+
+/* Provide a STARTFILE_SPEC appropriate for NetBSD ELF. Here we
+ provide support for the special GCC option -static. On ELF
+ targets, we also add the crtbegin.o file, which provides part
+ of the support for getting C++ file-scope static objects
+ constructed before entering "main". */
+
+#define NETBSD_STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt0%O%s} \
+ %{!pg: \
+ %{p:gcrt0%O%s} \
+ %{!p:crt0%O%s}}} \
+ %:if-exists(crti%O%s) \
+ %{static:%:if-exists-else(crtbeginT%O%s crtbegin%O%s)} \
+ %{!static: \
+ %{!shared:crtbegin%O%s} %{shared:crtbeginS%O%s}}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC NETBSD_STARTFILE_SPEC
+
+
+/* Provide an ENDFILE_SPEC appropriate for NetBSD ELF. Here we
+ add crtend.o, which provides part of the support for getting
+ C++ file-scope static objects deconstructed after exiting "main". */
+
+#define NETBSD_ENDFILE_SPEC \
+ "%{!shared:crtend%O%s} %{shared:crtendS%O%s} \
+ %:if-exists(crtn%O%s)"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC NETBSD_ENDFILE_SPEC
+
+/* Provide a LINK_SPEC appropriate for NetBSD ELF. Here we provide
+ support for the special GCC options -assert, -R, -rpath, -shared,
+ -nostdlib, -static, -rdynamic, and -dynamic-linker.
+
+ Target-specific code can use this in conjunction with any other
+ target-specific LINK_SPEC options.
+
+ Target-specific code must provide the %(netbsd_entry_point) spec. */
+
+#define NETBSD_LINK_SPEC_ELF \
+ "%{assert*} %{R*} %{rpath*} \
+ %{shared:-shared} \
+ %{!shared: \
+ -dc -dp \
+ %{!nostdlib: \
+ %{!r*: \
+ %{!e*:-e %(netbsd_entry_point)}}} \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.elf_so}} \
+ %{static:-static}}"
diff --git a/gcc-4.2.1-5666.3/gcc/config/netbsd.h b/gcc-4.2.1-5666.3/gcc/config/netbsd.h
new file mode 100644
index 000000000..5386b6565
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/netbsd.h
@@ -0,0 +1,227 @@
+/* Base configuration file for all NetBSD targets.
+ Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* TARGET_OS_CPP_BUILTINS() common to all NetBSD targets. */
+#define NETBSD_OS_CPP_BUILTINS_COMMON() \
+ do \
+ { \
+ builtin_define ("__NetBSD__"); \
+ builtin_define ("__unix__"); \
+ builtin_assert ("system=bsd"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=NetBSD"); \
+ } \
+ while (0)
+
+/* CPP_SPEC parts common to all NetBSD targets. */
+#define NETBSD_CPP_SPEC \
+ "%{posix:-D_POSIX_SOURCE} \
+ %{pthread:-D_REENTRANT -D_PTHREADS}"
+
+/* NETBSD_NATIVE is defined when gcc is integrated into the NetBSD
+ source tree so it can be configured appropriately without using
+ the GNU configure/build mechanism. */
+
+#ifdef NETBSD_NATIVE
+
+/* Look for the include files in the system-defined places. */
+
+#undef GPLUSPLUS_INCLUDE_DIR
+#define GPLUSPLUS_INCLUDE_DIR "/usr/include/g++"
+
+#undef GCC_INCLUDE_DIR
+#define GCC_INCLUDE_DIR "/usr/include"
+
+#undef INCLUDE_DEFAULTS
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
+ { 0, 0, 0, 0 } \
+ }
+
+/* Under NetBSD, the normal location of the compiler back ends is the
+ /usr/libexec directory. */
+
+#undef STANDARD_EXEC_PREFIX
+#define STANDARD_EXEC_PREFIX "/usr/libexec/"
+
+/* Under NetBSD, the normal location of the various *crt*.o files is the
+ /usr/lib directory. */
+
+#undef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+
+#endif /* NETBSD_NATIVE */
+
+
+/* Provide a LIB_SPEC appropriate for NetBSD. Here we:
+
+ 1. Select the appropriate set of libs, depending on whether we're
+ profiling.
+
+ 2. Include the pthread library if -pthread is specified (only
+ if threads are enabled).
+
+ 3. Include the posix library if -posix is specified.
+
+ FIXME: Could eliminate the duplication here if we were allowed to
+ use string concatenation. */
+
+#ifdef NETBSD_ENABLE_PTHREADS
+#define NETBSD_LIB_SPEC \
+ "%{pthread: \
+ %{!p: \
+ %{!pg:-lpthread}} \
+ %{p:-lpthread_p} \
+ %{pg:-lpthread_p}} \
+ %{posix: \
+ %{!p: \
+ %{!pg:-lposix}} \
+ %{p:-lposix_p} \
+ %{pg:-lposix_p}} \
+ %{!shared: \
+ %{!symbolic: \
+ %{!p: \
+ %{!pg:-lc}} \
+ %{p:-lc_p} \
+ %{pg:-lc_p}}}"
+#else
+#define NETBSD_LIB_SPEC \
+ "%{posix: \
+ %{!p: \
+ %{!pg:-lposix}} \
+ %{p:-lposix_p} \
+ %{pg:-lposix_p}} \
+ %{!shared: \
+ %{!symbolic: \
+ %{!p: \
+ %{!pg:-lc}} \
+ %{p:-lc_p} \
+ %{pg:-lc_p}}}"
+#endif
+
+#undef LIB_SPEC
+#define LIB_SPEC NETBSD_LIB_SPEC
+
+/* Provide a LIBGCC_SPEC appropriate for NetBSD. We also want to exclude
+ libgcc with -symbolic. */
+
+#ifdef NETBSD_NATIVE
+#define NETBSD_LIBGCC_SPEC \
+ "%{!symbolic: \
+ %{!shared: \
+ %{!p: \
+ %{!pg: -lgcc}}} \
+ %{shared: -lgcc_pic} \
+ %{p: -lgcc_p} \
+ %{pg: -lgcc_p}}"
+#else
+#define NETBSD_LIBGCC_SPEC "%{!shared:%{!symbolic: -lgcc}}"
+#endif
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC NETBSD_LIBGCC_SPEC
+
+/* When building shared libraries, the initialization and finalization
+ functions for the library are .init and .fini respectively. */
+
+#define COLLECT_SHARED_INIT_FUNC(STREAM,FUNC) \
+ do { \
+ fprintf ((STREAM), "void __init() __asm__ (\".init\");"); \
+ fprintf ((STREAM), "void __init() {\n\t%s();\n}\n", (FUNC)); \
+ } while (0)
+
+#define COLLECT_SHARED_FINI_FUNC(STREAM,FUNC) \
+ do { \
+ fprintf ((STREAM), "void __fini() __asm__ (\".fini\");"); \
+ fprintf ((STREAM), "void __fini() {\n\t%s();\n}\n", (FUNC)); \
+ } while (0)
+
+#undef TARGET_POSIX_IO
+#define TARGET_POSIX_IO
+
+/* Handle #pragma weak and #pragma pack. */
+
+#define HANDLE_SYSV_PRAGMA 1
+
+/* Don't assume anything about the header files. */
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C 1
+
+/* Define some types that are the same on all NetBSD platforms,
+ making them agree with <machine/ansi.h>. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef WINT_TYPE
+#define WINT_TYPE "int"
+
+
+/* Attempt to turn on execute permission for the stack. This may be
+ used by INITIALIZE_TRAMPOLINE if the target needs it (that is,
+ if the target machine can change execute permissions on a page).
+
+ There is no way to query the execute permission of the stack, so
+ we always issue the mprotect() call.
+
+ Note that we go out of our way to use namespace-non-invasive calls
+ here. Unfortunately, there is no libc-internal name for mprotect().
+
+ Also note that no errors should be emitted by this code; it is considered
+ dangerous for library calls to send messages to stdout/stderr. */
+
+#define NETBSD_ENABLE_EXECUTE_STACK \
+extern void __enable_execute_stack (void *); \
+void \
+__enable_execute_stack (void *addr) \
+{ \
+ extern int mprotect (void *, size_t, int); \
+ extern int __sysctl (int *, unsigned int, void *, size_t *, \
+ void *, size_t); \
+ \
+ static int size; \
+ static long mask; \
+ \
+ char *page, *end; \
+ \
+ if (size == 0) \
+ { \
+ int mib[2]; \
+ size_t len; \
+ \
+ mib[0] = 6; /* CTL_HW */ \
+ mib[1] = 7; /* HW_PAGESIZE */ \
+ len = sizeof (size); \
+ (void) __sysctl (mib, 2, &size, &len, NULL, 0); \
+ mask = ~((long) size - 1); \
+ } \
+ \
+ page = (char *) (((long) addr) & mask); \
+ end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ /* 7 == PROT_READ | PROT_WRITE | PROT_EXEC */ \
+ (void) mprotect (page, end - page, 7); \
+}
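The page rounding in that macro is plain mask arithmetic; a standalone
sketch assuming a hypothetical 4096-byte page and a 64-byte trampoline:

    #include <stdio.h>

    int
    main (void)
    {
      long size = 4096;
      long mask = ~(size - 1);
      long addr = 0x12345;                       /* example address */
      long page = addr & mask;                   /* 0x12000 */
      long end = ((addr + 64) & mask) + size;    /* 0x13000 */
      printf ("page=%#lx end=%#lx\n", page, end);
      return 0;
    }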
diff --git a/gcc-4.2.1-5666.3/gcc/config/openbsd-oldgas.h b/gcc-4.2.1-5666.3/gcc/config/openbsd-oldgas.h
new file mode 100644
index 000000000..5ca617b9e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/openbsd-oldgas.h
@@ -0,0 +1,23 @@
+/* Generic settings for a.out OpenBSD systems.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by David E. O'Brien <obrien@FreeBSD.org>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+
+#define OBSD_OLD_GAS
diff --git a/gcc-4.2.1-5666.3/gcc/config/openbsd.h b/gcc-4.2.1-5666.3/gcc/config/openbsd.h
new file mode 100644
index 000000000..68eb94877
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/openbsd.h
@@ -0,0 +1,311 @@
+/* Base configuration file for all OpenBSD targets.
+ Copyright (C) 1999, 2000, 2004, 2005 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Common OpenBSD configuration.
+ All OpenBSD architectures include this file, which is intended as
+ a repository for common defines.
+
+ Some defines are common to all architectures; a few of them are
+ triggered by OBSD_* guards, so that we won't override architecture
+ defaults by mistake.
+
+ OBSD_HAS_CORRECT_SPECS:
+ another mechanism provides correct specs already.
+ OBSD_NO_DYNAMIC_LIBRARIES:
+ no implementation of dynamic libraries.
+ OBSD_OLD_GAS:
+ older flavor of gas which needs help for PIC.
+ OBSD_HAS_DECLARE_FUNCTION_NAME, OBSD_HAS_DECLARE_FUNCTION_SIZE,
+ OBSD_HAS_DECLARE_OBJECT:
+ PIC support, FUNCTION_NAME/FUNCTION_SIZE are independent, whereas
+ the corresponding logic for OBJECTS is necessarily coupled.
+
+ There are also a few `default' defines such as ASM_WEAKEN_LABEL,
+ intended as common ground for arches that don't provide
+ anything suitable. */
+
+/* OPENBSD_NATIVE is defined only when gcc is configured as part of
+ the OpenBSD source tree, specifically through Makefile.bsd-wrapper.
+
+ In such a case the include path can be trimmed as there is no
+ distinction between system includes and gcc includes. */
+
+/* This configuration method, namely Makefile.bsd-wrapper and
+ OPENBSD_NATIVE is NOT recommended for building cross-compilers. */
+
+#ifdef OPENBSD_NATIVE
+
+/* The compiler is configured with ONLY the gcc/g++ standard headers. */
+#undef INCLUDE_DEFAULTS
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GPLUSPLUS_TOOL_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GPLUSPLUS_BACKWARD_INCLUDE_DIR, "G++", 1, 1 }, \
+ { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0 }, \
+ { 0, 0, 0, 0 } \
+ }
+
+/* Under OpenBSD, the normal location of the various *crt*.o files is the
+ /usr/local/lib directory. */
+#undef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/local/lib/"
+
+#endif
+
+
+/* Controlling the compilation driver. */
+/* TARGET_OS_CPP_BUILTINS() common to all OpenBSD targets. */
+#define OPENBSD_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__OpenBSD__"); \
+ builtin_define ("__unix__"); \
+ builtin_define ("__ANSI_COMPAT"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=bsd"); \
+ builtin_assert ("system=OpenBSD"); \
+ } \
+ while (0)
+
+/* CPP_SPEC appropriate for OpenBSD. We deal with -posix and -pthread.
+ XXX the way threads are handled currently is not very satisfying,
+ since all code must be compiled with -pthread to work.
+ This two-stage define makes it easy to pick that up for targets that
+ have subspecs. */
+#ifdef CPP_CPU_SPEC
+#define OBSD_CPP_SPEC "%(cpp_cpu) %{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"
+#else
+#define OBSD_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"
+#endif
+
+/* LIB_SPEC appropriate for OpenBSD. */
+#ifdef HAS_LIBC_R
+/* -lc(_r)?(_p)?, select _r for threads, and _p for p or pg. */
+# define OBSD_LIB_SPEC "%{!shared:-lc%{pthread:_r}%{p:_p}%{!p:%{pg:_p}}}"
+#else
+/* Include -lpthread if -pthread is specified on the command line. */
+# define OBSD_LIB_SPEC "%{!shared:%{pthread:-lpthread%{p:_p}%{!p:%{pg:_p}}}} %{!shared:-lc%{p:_p}%{!p:%{pg:_p}}}"
+#endif
+
+
+#ifndef OBSD_HAS_CORRECT_SPECS
+
+#ifndef OBSD_NO_DYNAMIC_LIBRARIES
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
+ || (CHAR) == 'R')
+#endif
+
+#undef CPP_SPEC
+#define CPP_SPEC OBSD_CPP_SPEC
+
+#ifdef OBSD_OLD_GAS
+/* ASM_SPEC appropriate for OpenBSD. For some architectures, OpenBSD
+ still uses a special flavor of gas that needs to be told when generating
+ pic code. */
+#undef ASM_SPEC
+#define ASM_SPEC "%{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
+#endif
+
+/* Since we use gas, stdin -> - is a good idea. */
+#define AS_NEEDS_DASH_FOR_PIPED_INPUT
+
+/* LINK_SPEC appropriate for OpenBSD. Support for GCC options
+ -static, -assert, and -nostdlib. */
+#undef LINK_SPEC
+#ifdef OBSD_NO_DYNAMIC_LIBRARIES
+#define LINK_SPEC \
+ "%{g:%{!nostdlib:-L/usr/lib/debug}} %{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{assert*}"
+#else
+#define LINK_SPEC \
+ "%{g:%{!nostdlib:-L/usr/lib/debug}} %{!shared:%{!nostdlib:%{!r*:%{!e*:-e start}}}} %{shared:-Bshareable -x} -dc -dp %{R*} %{static:-Bstatic} %{assert*}"
+#endif
+
+#undef LIB_SPEC
+#define LIB_SPEC OBSD_LIB_SPEC
+#endif
+
+
+/* Runtime target specification. */
+
+/* Miscellaneous parameters. */
+
+/* Controlling debugging info: dbx options. */
+
+/* Don't use the `xsTAG;' construct in DBX output; OpenBSD systems that
+ use DBX don't support it. */
+#define DBX_NO_XREFS
+
+
+/* Support of shared libraries, mostly imported from svr4.h through netbsd. */
+/* Two differences from svr4.h:
+ - we use . - _func instead of a local label,
+ - we put extra spaces in expressions such as
+ .type _func , @function
+ This is more readable for a human being and confuses c++filt less. */
+
+/* Assembler format: output and generation of labels. */
+
+/* Define the strings used for the .type and .size directives.
+ These strings generally do not vary from one system running OpenBSD
+ to another, but if a given system needs to use different pseudo-op
+ names for these, they may be overridden in the arch-specific file. */
+
+/* The OpenBSD assembler is hacked to have .type & .size support even in
+ a.out format object files. Function sizes are supported but not activated
+ yet (look for GRACE_PERIOD_EXPIRED in gas/config/obj-aout.c).
+ SET_ASM_OP is needed for attribute alias to work. */
+
+#undef TYPE_ASM_OP
+#undef SIZE_ASM_OP
+#undef SET_ASM_OP
+#undef GLOBAL_ASM_OP
+
+#define TYPE_ASM_OP "\t.type\t"
+#define SIZE_ASM_OP "\t.size\t"
+#define SET_ASM_OP "\t.set\t"
+#define GLOBAL_ASM_OP "\t.globl\t"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Provision if extra assembler code is needed to declare a function's result
+ (taken from svr4; not actually needed yet). */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries under OpenBSD. These macros also have to output the starting
+ labels for the relevant functions/objects. */
+
+#ifndef OBSD_HAS_DECLARE_FUNCTION_NAME
+/* Extra assembler code needed to declare a function properly.
+ Some assemblers may also need to have something extra said
+ about the function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+#endif
+
+#ifndef OBSD_HAS_DECLARE_FUNCTION_SIZE
+/* Declare the size of a function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } while (0)
+#endif
+
+#ifndef OBSD_HAS_DECLARE_OBJECT
+/* Extra assembler code needed to declare an object properly. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ HOST_WIDE_INT size; \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive \
+ && (DECL) && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
+ } \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set by ASM_DECLARE_OBJECT_NAME
+ when it was run for the same decl. */
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } while (0)
+#endif
+
+
+/* Those are `generic' ways to weaken/globalize a label. We shouldn't need
+ to override a processor specific definition. Hence, #ifndef ASM_*
+ In case overriding turns out to be needed, one can always #undef ASM_*
+ before including this file. */
+
+/* Tell the assembler that a symbol is weak. */
+/* Note: netbsd arm32 assembler needs a .globl here. An override may
+ be needed when/if we go for arm32 support. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+#endif
+
+/* Storage layout. */
+
+
+/* Since we support weak symbols, gthr.h would otherwise erroneously
+ try to use #pragma weak. */
+#define GTHREAD_USE_WEAK 0
+
+/* Bug workaround: we don't want to support #pragma weak, but the current
+ code layout needs HANDLE_PRAGMA_WEAK asserted for __attribute((weak)) to
+ work. On the other hand, we don't define HANDLE_PRAGMA_WEAK directly,
+ as this depends on a few other details as well... */
+#define HANDLE_SYSV_PRAGMA 1
+
+/* Stack is explicitly denied execution rights on OpenBSD platforms. */
+#define ENABLE_EXECUTE_STACK \
+extern void __enable_execute_stack (void *); \
+void \
+__enable_execute_stack (void *addr) \
+{ \
+ long size = getpagesize (); \
+ long mask = ~(size-1); \
+ char *page = (char *) (((long) addr) & mask); \
+ char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ if (mprotect (page, end - page, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) \
+ perror ("mprotect of trampoline code"); \
+}
+
+#include <sys/types.h>
+#include <sys/mman.h>
diff --git a/gcc-4.2.1-5666.3/gcc/config/ptx4.h b/gcc-4.2.1-5666.3/gcc/config/ptx4.h
new file mode 100644
index 000000000..8c231027d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/ptx4.h
@@ -0,0 +1,223 @@
+/* Operating system specific defines to be used when targeting GCC for
+ Sequent's Dynix/ptx v4 and later.
+ Copyright 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004
+ Free Software Foundation, Inc.
+ Generic SysV4 file Contributed by Ron Guilmette (rfg@monkeys.com).
+ Renamed and changed to suit Dynix/ptx v4 and later.
+ Modified by Tim Wright (timw@sequent.com).
+ Modified by Janis Johnson (janis@us.ibm.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
+
+*/
+
+/* Define a symbol indicating that we are using svr4.h. */
+#define USING_SVR4_H
+
+/* Use DWARF 2 debugging info by default. */
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#define DWARF2_DEBUGGING_INFO 1
+
+/* Cpp, assembler, linker, library, and startfile spec's. */
+
+/* This defines which switch letters take arguments. On svr4, most of
+ the normal cases (defined in gcc.c) apply, and we also have -h* and
+ -z* options (for the linker). Note however that there is no such
+ thing as a -T option for svr4. */
+
+#define SWITCH_TAKES_ARG(CHAR) \
+ ( (CHAR) == 'D' \
+ || (CHAR) == 'U' \
+ || (CHAR) == 'o' \
+ || (CHAR) == 'e' \
+ || (CHAR) == 'u' \
+ || (CHAR) == 'I' \
+ || (CHAR) == 'm' \
+ || (CHAR) == 'L' \
+ || (CHAR) == 'A' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z')
+
+/* This defines which multi-letter switches take arguments. On svr4,
+ there are no such switches except those implemented by GCC itself. */
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \
+ && strcmp (STR, "Tbss"))
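+
+/* For example, `-h mylib.so.1' consumes the following word as its
+ argument, while `-Tdata' is deliberately excluded, matching the note
+ above that svr4 has no -T option. */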
+
+/* Provide an ASM_SPEC appropriate for svr4. Here we try to support as
+ many of the specialized svr4 assembler options as seems reasonable,
+ given that some options can't (or shouldn't) be supported directly
+ because they conflict with options for other svr4 tools (e.g. ld) or
+ with other options for GCC itself. For example, we don't support the
+ -o (output file) or -R (remove input file) options because GCC already
+ handles these things. We also don't support the -m (run m4) option
+ for the assembler because that conflicts with the -m (produce load
+ map) option of the svr4 linker. We do however allow passing arbitrary
+ options to the svr4 assembler via the -Wa, option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -Ym,* or -Yd,*
+ option.
+*/
+
+#undef ASM_SPEC
+#ifdef USE_GAS
+#define ASM_SPEC \
+ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+#else
+#define ASM_SPEC \
+ "-no_0f_fix -no_eflags_chk %{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+#endif
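+
+/* As an illustration, `-Wa,-V,-Qy' reaches the assembler as `-V -Qy'
+ via %{Wa,*:%*}, and `gcc -v' passes `-V' through %{v:-V}. */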
+
+#define AS_NEEDS_DASH_FOR_PIPED_INPUT
+
+/* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default
+ standard C library (unless we are building a shared library). */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
+
+/* Provide a LIBGCC_SPEC appropriate for svr4. We also want to exclude
+ libgcc when -symbolic. */
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC "%{!shared:%{!symbolic:-lgcc}}"
+
+/* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own
+ magical crtend.o file (see crtstuff.c), which provides part of the
+ support for getting C++ file-scope static objects constructed before
+ entering `main', followed by the normal svr3/svr4 "finalizer" file,
+ which is either `gcrtn.o' or `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o}%{!pg:crtn.o%s}"
+
+/* Provide a LINK_SPEC appropriate for svr4. Here we provide support
+ for the special GCC options -static, -shared, and -symbolic which
+ allow us to link things in one of these three modes by applying the
+ appropriate combinations of options at link-time. We also provide
+ support here for as many of the other svr4 linker options as seems
+ reasonable, given that some of them conflict with options for other
+ svr4 tools (e.g. the assembler). In particular, we do support the
+ -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*,
+ -l*, -o*, -r, -s, -u*, and -L* options are directly supported
+ by gcc.c itself. We don't directly support the -m (generate load
+ map) option because that conflicts with the -m (run m4) option of
+ the svr4 assembler. We also don't directly support the svr4 linker's
+ -I* or -M* options because these conflict with existing GCC options.
+ We do however allow passing arbitrary options to the svr4 linker
+ via the -Wl, option. We don't support the svr4 linker's -a option
+ at all because it is totally useless and because it conflicts with
+ GCC's own -a option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -YP,* option.
+
+ When the -G link option is used (-shared and -symbolic), a final link
+ is not done. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{b} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{!YP,*:%{p:-Y P,/lib/libp:/usr/lib/libp:/lib:/usr/lib} \
+ %{!p:-Y P,/lib:/usr/lib}} \
+ %{Qy:} %{!Qn:-Qy}"
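+
+/* As an illustration, `gcc -shared' reaches the linker as roughly
+ `ld -G -dy -z text -Qy ...', while a plain link with no -YP or -p
+ picks up `-Y P,/lib:/usr/lib' as the default library search path. */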
+
+/* GCC automatically adds in one of the files /lib/values-Xc.o or
+ /lib/values-Xa.o for each final link step (depending upon the other
+ gcc options selected, such as -ansi). These files each contain one
+ (initialized) copy of a special variable called `_lib_version'. Each
+ one of these files has `_lib_version' initialized to a different (enum)
+ value. The SVR4 library routines query the value of `_lib_version'
+ at run time to decide how they should behave. Specifically, they
+ decide (based upon the value of `_lib_version') whether they will act
+ in a strictly ANSI conforming manner or not.
+*/
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}}\
+ %{pg:gcrti.o%s}%{!pg:crti.o%s} \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi:values-Xa.o%s} \
+ crtbegin.o%s"
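+
+/* So a plain `gcc -ansi foo.c' link begins with crt1.o, crti.o,
+ values-Xc.o and crtbegin.o, in that order, and ENDFILE_SPEC above
+ closes it with crtend.o and crtn.o. */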
+
+/* Don't use bcopy, which doesn't handle overlaps before DYNIX/ptx 4.6. */
+
+#undef HAVE_BCOPY
+
+/* The numbers used to denote specific machine registers in the System V
+ Release 4 DWARF debugging information are quite likely to be totally
+ different from the numbers used in BSD stabs debugging information
+ for the same kind of target machine. Thus, we undefine the macro
+ DBX_REGISTER_NUMBER here as an extra inducement to get people to
+ provide proper machine-specific definitions of DBX_REGISTER_NUMBER
+ (which is also used to provide DWARF register numbers in dwarfout.c)
+ in their tm.h files which include this file. */
+
+#undef DBX_REGISTER_NUMBER
+
+/* Like block addresses, stabs line numbers are relative to the
+ current function. */
+
+#define DBX_LINES_FUNCTION_RELATIVE 1
+
+/* Generate a blank trailing N_SO to mark the end of the .o file, since
+ we can't depend upon the linker to mark .o file boundaries with
+ embedded stabs. */
+
+#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+
+/* Define the actual types of some ANSI-mandated types. (These
+ definitions should work for most SVR4 systems). */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* This says how to output assembler code to declare an uninitialized
+ external linkage data item. There's a bug in the DYNIX/ptx linker
+ (PR 254649) when the alignment for such an object is specified, so
+ ignore the ALIGN parameter. */
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%lu\n", (unsigned long)(SIZE)); \
+} while (0)
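+
+/* With the usual svr4 COMMON_ASM_OP of "\t.comm\t", a 16-byte common
+ object `buf' would be emitted as `.comm buf,16', with the alignment
+ operand deliberately omitted. */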
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/40x.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/40x.md
new file mode 100644
index 000000000..94b6c459a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/40x.md
@@ -0,0 +1,119 @@
+;; Scheduling description for IBM PowerPC 403 and PowerPC 405 processors.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc40x,ppc40xiu")
+(define_cpu_unit "bpu_40x,fpu_405" "ppc40x")
+(define_cpu_unit "iu_40x" "ppc40xiu")
+
+;; PPC401 / PPC403 / PPC405 32-bit integer only IU BPU
+;; Embedded PowerPC controller
+;; In-order execution
+;; Max issue two insns/cycle (includes one branch)
+(define_insn_reservation "ppc403-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x")
+
+(define_insn_reservation "ppc403-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x,iu_40x")
+
+(define_insn_reservation "ppc403-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,nothing,bpu_40x")
+
+(define_insn_reservation "ppc403-imul" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc403"))
+ "iu_40x*4")
+
+(define_insn_reservation "ppc405-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x*4")
+
+(define_insn_reservation "ppc405-imul2" 3
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x*2")
+
+(define_insn_reservation "ppc405-imul3" 2
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-idiv" 33
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x*33")
+
+(define_insn_reservation "ppc403-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "bpu_40x")
+
+(define_insn_reservation "ppc403-cr" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "bpu_40x")
+
+(define_insn_reservation "ppc405-float" 11
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,\
+ fpcompare,fp,dmul,sdiv,ddiv")
+ (eq_attr "cpu" "ppc405"))
+ "fpu_405*10")
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/440.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/440.md
new file mode 100644
index 000000000..60e0f72dc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/440.md
@@ -0,0 +1,133 @@
+;; Scheduling description for IBM PowerPC 440 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; PPC440 Embedded PowerPC controller
+;; dual issue
+;; i_pipe - complex integer / compare / branch
+;; j_pipe - simple integer arithmetic
+;; l_pipe - load-store
+;; f_pipe - floating point arithmetic
+
+(define_automaton "ppc440_core,ppc440_apu")
+(define_cpu_unit "ppc440_i_pipe,ppc440_j_pipe,ppc440_l_pipe" "ppc440_core")
+(define_cpu_unit "ppc440_f_pipe" "ppc440_apu")
+(define_cpu_unit "ppc440_issue_0,ppc440_issue_1" "ppc440_core")
+
+(define_reservation "ppc440_issue" "ppc440_issue_0|ppc440_issue_1")
+
+
+(define_insn_reservation "ppc440-load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-store" 3
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-fpload" 4
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,ppc440_i_pipe|ppc440_j_pipe,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-imul" 3
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-imul2" 2
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-idiv" 34
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe*33")
+
+(define_insn_reservation "ppc440-branch" 1
+ (and (eq_attr "type" "branch,jmpreg,isync")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,cr_logical,delayed_cr,mfcr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-fpcompare" 3 ; 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe+ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-fp" 5
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe")
+
+(define_insn_reservation "ppc440-sdiv" 19
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe*15")
+
+(define_insn_reservation "ppc440-ddiv" 33
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe*29")
+
+(define_insn_reservation "ppc440-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/603.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/603.md
new file mode 100644
index 000000000..4721aca79
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/603.md
@@ -0,0 +1,142 @@
+;; Scheduling description for PowerPC 603 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc603,ppc603fp")
+(define_cpu_unit "iu_603" "ppc603")
+(define_cpu_unit "fpu_603" "ppc603fp")
+(define_cpu_unit "lsu_603,bpu_603,sru_603" "ppc603")
+
+;; PPC603/PPC603e 32-bit IU, LSU, FPU, BPU, SRU
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 2 insns per cycle.
+
+;; The PPC603e user's manual recommends that to reduce branch mispredictions,
+;; the insn that sets CR bits should be separated from the branch insn
+;; that evaluates them; separation by more than 9 insns ensures that the CR
+;; bits will be immediately available for execution.
+;; This could be artificially achieved by exaggerating the latency of
+;; compare insns but at the expense of a poorer schedule.
+
+;; CR insns get executed in the SRU. Not modelled.
+
+(define_insn_reservation "ppc603-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ux,load_u,load_l")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603*2")
+
+(define_insn_reservation "ppc603-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603")
+
+(define_insn_reservation "ppc603-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603")
+
+(define_insn_reservation "ppc603-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603,iu_603")
+
+; This takes 2 or 3 cycles
+(define_insn_reservation "ppc603-imul" 3
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*2")
+
+(define_insn_reservation "ppc603-imul2" 2
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*2")
+
+(define_insn_reservation "ppc603-idiv" 37
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*37")
+
+(define_insn_reservation "ppc603-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,nothing,bpu_603")
+
+(define_insn_reservation "ppc603-fpcompare" 3
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc603"))
+ "(fpu_603+iu_603*2),bpu_603")
+
+(define_insn_reservation "ppc603-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603")
+
+(define_insn_reservation "ppc603-dmul" 4
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*2")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc603-sdiv" 18
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*18")
+
+(define_insn_reservation "ppc603-ddiv" 33
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*33")
+
+(define_insn_reservation "ppc603-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr,mfcr,mtcr")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr,isync,sync")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "ppc603"))
+ "bpu_603")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/6xx.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/6xx.md
new file mode 100644
index 000000000..31aa60620
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/6xx.md
@@ -0,0 +1,274 @@
+;; Scheduling description for PowerPC 604, PowerPC 604e, PowerPC 620,
+;; and PowerPC 630 processors.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc6xx,ppc6xxfp,ppc6xxfp2")
+(define_cpu_unit "iu1_6xx,iu2_6xx,mciu_6xx" "ppc6xx")
+(define_cpu_unit "fpu_6xx" "ppc6xxfp")
+(define_cpu_unit "fpu1_6xx,fpu2_6xx" "ppc6xxfp2")
+(define_cpu_unit "lsu_6xx,bpu_6xx,cru_6xx" "ppc6xx")
+
+;; PPC604 32-bit 2xSCIU, MCIU, LSU, FPU, BPU
+;; PPC604e 32-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU
+;; MCIU used for imul/idiv and moves from/to spr
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; Max issue 4 insns/clock cycle
+
+;; PPC604e is a PPC604 with larger caches and a CRU. In the 604,
+;; CR logical operations are handled in the BPU. In the 604e, the CRU
+;; shares a bus with the BPU, so only one condition-register or branch
+;; insn can be issued per clock. Not modelled.
+
+;; PPC620 64-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU
+;; PPC630 64-bit 2xSCIU, MCIU, LSU, 2xFPU, BPU, CRU
+;; Max issue 4 insns/clock cycle
+;; Out-of-order execution, in-order completion
+
+;; No following instruction can dispatch in the same cycle as a branch
+;; instruction. Not modelled. This is no problem if RCSP is not
+;; enabled since the scheduler stops a schedule when it gets to a branch.
+
+;; Four insns can be dispatched per cycle.
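+
+;; Unlike the 7450 description, no explicit dispatch units are modelled
+;; here; issue width is bounded only by the execution units themselves.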
+
+(define_insn_reservation "ppc604-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-store" 3
+ (and (eq_attr "type" "store,fpstore,store_ux,store_u,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-llsc" 4
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-imul" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc604"))
+ "mciu_6xx*2")
+
+(define_insn_reservation "ppc604e-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc604e"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc620-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-imul2" 4
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-imul3" 3
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-lmul" 7
+ (and (eq_attr "type" "lmul,lmul_compare")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*5")
+
+(define_insn_reservation "ppc604-idiv" 20
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "mciu_6xx*19")
+
+(define_insn_reservation "ppc620-idiv" 37
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc620"))
+ "mciu_6xx*36")
+
+(define_insn_reservation "ppc630-idiv" 21
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc630"))
+ "mciu_6xx*20")
+
+(define_insn_reservation "ppc620-ldiv" 37
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*36")
+
+(define_insn_reservation "ppc604-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "(iu1_6xx|iu2_6xx)")
+
+; FPU PPC604{,e},PPC620
+(define_insn_reservation "ppc604-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+(define_insn_reservation "ppc604-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+(define_insn_reservation "ppc604-dmul" 3
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc604-sdiv" 18
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx*18")
+
+(define_insn_reservation "ppc604-ddiv" 32
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx*32")
+
+(define_insn_reservation "ppc620-ssqrt" 31
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc620"))
+ "fpu_6xx*31")
+
+(define_insn_reservation "ppc620-dsqrt" 31
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc620"))
+ "fpu_6xx*31")
+
+
+; 2xFPU PPC630
+(define_insn_reservation "ppc630-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx|fpu2_6xx")
+
+(define_insn_reservation "ppc630-fp" 3
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx|fpu2_6xx")
+
+(define_insn_reservation "ppc630-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*17|fpu2_6xx*17")
+
+(define_insn_reservation "ppc630-ddiv" 21
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*21|fpu2_6xx*21")
+
+(define_insn_reservation "ppc630-ssqrt" 18
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*18|fpu2_6xx*18")
+
+(define_insn_reservation "ppc630-dsqrt" 25
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*25|fpu2_6xx*25")
+
+(define_insn_reservation "ppc604-mfcr" 3
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-mtcr" 2
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc604"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604e-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc604e,ppc620,ppc630"))
+ "cru_6xx")
+
+(define_insn_reservation "ppc604-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc630-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604-isync" 0
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc630-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-sync" 26
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/7450.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/7450.md
new file mode 100644
index 000000000..99e87125f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/7450.md
@@ -0,0 +1,184 @@
+;; Scheduling description for Motorola PowerPC 7450 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc7450,ppc7450mciu,ppc7450fp,ppc7450vec")
+(define_cpu_unit "iu1_7450,iu2_7450,iu3_7450" "ppc7450")
+(define_cpu_unit "mciu_7450" "ppc7450mciu")
+(define_cpu_unit "fpu_7450" "ppc7450fp")
+(define_cpu_unit "lsu_7450,bpu_7450" "ppc7450")
+(define_cpu_unit "du1_7450,du2_7450,du3_7450" "ppc7450")
+(define_cpu_unit "vecsmpl_7450,veccmplx_7450,vecflt_7450,vecperm_7450" "ppc7450vec")
+(define_cpu_unit "vdu1_7450,vdu2_7450" "ppc7450vec")
+
+
+;; PPC7450 32-bit 3xIU, MCIU, LSU, SRU, FPU, BPU, 4xVEC
+;; IU1,IU2,IU3 can perform all integer operations
+;; MCIU performs imul and idiv, cr logical, SPR moves
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; It also has 4 vector units, one for each type of vector instruction.
+;; However, only 2 vector instructions can be dispatched per cycle.
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+;; In-order execution
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 3 insns per cycle.
+(define_reservation "ppc7450_du" "du1_7450|du2_7450|du3_7450")
+(define_reservation "ppc7450_vec_du" "vdu1_7450|vdu2_7450")
+
+(define_insn_reservation "ppc7450-load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,vecload")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-store" 3
+ (and (eq_attr "type" "store,store_ux,store_u,vecstore")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-fpload" 4
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450*3")
+
+(define_insn_reservation "ppc7450-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,\
+ iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-imul" 4
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-imul2" 3
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-idiv" 23
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450*23")
+
+(define_insn_reservation "ppc7450-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,(iu1_7450|iu2_7450|iu3_7450)")
+
+(define_insn_reservation "ppc7450-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450")
+
+(define_insn_reservation "ppc7450-fp" 5
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc7450-sdiv" 21
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450*21")
+
+(define_insn_reservation "ppc7450-ddiv" 35
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450*35")
+
+(define_insn_reservation "ppc7450-mfcr" 2
+ (and (eq_attr "type" "mfcr,mtcr")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-crlogical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,bpu_7450")
+
+;; Altivec
+(define_insn_reservation "ppc7450-vecsimple" 1
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecsmpl_7450")
+
+(define_insn_reservation "ppc7450-veccomplex" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,veccmplx_7450")
+
+(define_insn_reservation "ppc7450-veccmp" 2
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,veccmplx_7450")
+
+(define_insn_reservation "ppc7450-vecfloat" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecflt_7450")
+
+(define_insn_reservation "ppc7450-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecperm_7450")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/7xx.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/7xx.md
new file mode 100644
index 000000000..77e58a3cb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/7xx.md
@@ -0,0 +1,183 @@
+;; Scheduling description for Motorola PowerPC 750 and PowerPC 7400 processors.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc7xx,ppc7xxfp")
+(define_cpu_unit "iu1_7xx,iu2_7xx" "ppc7xx")
+(define_cpu_unit "fpu_7xx" "ppc7xxfp")
+(define_cpu_unit "lsu_7xx,bpu_7xx,sru_7xx" "ppc7xx")
+(define_cpu_unit "du1_7xx,du2_7xx" "ppc7xx")
+(define_cpu_unit "veccmplx_7xx,vecperm_7xx,vdu_7xx" "ppc7xx")
+
+;; PPC740/PPC750/PPC7400 32-bit 2xIU, LSU, SRU, FPU, BPU
+;; IU1 can perform all integer operations
+;; IU2 can perform all integer operations except imul and idiv
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+;; In-order execution
+
+
+;; The PPC750 user's manual recommends that to reduce branch mispredictions,
+;; the insn that sets CR bits should be separated from the branch insn
+;; that evaluates them. There is no advantage to more than 10 cycles
+;; of separation.
+;; This could be artificially achieved by exaggerating the latency of
+;; compare insns but at the expense of a poorer schedule.
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 2 insns per cycle.
+(define_reservation "ppc750_du" "du1_7xx|du2_7xx")
+(define_reservation "ppc7400_vec_du" "vdu_7xx")
+
+(define_insn_reservation "ppc750-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ vecload,load_l")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,\
+ fpstore,fpstore_ux,fpstore_u,vecstore")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-imul" 4
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*4")
+
+(define_insn_reservation "ppc750-imul2" 3
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*2")
+
+(define_insn_reservation "ppc750-imul3" 2
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx")
+
+(define_insn_reservation "ppc750-idiv" 19
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*19")
+
+(define_insn_reservation "ppc750-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,(iu1_7xx|iu2_7xx)")
+
+(define_insn_reservation "ppc750-fpcompare" 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+(define_insn_reservation "ppc750-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+(define_insn_reservation "ppc750-dmul" 4
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc750"))
+ "ppc750_du,fpu_7xx*2")
+
+(define_insn_reservation "ppc7400-dmul" 3
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc750-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx*17")
+
+(define_insn_reservation "ppc750-ddiv" 31
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx*31")
+
+(define_insn_reservation "ppc750-mfcr" 2
+ (and (eq_attr "type" "mfcr,mtcr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx")
+
+(define_insn_reservation "ppc750-crlogical" 3
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr,isync,sync")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,bpu_7xx")
+
+;; Altivec
+(define_insn_reservation "ppc7400-vecsimple" 1
+ (and (eq_attr "type" "vecsimple,veccmp")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-veccomplex" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-vecfloat" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,vecperm_7xx")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/8540.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/8540.md
new file mode 100644
index 000000000..b42e2472a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/8540.md
@@ -0,0 +1,249 @@
+;; Pipeline description for Motorola PowerPC 8540 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "ppc8540_most,ppc8540_long,ppc8540_retire")
+(define_cpu_unit "ppc8540_decode_0,ppc8540_decode_1" "ppc8540_most")
+
+;; We don't simulate the general issue queue (GIC). If we have an SU
+;; insn followed by an SU1 insn, they cannot be issued in the same cycle
+;; (although an SU1 insn followed by an SU insn can be) because the SU
+;; insn will go to SU1 from the GIC0 entry. Fortunately, the first-cycle
+;; multipass insn scheduling will find this situation and issue the SU1
+;; insn before the SU insn.
+(define_cpu_unit "ppc8540_issue_0,ppc8540_issue_1" "ppc8540_most")
+
+;; We could describe the completion buffer slots in combination with the
+;; retirement units and the order of completion, but the resulting
+;; automaton would behave in the same way, because we cannot describe the
+;; real latency time while taking in-order completion into account.
+;; Actually we could define the real latency time by querying reserved
+;; automaton units, but the current scheduler uses latency time before
+;; issuing insns and making any reservations.
+;;
+;; So our description aims to achieve an insn schedule in which the
+;; insns would not wait in the completion buffer.
+(define_cpu_unit "ppc8540_retire_0,ppc8540_retire_1" "ppc8540_retire")
+
+;; Branch unit:
+(define_cpu_unit "ppc8540_bu" "ppc8540_most")
+
+;; SU:
+(define_cpu_unit "ppc8540_su0_stage0,ppc8540_su1_stage0" "ppc8540_most")
+
+;; We could describe MU subunits here for float multiply, float add,
+;; etc. But the resulting automaton would behave the same way as the
+;; single pipeline described below, because the MU can start only one
+;; insn per cycle. Actually we could simplify the automaton further by
+;; not describing stages 1-3; the resulting automaton would be the same.
+(define_cpu_unit "ppc8540_mu_stage0,ppc8540_mu_stage1" "ppc8540_most")
+(define_cpu_unit "ppc8540_mu_stage2,ppc8540_mu_stage3" "ppc8540_most")
+
+;; The following unit is used to describe non-pipelined division.
+(define_cpu_unit "ppc8540_mu_div" "ppc8540_long")
+
+;; Here we simplify the LSU unit description by not describing its stages.
+(define_cpu_unit "ppc8540_lsu" "ppc8540_most")
+
+;; The following units are used to make the automata deterministic.
+(define_cpu_unit "present_ppc8540_decode_0" "ppc8540_most")
+(define_cpu_unit "present_ppc8540_issue_0" "ppc8540_most")
+(define_cpu_unit "present_ppc8540_retire_0" "ppc8540_retire")
+(define_cpu_unit "present_ppc8540_su0_stage0" "ppc8540_most")
+
+;; The following presence sets make the automata deterministic when the
+;; ndfa option is used.
+(presence_set "present_ppc8540_decode_0" "ppc8540_decode_0")
+(presence_set "present_ppc8540_issue_0" "ppc8540_issue_0")
+(presence_set "present_ppc8540_retire_0" "ppc8540_retire_0")
+(presence_set "present_ppc8540_su0_stage0" "ppc8540_su0_stage0")
+
+;; Some useful abbreviations.
+(define_reservation "ppc8540_decode"
+ "ppc8540_decode_0|ppc8540_decode_1+present_ppc8540_decode_0")
+(define_reservation "ppc8540_issue"
+ "ppc8540_issue_0|ppc8540_issue_1+present_ppc8540_issue_0")
+(define_reservation "ppc8540_retire"
+ "ppc8540_retire_0|ppc8540_retire_1+present_ppc8540_retire_0")
+(define_reservation "ppc8540_su_stage0"
+ "ppc8540_su0_stage0|ppc8540_su1_stage0+present_ppc8540_su0_stage0")
+
+;; Simple SU insns
+(define_insn_reservation "ppc8540_su" 1
+ (and (eq_attr "type" "integer,insert_word,cmp,compare,delayed_compare,fast_compare")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+(define_insn_reservation "ppc8540_two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+(define_insn_reservation "ppc8540_three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Branch. Actually this latency time is not used by the scheduler.
+(define_insn_reservation "ppc8540_branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_bu,ppc8540_retire")
+
+;; Multiply
+(define_insn_reservation "ppc8540_multiply" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Divide. We use the average latency time here. We omit reserving a
+;; retire unit because the resulting automaton would be huge. We ignore
+;; reservation of mu_stage3 here because we use the average latency
+;; time.
+(define_insn_reservation "ppc8540_divide" 14
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*13")
+
+;; CR logical
+(define_insn_reservation "ppc8540_cr_logical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_bu,ppc8540_retire")
+
+;; Mfcr
+(define_insn_reservation "ppc8540_mfcr" 1
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Mtcrf
+(define_insn_reservation "ppc8540_mtcrf" 1
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Mtjmpr
+(define_insn_reservation "ppc8540_mtjmpr" 1
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Loads
+(define_insn_reservation "ppc8540_load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,sync")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Stores.
+(define_insn_reservation "ppc8540_store" 3
+ (and (eq_attr "type" "store,store_ux,store_u,store_c")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Simple FP
+(define_insn_reservation "ppc8540_simple_float" 1
+ (and (eq_attr "type" "fpsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; FP
+(define_insn_reservation "ppc8540_float" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Float divides. We omit reserving a retire unit and mu_stage3
+;; because the resulting automaton would be huge.
+(define_insn_reservation "ppc8540_float_vector_divide" 29
+ (and (eq_attr "type" "vecfdiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*28")
+
+;; Brinc
+(define_insn_reservation "ppc8540_brinc" 1
+ (and (eq_attr "type" "brinc")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Simple vector
+(define_insn_reservation "ppc8540_simple_vector" 1
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Simple vector compare
+(define_insn_reservation "ppc8540_simple_vector_compare" 1
+ (and (eq_attr "type" "veccmpsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Vector compare
+(define_insn_reservation "ppc8540_vector_compare" 1
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; evsplatfi evsplati
+(define_insn_reservation "ppc8540_vector_perm" 1
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Vector float
+(define_insn_reservation "ppc8540_float_vector" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Vector divides: use the average latency. We omit reserving a retire
+;; unit because the resulting automaton would be huge. We ignore
+;; reservation of mu_stage3 here because we use the average latency time.
+(define_insn_reservation "ppc8540_vector_divide" 14
+ (and (eq_attr "type" "vecdiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*13")
+
+;; Complex vector.
+(define_insn_reservation "ppc8540_complex_vector" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Vector load
+(define_insn_reservation "ppc8540_vector_load" 3
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Vector store
+(define_insn_reservation "ppc8540_vector_store" 3
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.h
new file mode 100644
index 000000000..f682a2121
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.h
@@ -0,0 +1,464 @@
+/* PowerPC AltiVec include file.
+ Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+ Rewritten by Paolo Bonzini (bonzini@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented to conform to the specification included in the AltiVec
+ Technology Programming Interface Manual (ALTIVECPIM/D 6/1999 Rev 0). */
+
+#ifndef _ALTIVEC_H
+#define _ALTIVEC_H 1
+
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#error Use the "-maltivec" flag to enable PowerPC AltiVec support
+#endif
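+
+/* A minimal use, compiled with `gcc -maltivec' (on Apple GCC,
+ `-faltivec' synthesizes these definitions instead; see below):
+
+ #include <altivec.h>
+ vector float madd3 (vector float a, vector float b, vector float c)
+ {
+ return vec_madd (a, b, c);
+ }
+*/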
+
+/* APPLE LOCAL begin AltiVec */
+/* If __APPLE_ALTIVEC__ is defined, the compiler has internally
+ synthesized the definitions contained in this header. */
+
+#if defined(__APPLE_ALTIVEC__)
+#warning Ignoring <altivec.h> because "-faltivec" specified
+#else
+/* You are allowed to undef these for C++ compatibility. */
+#define vector __vector
+#define pixel __pixel
+#define bool __bool
+/* APPLE LOCAL end AltiVec */
+
+/* Condition register codes for AltiVec predicates. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+/* Synonyms. */
+#define vec_vaddcuw vec_addc
+#define vec_vand vec_and
+#define vec_vandc vec_andc
+#define vec_vrfip vec_ceil
+#define vec_vcmpbfp vec_cmpb
+#define vec_vcmpgefp vec_cmpge
+#define vec_vctsxs vec_cts
+#define vec_vctuxs vec_ctu
+#define vec_vexptefp vec_expte
+#define vec_vrfim vec_floor
+#define vec_lvx vec_ld
+#define vec_lvxl vec_ldl
+#define vec_vlogefp vec_loge
+#define vec_vmaddfp vec_madd
+#define vec_vmhaddshs vec_madds
+#define vec_vmladduhm vec_mladd
+#define vec_vmhraddshs vec_mradds
+#define vec_vnmsubfp vec_nmsub
+#define vec_vnor vec_nor
+#define vec_vor vec_or
+#define vec_vpkpx vec_packpx
+#define vec_vperm vec_perm
+#define vec_vrefp vec_re
+#define vec_vrfin vec_round
+#define vec_vrsqrtefp vec_rsqrte
+#define vec_vsel vec_sel
+#define vec_vsldoi vec_sld
+#define vec_vsl vec_sll
+#define vec_vslo vec_slo
+#define vec_vspltisb vec_splat_s8
+#define vec_vspltish vec_splat_s16
+#define vec_vspltisw vec_splat_s32
+#define vec_vsr vec_srl
+#define vec_vsro vec_sro
+#define vec_stvx vec_st
+#define vec_stvxl vec_stl
+#define vec_vsubcuw vec_subc
+#define vec_vsum2sws vec_sum2s
+#define vec_vsumsws vec_sums
+#define vec_vrfiz vec_trunc
+#define vec_vxor vec_xor
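+
+/* The instruction-named form and the generic form are interchangeable;
+   for example, vec_vrfiz (x) and vec_trunc (x) expand identically.  */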
+
+/* Functions that are resolved by the backend to one of the
+ typed builtins. */
+#define vec_vaddfp __builtin_vec_vaddfp
+#define vec_addc __builtin_vec_addc
+#define vec_vaddsws __builtin_vec_vaddsws
+#define vec_vaddshs __builtin_vec_vaddshs
+#define vec_vaddsbs __builtin_vec_vaddsbs
+#define vec_vavgsw __builtin_vec_vavgsw
+#define vec_vavguw __builtin_vec_vavguw
+#define vec_vavgsh __builtin_vec_vavgsh
+#define vec_vavguh __builtin_vec_vavguh
+#define vec_vavgsb __builtin_vec_vavgsb
+#define vec_vavgub __builtin_vec_vavgub
+#define vec_ceil __builtin_vec_ceil
+#define vec_cmpb __builtin_vec_cmpb
+#define vec_vcmpeqfp __builtin_vec_vcmpeqfp
+#define vec_cmpge __builtin_vec_cmpge
+#define vec_vcmpgtfp __builtin_vec_vcmpgtfp
+#define vec_vcmpgtsw __builtin_vec_vcmpgtsw
+#define vec_vcmpgtuw __builtin_vec_vcmpgtuw
+#define vec_vcmpgtsh __builtin_vec_vcmpgtsh
+#define vec_vcmpgtuh __builtin_vec_vcmpgtuh
+#define vec_vcmpgtsb __builtin_vec_vcmpgtsb
+#define vec_vcmpgtub __builtin_vec_vcmpgtub
+#define vec_vcfsx __builtin_vec_vcfsx
+#define vec_vcfux __builtin_vec_vcfux
+#define vec_cts __builtin_vec_cts
+#define vec_ctu __builtin_vec_ctu
+#define vec_expte __builtin_vec_expte
+#define vec_floor __builtin_vec_floor
+#define vec_loge __builtin_vec_loge
+#define vec_madd __builtin_vec_madd
+#define vec_madds __builtin_vec_madds
+#define vec_mtvscr __builtin_vec_mtvscr
+#define vec_vmaxfp __builtin_vec_vmaxfp
+#define vec_vmaxsw __builtin_vec_vmaxsw
+#define vec_vmaxsh __builtin_vec_vmaxsh
+#define vec_vmaxsb __builtin_vec_vmaxsb
+#define vec_vminfp __builtin_vec_vminfp
+#define vec_vminsw __builtin_vec_vminsw
+#define vec_vminsh __builtin_vec_vminsh
+#define vec_vminsb __builtin_vec_vminsb
+#define vec_mradds __builtin_vec_mradds
+#define vec_vmsumshm __builtin_vec_vmsumshm
+#define vec_vmsumuhm __builtin_vec_vmsumuhm
+#define vec_vmsummbm __builtin_vec_vmsummbm
+#define vec_vmsumubm __builtin_vec_vmsumubm
+#define vec_vmsumshs __builtin_vec_vmsumshs
+#define vec_vmsumuhs __builtin_vec_vmsumuhs
+#define vec_vmulesb __builtin_vec_vmulesb
+#define vec_vmulesh __builtin_vec_vmulesh
+#define vec_vmuleuh __builtin_vec_vmuleuh
+#define vec_vmuleub __builtin_vec_vmuleub
+#define vec_vmulosh __builtin_vec_vmulosh
+#define vec_vmulouh __builtin_vec_vmulouh
+#define vec_vmulosb __builtin_vec_vmulosb
+#define vec_vmuloub __builtin_vec_vmuloub
+#define vec_nmsub __builtin_vec_nmsub
+#define vec_packpx __builtin_vec_packpx
+#define vec_vpkswss __builtin_vec_vpkswss
+#define vec_vpkuwus __builtin_vec_vpkuwus
+#define vec_vpkshss __builtin_vec_vpkshss
+#define vec_vpkuhus __builtin_vec_vpkuhus
+#define vec_vpkswus __builtin_vec_vpkswus
+#define vec_vpkshus __builtin_vec_vpkshus
+#define vec_re __builtin_vec_re
+#define vec_round __builtin_vec_round
+#define vec_rsqrte __builtin_vec_rsqrte
+#define vec_vsubfp __builtin_vec_vsubfp
+#define vec_subc __builtin_vec_subc
+#define vec_vsubsws __builtin_vec_vsubsws
+#define vec_vsubshs __builtin_vec_vsubshs
+#define vec_vsubsbs __builtin_vec_vsubsbs
+#define vec_sum4s __builtin_vec_sum4s
+#define vec_vsum4shs __builtin_vec_vsum4shs
+#define vec_vsum4sbs __builtin_vec_vsum4sbs
+#define vec_vsum4ubs __builtin_vec_vsum4ubs
+#define vec_sum2s __builtin_vec_sum2s
+#define vec_sums __builtin_vec_sums
+#define vec_trunc __builtin_vec_trunc
+#define vec_vupkhpx __builtin_vec_vupkhpx
+#define vec_vupkhsh __builtin_vec_vupkhsh
+#define vec_vupkhsb __builtin_vec_vupkhsb
+#define vec_vupklpx __builtin_vec_vupklpx
+#define vec_vupklsh __builtin_vec_vupklsh
+#define vec_vupklsb __builtin_vec_vupklsb
+#define vec_abs __builtin_vec_abs
+#define vec_abss __builtin_vec_abss
+#define vec_add __builtin_vec_add
+#define vec_adds __builtin_vec_adds
+#define vec_and __builtin_vec_and
+#define vec_andc __builtin_vec_andc
+#define vec_avg __builtin_vec_avg
+#define vec_cmpeq __builtin_vec_cmpeq
+#define vec_cmpgt __builtin_vec_cmpgt
+#define vec_ctf __builtin_vec_ctf
+#define vec_dst __builtin_vec_dst
+#define vec_dstst __builtin_vec_dstst
+#define vec_dststt __builtin_vec_dststt
+#define vec_dstt __builtin_vec_dstt
+#define vec_ld __builtin_vec_ld
+#define vec_lde __builtin_vec_lde
+#define vec_ldl __builtin_vec_ldl
+#define vec_lvebx __builtin_vec_lvebx
+#define vec_lvehx __builtin_vec_lvehx
+#define vec_lvewx __builtin_vec_lvewx
+#define vec_lvsl __builtin_vec_lvsl
+#define vec_lvsr __builtin_vec_lvsr
+#define vec_max __builtin_vec_max
+#define vec_mergeh __builtin_vec_mergeh
+#define vec_mergel __builtin_vec_mergel
+#define vec_min __builtin_vec_min
+#define vec_mladd __builtin_vec_mladd
+#define vec_msum __builtin_vec_msum
+#define vec_msums __builtin_vec_msums
+#define vec_mule __builtin_vec_mule
+#define vec_mulo __builtin_vec_mulo
+#define vec_nor __builtin_vec_nor
+#define vec_or __builtin_vec_or
+#define vec_pack __builtin_vec_pack
+#define vec_packs __builtin_vec_packs
+#define vec_packsu __builtin_vec_packsu
+#define vec_perm __builtin_vec_perm
+#define vec_rl __builtin_vec_rl
+#define vec_sel __builtin_vec_sel
+#define vec_sl __builtin_vec_sl
+#define vec_sld __builtin_vec_sld
+#define vec_sll __builtin_vec_sll
+#define vec_slo __builtin_vec_slo
+#define vec_splat __builtin_vec_splat
+#define vec_sr __builtin_vec_sr
+#define vec_sra __builtin_vec_sra
+#define vec_srl __builtin_vec_srl
+#define vec_sro __builtin_vec_sro
+#define vec_st __builtin_vec_st
+#define vec_ste __builtin_vec_ste
+#define vec_stl __builtin_vec_stl
+#define vec_stvebx __builtin_vec_stvebx
+#define vec_stvehx __builtin_vec_stvehx
+#define vec_stvewx __builtin_vec_stvewx
+#define vec_sub __builtin_vec_sub
+#define vec_subs __builtin_vec_subs
+#define vec_sum __builtin_vec_sum
+#define vec_unpackh __builtin_vec_unpackh
+#define vec_unpackl __builtin_vec_unpackl
+#define vec_vaddubm __builtin_vec_vaddubm
+#define vec_vaddubs __builtin_vec_vaddubs
+#define vec_vadduhm __builtin_vec_vadduhm
+#define vec_vadduhs __builtin_vec_vadduhs
+#define vec_vadduwm __builtin_vec_vadduwm
+#define vec_vadduws __builtin_vec_vadduws
+#define vec_vcmpequb __builtin_vec_vcmpequb
+#define vec_vcmpequh __builtin_vec_vcmpequh
+#define vec_vcmpequw __builtin_vec_vcmpequw
+#define vec_vmaxub __builtin_vec_vmaxub
+#define vec_vmaxuh __builtin_vec_vmaxuh
+#define vec_vmaxuw __builtin_vec_vmaxuw
+#define vec_vminub __builtin_vec_vminub
+#define vec_vminuh __builtin_vec_vminuh
+#define vec_vminuw __builtin_vec_vminuw
+#define vec_vmrghb __builtin_vec_vmrghb
+#define vec_vmrghh __builtin_vec_vmrghh
+#define vec_vmrghw __builtin_vec_vmrghw
+#define vec_vmrglb __builtin_vec_vmrglb
+#define vec_vmrglh __builtin_vec_vmrglh
+#define vec_vmrglw __builtin_vec_vmrglw
+#define vec_vpkuhum __builtin_vec_vpkuhum
+#define vec_vpkuwum __builtin_vec_vpkuwum
+#define vec_vrlb __builtin_vec_vrlb
+#define vec_vrlh __builtin_vec_vrlh
+#define vec_vrlw __builtin_vec_vrlw
+#define vec_vslb __builtin_vec_vslb
+#define vec_vslh __builtin_vec_vslh
+#define vec_vslw __builtin_vec_vslw
+#define vec_vspltb __builtin_vec_vspltb
+#define vec_vsplth __builtin_vec_vsplth
+#define vec_vspltw __builtin_vec_vspltw
+#define vec_vsrab __builtin_vec_vsrab
+#define vec_vsrah __builtin_vec_vsrah
+#define vec_vsraw __builtin_vec_vsraw
+#define vec_vsrb __builtin_vec_vsrb
+#define vec_vsrh __builtin_vec_vsrh
+#define vec_vsrw __builtin_vec_vsrw
+#define vec_vsububs __builtin_vec_vsububs
+#define vec_vsububm __builtin_vec_vsububm
+#define vec_vsubuhm __builtin_vec_vsubuhm
+#define vec_vsubuhs __builtin_vec_vsubuhs
+#define vec_vsubuwm __builtin_vec_vsubuwm
+#define vec_vsubuws __builtin_vec_vsubuws
+#define vec_xor __builtin_vec_xor
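+
+/* For instance (illustrative only), the overloaded vec_add above resolves
+   to a typed builtin from its operand types:
+
+     vector signed short a, b;
+     vector float x, y;
+     vector signed short s = vec_add (a, b);    maps to vadduhm
+     vector float f = vec_add (x, y);           maps to vaddfp
+*/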
+
+/* Predicates.
+   For C++, we use templates in order to allow non-parenthesized arguments.
+   For C, instead, we use macros, since non-parenthesized arguments were
+   not allowed even in older GCC implementations of AltiVec.
+
+   In the future, we may add more magic to the back-end, so that these
+   one- and two-argument macros are no longer needed. */
+
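+/* Example (illustrative only): a predicate folds a whole-vector comparison
+   into a single int via the CR6 field, so it can drive control flow:
+
+     vector float a, b;
+     if (vec_all_eq (a, b))      true iff every element compares equal
+       ...
+     if (vec_any_nan (a))        true iff at least one element is a NaN
+       ...
+*/
+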
+#ifdef __cplusplus
+#define __altivec_unary_pred(NAME, CALL) \
+template <class T> int NAME (T a1) { return CALL; }
+
+#define __altivec_scalar_pred(NAME, CALL) \
+template <class T, class U> int NAME (T a1, U a2) { return CALL; }
+
+/* Given the vec_step of a type, return the corresponding bool type. */
+template <int STEP> class __altivec_bool_ret { };
+template <> class __altivec_bool_ret <4> {
+ public:
+  typedef __vector __bool int __ret;
+};
+template <> class __altivec_bool_ret <8> {
+ public:
+  typedef __vector __bool short __ret;
+};
+template <> class __altivec_bool_ret <16> {
+ public:
+  typedef __vector __bool char __ret;
+};
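+
+/* So, for example, vec_step of a `__vector signed int' is 4, and the
+   specialization above maps it to `__vector __bool int'; this is how
+   vec_cmplt and vec_cmple below derive their return type from their
+   argument types.  */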
+
+/* Be very liberal in the pairs we accept. Mistakes such as passing
+ a `vector char' and `vector short' will be caught by the middle-end,
+ while any attempt to detect them here would produce hard to understand
+ error messages involving the implementation details of AltiVec. */
+#define __altivec_binary_pred(NAME, CALL) \
+template <class T, class U> \
+typename __altivec_bool_ret <vec_step (T)>::__ret \
+NAME (T a1, U a2) \
+{ \
+ return CALL; \
+}
+
+__altivec_binary_pred(vec_cmplt,
+ __builtin_vec_cmpgt (a2, a1))
+__altivec_binary_pred(vec_cmple,
+  __builtin_vec_cmpge (a2, a1))
+
+__altivec_scalar_pred(vec_all_in,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_out,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2))
+
+__altivec_unary_pred(vec_all_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1))
+__altivec_unary_pred(vec_any_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1))
+
+__altivec_unary_pred(vec_all_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1))
+__altivec_unary_pred(vec_any_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1))
+
+__altivec_scalar_pred(vec_all_eq,
+ __builtin_vec_vcmpeq_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_ne,
+ __builtin_vec_vcmpeq_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_eq,
+ __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_ne,
+ __builtin_vec_vcmpeq_p (__CR6_LT_REV, a1, a2))
+
+__altivec_scalar_pred(vec_all_gt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_lt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_any_gt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_lt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a2, a1))
+
+__altivec_scalar_pred(vec_all_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1))
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types,
+ while for integer types it is converted to __builtin_vec_vcmpgt_p,
+ with inverted args and condition code. */
+__altivec_scalar_pred(vec_all_le,
+ __builtin_vec_vcmpge_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_all_ge,
+ __builtin_vec_vcmpge_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_any_le,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a2, a1))
+__altivec_scalar_pred(vec_any_ge,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a1, a2))
+
+__altivec_scalar_pred(vec_all_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1))
+
+#undef __altivec_scalar_pred
+#undef __altivec_unary_pred
+#undef __altivec_binary_pred
+#else
+#define vec_cmplt(a1, a2) __builtin_vec_cmpgt ((a2), (a1))
+#define vec_cmple(a1, a2) __builtin_altivec_vcmpgefp ((a2), (a1))
+
+#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2))
+#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2))
+
+#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1))
+#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1))
+
+#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1))
+#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1))
+
+#define vec_all_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT, (a1), (a2))
+#define vec_all_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ, (a1), (a2))
+#define vec_any_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT_REV, (a1), (a2))
+
+#define vec_all_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a1), (a2))
+#define vec_all_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a2), (a1))
+#define vec_any_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a2), (a1))
+
+#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1))
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types,
+ while for integer types it is converted to __builtin_vec_vcmpgt_p,
+ with inverted args and condition code. */
+#define vec_all_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a2), (a1))
+#define vec_all_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a1), (a2))
+#define vec_any_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a2), (a1))
+#define vec_any_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a1), (a2))
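+
+/* For example (illustrative), on integer vectors vec_all_le (a1, a2)
+   becomes __builtin_vec_vcmpgt_p (__CR6_EQ, (a1), (a2)): "greater-than
+   holds nowhere", which is exactly "a1 <= a2 in every element".  */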
+
+#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1))
+#endif
+
+/* These do not accept vectors, so they do not have a __builtin_vec_*
+ counterpart. */
+#define vec_dss(x) __builtin_altivec_dss((x))
+#define vec_dssall() __builtin_altivec_dssall ()
+#define vec_mfvscr() ((__vector unsigned short) __builtin_altivec_mfvscr ())
+#define vec_splat_s8(x) __builtin_altivec_vspltisb ((x))
+#define vec_splat_s16(x) __builtin_altivec_vspltish ((x))
+#define vec_splat_s32(x) __builtin_altivec_vspltisw ((x))
+#define vec_splat_u8(x) ((__vector unsigned char) vec_splat_s8 ((x)))
+#define vec_splat_u16(x) ((__vector unsigned short) vec_splat_s16 ((x)))
+#define vec_splat_u32(x) ((__vector unsigned int) vec_splat_s32 ((x)))
+
+/* This also accepts a type for its parameter, so it is not enough
+ to #define vec_step to __builtin_vec_step. */
+#define vec_step(x) __builtin_vec_step (* (__typeof__ (x) *) 0)
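+
+/* Both forms therefore work (illustrative):
+
+     vec_step (vector signed int)   evaluates to 4
+     vec_step (v)                   for an object v of that type
+
+   The (* (__typeof__ (x) *) 0) dereference is never evaluated, much like
+   a sizeof operand; it merely hands __builtin_vec_step an expression of
+   the right type even when x is a bare type name.  */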
+
+/* APPLE LOCAL AltiVec */
+#endif /* __APPLE_ALTIVEC__ */
+#endif /* _ALTIVEC_H */
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.md
new file mode 100644
index 000000000..a2ce18e5b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/altivec.md
@@ -0,0 +1,2351 @@
+;; AltiVec patterns.
+;; Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_constants
+ [(UNSPEC_VCMPBFP 50)
+ (UNSPEC_VCMPEQUB 51)
+ (UNSPEC_VCMPEQUH 52)
+ (UNSPEC_VCMPEQUW 53)
+ (UNSPEC_VCMPEQFP 54)
+ (UNSPEC_VCMPGEFP 55)
+ (UNSPEC_VCMPGTUB 56)
+ (UNSPEC_VCMPGTSB 57)
+ (UNSPEC_VCMPGTUH 58)
+ (UNSPEC_VCMPGTSH 59)
+ (UNSPEC_VCMPGTUW 60)
+ (UNSPEC_VCMPGTSW 61)
+ (UNSPEC_VCMPGTFP 62)
+ (UNSPEC_VMSUMU 65)
+ (UNSPEC_VMSUMM 66)
+ (UNSPEC_VMSUMSHM 68)
+ (UNSPEC_VMSUMUHS 69)
+ (UNSPEC_VMSUMSHS 70)
+ (UNSPEC_VMHADDSHS 71)
+ (UNSPEC_VMHRADDSHS 72)
+ (UNSPEC_VMLADDUHM 73)
+ (UNSPEC_VADDCUW 75)
+ (UNSPEC_VADDU 76)
+ (UNSPEC_VADDS 77)
+ (UNSPEC_VAVGU 80)
+ (UNSPEC_VAVGS 81)
+ (UNSPEC_VMULEUB 83)
+ (UNSPEC_VMULESB 84)
+ (UNSPEC_VMULEUH 85)
+ (UNSPEC_VMULESH 86)
+ (UNSPEC_VMULOUB 87)
+ (UNSPEC_VMULOSB 88)
+ (UNSPEC_VMULOUH 89)
+ (UNSPEC_VMULOSH 90)
+ (UNSPEC_VPKUHUM 93)
+ (UNSPEC_VPKUWUM 94)
+ (UNSPEC_VPKPX 95)
+ (UNSPEC_VPKSHSS 97)
+ (UNSPEC_VPKSWSS 99)
+ (UNSPEC_VPKUHUS 100)
+ (UNSPEC_VPKSHUS 101)
+ (UNSPEC_VPKUWUS 102)
+ (UNSPEC_VPKSWUS 103)
+ (UNSPEC_VRL 104)
+ (UNSPEC_VSL 107)
+ (UNSPEC_VSLV4SI 110)
+ (UNSPEC_VSLO 111)
+ (UNSPEC_VSR 118)
+ (UNSPEC_VSRO 119)
+ (UNSPEC_VSUBCUW 124)
+ (UNSPEC_VSUBU 125)
+ (UNSPEC_VSUBS 126)
+ (UNSPEC_VSUM4UBS 131)
+ (UNSPEC_VSUM4S 132)
+ (UNSPEC_VSUM2SWS 134)
+ (UNSPEC_VSUMSWS 135)
+ (UNSPEC_VPERM 144)
+ (UNSPEC_VRFIP 148)
+ (UNSPEC_VRFIN 149)
+ (UNSPEC_VRFIM 150)
+ (UNSPEC_VCFUX 151)
+ (UNSPEC_VCFSX 152)
+ (UNSPEC_VCTUXS 153)
+ (UNSPEC_VCTSXS 154)
+ (UNSPEC_VLOGEFP 155)
+ (UNSPEC_VEXPTEFP 156)
+ (UNSPEC_VRSQRTEFP 157)
+ (UNSPEC_VREFP 158)
+ (UNSPEC_VSEL4SI 159)
+ (UNSPEC_VSEL4SF 160)
+ (UNSPEC_VSEL8HI 161)
+ (UNSPEC_VSEL16QI 162)
+ (UNSPEC_VLSDOI 163)
+ (UNSPEC_VUPKHSB 167)
+ (UNSPEC_VUPKHPX 168)
+ (UNSPEC_VUPKHSH 169)
+ (UNSPEC_VUPKLSB 170)
+ (UNSPEC_VUPKLPX 171)
+ (UNSPEC_VUPKLSH 172)
+ (UNSPEC_PREDICATE 173)
+ (UNSPEC_DST 190)
+ (UNSPEC_DSTT 191)
+ (UNSPEC_DSTST 192)
+ (UNSPEC_DSTSTT 193)
+ (UNSPEC_LVSL 194)
+ (UNSPEC_LVSR 195)
+ (UNSPEC_LVE 196)
+ (UNSPEC_STVX 201)
+ (UNSPEC_STVXL 202)
+ (UNSPEC_STVE 203)
+ (UNSPEC_SET_VSCR 213)
+ (UNSPEC_GET_VRSAVE 214)
+ (UNSPEC_REALIGN_LOAD 215)
+ (UNSPEC_REDUC_PLUS 217)
+ (UNSPEC_VECSH 219)
+ (UNSPEC_VCOND_V4SI 301)
+ (UNSPEC_VCOND_V4SF 302)
+ (UNSPEC_VCOND_V8HI 303)
+ (UNSPEC_VCOND_V16QI 304)
+ (UNSPEC_VCONDU_V4SI 305)
+ (UNSPEC_VCONDU_V8HI 306)
+ (UNSPEC_VCONDU_V16QI 307)
+ ])
+
+(define_constants
+ [(UNSPECV_SET_VRSAVE 30)
+ (UNSPECV_MTVSCR 186)
+ (UNSPECV_MFVSCR 187)
+ (UNSPECV_DSSALL 188)
+ (UNSPECV_DSS 189)
+ ])
+
+;; Vec int modes
+(define_mode_macro VI [V4SI V8HI V16QI])
+;; Short vec int modes
+(define_mode_macro VIshort [V8HI V16QI])
+;; Vec float modes
+(define_mode_macro VF [V4SF])
+;; Vec modes (unfortunately, mode macros are not composable)
+(define_mode_macro V [V4SI V8HI V16QI V4SF])
+
+(define_mode_attr VI_char [(V4SI "w") (V8HI "h") (V16QI "b")])
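+
+;; Illustrative example: a pattern below such as "add<mode>3" over VI
+;; expands to addv4si3, addv8hi3 and addv16qi3, and <VI_char> picks the
+;; matching mnemonic suffix, yielding vadduwm, vadduhm and vaddubm.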
+
+;; Generic LVX load instruction.
+;; APPLE LOCAL begin 4708231
+(define_insn "altivec_lvx_<mode>"
+ [(set (match_operand:V 0 "altivec_register_operand" "=v")
+ (match_operand:V 1 "indexed_or_indirect_operand" "Z"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+;; Generic STVX store instruction.
+(define_insn "altivec_stvx_<mode>"
+ [(set (match_operand:V 0 "indexed_or_indirect_operand" "=Z")
+ (match_operand:V 1 "altivec_register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+;; APPLE LOCAL end 4708231
+
+;; Vector move instructions.
+(define_expand "mov<mode>"
+ [(set (match_operand:V 0 "nonimmediate_operand" "")
+ (match_operand:V 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+{
+ rs6000_emit_move (operands[0], operands[1], <MODE>mode);
+ DONE;
+})
+
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:V 0 "nonimmediate_operand" "=Z,v,v,o,r,r,v")
+ (match_operand:V 1 "input_operand" "v,Z,v,r,o,r,W"))]
+ "TARGET_ALTIVEC
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "stvx %1,%y0";
+ case 1: return "lvx %0,%y1";
+ case 2: return "vor %0,%1,%1";
+ case 3: return "#";
+ case 4: return "#";
+ case 5: return "#";
+ case 6: return output_vec_const_move (operands);
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
+
+(define_split
+ [(set (match_operand:V4SI 0 "nonimmediate_operand" "")
+ (match_operand:V4SI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
+
+(define_split
+ [(set (match_operand:V8HI 0 "nonimmediate_operand" "")
+ (match_operand:V8HI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_split
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
+ (match_operand:V16QI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_split
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
+ (match_operand:V4SF 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
+
+(define_split
+ [(set (match_operand:VI 0 "altivec_register_operand" "")
+ (match_operand:VI 1 "easy_vector_constant_add_self" ""))]
+ "TARGET_ALTIVEC && reload_completed"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (plus:VI (match_dup 0)
+ (match_dup 0)))]
+{
+ rtx dup = gen_easy_altivec_constant (operands[1]);
+ rtx const_vec;
+
+ /* Divide the operand of the resulting VEC_DUPLICATE, and use
+ simplify_rtx to make a CONST_VECTOR. */
+ XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
+ XEXP (dup, 0), const1_rtx);
+ const_vec = simplify_rtx (dup);
+
+ if (GET_MODE (const_vec) == <MODE>mode)
+ operands[3] = const_vec;
+ else
+ operands[3] = gen_lowpart (<MODE>mode, const_vec);
+})
+
+(define_insn "get_vrsave_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(reg:SI 109)] UNSPEC_GET_VRSAVE))]
+ "TARGET_ALTIVEC"
+{
+ if (TARGET_MACHO)
+ return "mfspr %0,256";
+ else
+ return "mfvrsave %0";
+}
+ [(set_attr "type" "*")])
+
+(define_insn "*set_vrsave_internal"
+ [(match_parallel 0 "vrsave_operation"
+ [(set (reg:SI 109)
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
+ (reg:SI 109)] UNSPECV_SET_VRSAVE))])]
+ "TARGET_ALTIVEC"
+{
+ if (TARGET_MACHO)
+ return "mtspr 256,%1";
+ else
+ return "mtvrsave %1";
+}
+ [(set_attr "type" "*")])
+
+(define_insn "*save_world"
+ [(match_parallel 0 "save_world_operation"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_world"
+ [(match_parallel 0 "restore_world_operation"
+ [(return)
+ (use (match_operand:SI 1 "register_operand" "l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" "=r"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "b %z2")
+
+;; Simple binary operations.
+
+;; add
+(define_insn "add<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (plus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddu<VI_char>m %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "addv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vaddcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VADDCUW))]
+ "TARGET_ALTIVEC"
+ "vaddcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vaddu<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vadds<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vadds<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; sub
+(define_insn "sub<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (minus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubu<VI_char>m %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vsubcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUBCUW))]
+ "TARGET_ALTIVEC"
+ "vsubcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsubu<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubs<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsubs<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; avg
+(define_insn "altivec_vavgu<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGU))]
+ "TARGET_ALTIVEC"
+ "vavgu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavgs<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGS))]
+ "TARGET_ALTIVEC"
+ "vavgs<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpbfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPBFP))]
+ "TARGET_ALTIVEC"
+ "vcmpbfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpequb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUB))]
+ "TARGET_ALTIVEC"
+ "vcmpequb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUH))]
+ "TARGET_ALTIVEC"
+ "vcmpequh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUW))]
+ "TARGET_ALTIVEC"
+ "vcmpequw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpeqfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPEQFP))]
+ "TARGET_ALTIVEC"
+ "vcmpeqfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgefp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGEFP))]
+ "TARGET_ALTIVEC"
+ "vcmpgefp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtub"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUB))]
+ "TARGET_ALTIVEC"
+ "vcmpgtub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSB))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUH))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSH))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUW))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSW))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGTFP))]
+ "TARGET_ALTIVEC"
+ "vcmpgtfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+;; Fused multiply add
+(define_insn "altivec_vmaddfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaddfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+;; We do multiply as a fused multiply-add with an add of a -0.0 vector.
+
+(define_expand "mulv4sf3"
+ [(use (match_operand:V4SF 0 "register_operand" ""))
+ (use (match_operand:V4SF 1 "register_operand" ""))
+ (use (match_operand:V4SF 2 "register_operand" ""))]
+ "TARGET_ALTIVEC && TARGET_FUSED_MADD"
+ "
+{
+ rtx neg0;
+
+ /* Generate [-0.0, -0.0, -0.0, -0.0]. */
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_altivec_vslw (neg0, neg0, neg0));
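+  /* Why this works (informal): vspltisw -1 sets every word to 0xffffffff,
+     and vslw shifts each word left by the low five bits of the count,
+     here 31, leaving only the sign bit, 0x80000000, which is -0.0f.  */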
+
+ /* Use the multiply-add. */
+ emit_insn (gen_altivec_vmaddfp (operands[0], operands[1], operands[2],
+ gen_lowpart (V4SFmode, neg0)));
+ DONE;
+}")
+
+;; 32-bit integer multiplication, per word:
+;; A_high = (Operand_1 & 0xFFFF0000) >> 16
+;; A_low = Operand_1 & 0xFFFF
+;; B_high = (Operand_2 & 0xFFFF0000) >> 16
+;; B_low = Operand_2 & 0xFFFF
+;; result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
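+;; (The A_high * B_high term would contribute only above bit 31, so it
+;; vanishes modulo 2^32 and is rightly omitted.)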
+
+;; (define_insn "mulv4si3"
+;; [(set (match_operand:V4SI 0 "register_operand" "=v")
+;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
+;; (match_operand:V4SI 2 "register_operand" "v")))]
+(define_expand "mulv4si3"
+ [(use (match_operand:V4SI 0 "register_operand" ""))
+ (use (match_operand:V4SI 1 "register_operand" ""))
+ (use (match_operand:V4SI 2 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+ {
+ rtx zero;
+ rtx swap;
+ rtx small_swap;
+ rtx sixteen;
+ rtx one;
+ rtx two;
+ rtx low_product;
+ rtx high_product;
+
+ zero = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+
+ sixteen = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
+
+ swap = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vrlw (swap, operands[2], sixteen));
+
+ one = gen_reg_rtx (V8HImode);
+ convert_move (one, operands[1], 0);
+
+ two = gen_reg_rtx (V8HImode);
+ convert_move (two, operands[2], 0);
+
+ small_swap = gen_reg_rtx (V8HImode);
+ convert_move (small_swap, swap, 0);
+
+ low_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmulouh (low_product, one, two));
+
+ high_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
+
+ emit_insn (gen_altivec_vslw (high_product, high_product, sixteen));
+
+ emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
+
+ DONE;
+ }")
+
+
+;; Fused negative multiply-subtract: computes -((op1 * op2) - op3).
+(define_insn "altivec_vnmsubfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (neg:V4SF (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnmsubfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vmsumu<VI_char>m"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMU))]
+ "TARGET_ALTIVEC"
+ "vmsumu<VI_char>m %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumm<VI_char>m"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMM))]
+ "TARGET_ALTIVEC"
+ "vmsumm<VI_char>m %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHM))]
+ "TARGET_ALTIVEC"
+ "vmsumshm %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumuhs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMUHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmsumuhs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmsumshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+;; max
+
+(define_insn "umax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxs<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "umin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmins<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vmhaddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmhaddshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmhraddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHRADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmhraddshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmladduhm"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMLADDUHM))]
+ "TARGET_ALTIVEC"
+ "vmladduhm %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmrghb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 8)
+ (const_int 1)
+ (const_int 9)
+ (const_int 2)
+ (const_int 10)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
+ (const_int 12)
+ (const_int 5)
+ (const_int 13)
+ (const_int 6)
+ (const_int 14)
+ (const_int 7)
+ (const_int 15)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 0)
+ (const_int 9)
+ (const_int 1)
+ (const_int 10)
+ (const_int 2)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
+ (const_int 4)
+ (const_int 13)
+ (const_int 5)
+ (const_int 14)
+ (const_int 6)
+ (const_int 15)
+ (const_int 7)]))
+ (const_int 21845)))]
+ "TARGET_ALTIVEC"
+ "vmrghb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
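+
+;; In these merge patterns (vmrghb through vmrglw below) the vec_merge
+;; mask selects alternating elements from the two vec_selects: 21845 is
+;; 0x5555 for 16 elements, 85 is 0x55 for 8, and 5 is 0b0101 for 4.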
+
+(define_insn "altivec_vmrghh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (const_int 85)))]
+ "TARGET_ALTIVEC"
+ "vmrghh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrghw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 0)
+ (const_int 9)
+ (const_int 1)
+ (const_int 10)
+ (const_int 2)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
+ (const_int 4)
+ (const_int 13)
+ (const_int 5)
+ (const_int 14)
+ (const_int 6)
+ (const_int 15)
+ (const_int 7)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 8)
+ (const_int 1)
+ (const_int 9)
+ (const_int 2)
+ (const_int 10)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
+ (const_int 12)
+ (const_int 5)
+ (const_int 13)
+ (const_int 6)
+ (const_int 14)
+ (const_int 7)
+ (const_int 15)]))
+ (const_int 21845)))]
+ "TARGET_ALTIVEC"
+ "vmrglb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (const_int 85)))]
+ "TARGET_ALTIVEC"
+ "vmrglh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrglw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmuleub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULEUB))]
+ "TARGET_ALTIVEC"
+ "vmuleub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULESB))]
+ "TARGET_ALTIVEC"
+ "vmulesb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuleuh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULEUH))]
+ "TARGET_ALTIVEC"
+ "vmuleuh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULESH))]
+ "TARGET_ALTIVEC"
+ "vmulesh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuloub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOUB))]
+ "TARGET_ALTIVEC"
+ "vmuloub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOSB))]
+ "TARGET_ALTIVEC"
+ "vmulosb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulouh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOUH))]
+ "TARGET_ALTIVEC"
+ "vmulouh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOSH))]
+ "TARGET_ALTIVEC"
+ "vmulosh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+
+;; logical ops
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vand %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (xor:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "xorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (xor:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%1"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_nor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "andc<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (not:VI (match_operand:VI 2 "register_operand" "v"))
+ (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*andc3_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vpkuhum"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUM))]
+ "TARGET_ALTIVEC"
+ "vpkuhum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwum"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUM))]
+ "TARGET_ALTIVEC"
+ "vpkuwum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkpx"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKPX))]
+ "TARGET_ALTIVEC"
+ "vpkpx %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshss"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkshss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswss"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkswss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuhus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkuhus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkshus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkuwus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkswus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrl<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VRL))]
+ "TARGET_ALTIVEC"
+ "vrl<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsl<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSL))]
+ "TARGET_ALTIVEC"
+ "vsl<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsl"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLV4SI))]
+ "TARGET_ALTIVEC"
+ "vsl %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vslo"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLO))]
+ "TARGET_ALTIVEC"
+ "vslo %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "lshr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (lshiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
+ "TARGET_ALTIVEC"
+ "vsr<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "ashr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ashiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
+ "TARGET_ALTIVEC"
+ "vsra<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsr"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSR))]
+ "TARGET_ALTIVEC"
+ "vsr %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsro"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSRO))]
+ "TARGET_ALTIVEC"
+ "vsro %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsum4ubs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4UBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum4ubs %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum4s<VI_char>s"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4S))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum4s<VI_char>s %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum2sws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM2SWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum2sws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsumsws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vspltb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_duplicate:V16QI
+ (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
+ "TARGET_ALTIVEC"
+ "vspltb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsplth"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_duplicate:V8HI
+ (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
+ "TARGET_ALTIVEC"
+ "vsplth %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_duplicate:V4SI
+ (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
+ "TARGET_ALTIVEC"
+ "vspltw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "*altivec_vspltsf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (vec_duplicate:V4SF
+ (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
+ "TARGET_ALTIVEC"
+ "vspltw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltis<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (vec_duplicate:VI
+ (match_operand:QI 1 "s5bit_cint_operand" "i")))]
+ "TARGET_ALTIVEC"
+ "vspltis<VI_char> %0,%1"
+ [(set_attr "type" "vecperm")])
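+
+;; Note: the vsplt* patterns take a u5bit_cint_operand, a literal element
+;; index, while vspltis<x> takes an s5bit_cint_operand, a sign-extended
+;; immediate in [-16, 15] that is splatted across every element.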
+
+(define_insn "ftruncv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vrfiz %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vperm_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VPERM))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrfip"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIP))]
+ "TARGET_ALTIVEC"
+ "vrfip %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfin"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIN))]
+ "TARGET_ALTIVEC"
+ "vrfin %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfim"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIM))]
+ "TARGET_ALTIVEC"
+ "vrfim %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vcfux"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFUX))]
+ "TARGET_ALTIVEC"
+ "vcfux %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vcfsx"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFSX))]
+ "TARGET_ALTIVEC"
+ "vcfsx %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctuxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTUXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vctuxs %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctsxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTSXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vctsxs %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vlogefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VLOGEFP))]
+ "TARGET_ALTIVEC"
+ "vlogefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vexptefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VEXPTEFP))]
+ "TARGET_ALTIVEC"
+ "vexptefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrsqrtefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRSQRTEFP))]
+ "TARGET_ALTIVEC"
+ "vrsqrtefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VREFP))]
+ "TARGET_ALTIVEC"
+ "vrefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_expand "vcondv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "comparison_operator" "")
+ (match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
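+
+;; For the vcond/vcondu expanders here: operand 0 receives operand 1
+;; where the elementwise comparison (operator in operand 3) of operands
+;; 4 and 5 holds, and operand 2 elsewhere -- roughly
+;; op0[i] = (op4[i] <op3> op5[i]) ? op1[i] : op2[i].  The template above
+;; is never emitted as-is: rs6000_emit_vector_cond_expr builds the real
+;; compare/select sequence, or the expander FAILs.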
+
+(define_expand "vconduv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "comparison_operator" "")
+ (match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V4SF 3 "comparison_operator" "")
+ (match_operand:V4SF 4 "register_operand" "v")
+ (match_operand:V4SF 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv8hi"
+  [(set (match_operand:V8HI 0 "register_operand" "=v")
+        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "comparison_operator" "")
+ (match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv8hi"
+  [(set (match_operand:V8HI 0 "register_operand" "=v")
+        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "comparison_operator" "")
+ (match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv16qi"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "comparison_operator" "")
+ (match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")
+ ] UNSPEC_VCOND_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv16qi"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "comparison_operator" "")
+ (match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")
+ ] UNSPEC_VCONDU_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+
+(define_insn "altivec_vsel_v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SF))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VSEL8HI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VSEL16QI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")]
+ UNSPEC_VLSDOI))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSB))]
+ "TARGET_ALTIVEC"
+ "vupkhsb %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHPX))]
+ "TARGET_ALTIVEC"
+ "vupkhpx %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSH))]
+ "TARGET_ALTIVEC"
+ "vupkhsh %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSB))]
+ "TARGET_ALTIVEC"
+ "vupklsb %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLPX))]
+ "TARGET_ALTIVEC"
+ "vupklpx %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSH))]
+ "TARGET_ALTIVEC"
+ "vupklsh %0,%1"
+ [(set_attr "type" "vecperm")])
+
+;; AltiVec predicates.
+
+(define_expand "cr6_test_for_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_zero_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_lt"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_lt_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+;; APPLE LOCAL begin radar 4571747
+;; We can get away with generating the opcode on the fly (%3 below)
+;; because all the predicates for v4sf have the same scheduling parameters.
+(define_insn "altivec_predicate_v4sf"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+                    (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (clobber (match_scratch:V4SF 0 "=v"))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
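+
+;; Example (illustrative): the builtin expander passes the opcode as a
+;; string in operand 3 (e.g. "vcmpeqfp."), so the "%3 %0,%1,%2" template
+;; above might print
+;;
+;;    vcmpeqfp. v0,v1,v2
+;;
+;; a dot-form compare whose all-true/all-false summary lands in CR6 and
+;; is read back through the cr6_test_* expanders above.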
+
+(define_insn "altivec_predicate_vcmpequw"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPEQUW))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtsw"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTSW))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtuw"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTUW))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtuh"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTUH))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtsh"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTSH))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpequh"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPEQUH))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpequb"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPEQUB))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtsb"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTSB))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_predicate_vcmpgtub"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_VCMPGTUB))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+;; APPLE LOCAL end radar 4571747
+
+(define_insn "altivec_mtvscr"
+ [(set (reg:SI 110)
+ (unspec_volatile:SI
+ [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
+ "TARGET_ALTIVEC"
+ "mtvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_mfvscr"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec_volatile:V8HI [(reg:SI 110)] UNSPECV_MFVSCR))]
+ "TARGET_ALTIVEC"
+ "mfvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dssall"
+ [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
+ "TARGET_ALTIVEC"
+ "dssall"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dss"
+ [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
+ UNSPECV_DSS)]
+ "TARGET_ALTIVEC"
+ "dss %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dst"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstt"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstst"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dststt"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dststt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
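+
+;; Example (illustrative): a C-level data-stream hint such as
+;;
+;;    vec_dst (ptr, ctrl, 0);   /* ctrl packs block size/count/stride */
+;;
+;; becomes "dst rA,rB,0" with the address in rA and the packed control
+;; word in rB; dss/dssall above cancel running streams.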
+
+;; APPLE LOCAL begin 4708231
+(define_insn "altivec_lvsl"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "indexed_or_indirect_operand" "Z")] UNSPEC_LVSL))]
+ "TARGET_ALTIVEC"
+ "lvsl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvsr"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "indexed_or_indirect_operand" "Z")] UNSPEC_LVSR))]
+ "TARGET_ALTIVEC"
+ "lvsr %0,%y1"
+ [(set_attr "type" "vecload")])
+;; APPLE LOCAL end 4708231
+
+(define_expand "build_vector_mask_for_load"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx addr;
+ rtx temp;
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ addr = XEXP (operands[1], 0);
+ temp = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ emit_insn (gen_altivec_lvsr (operands[0],
+ replace_equiv_address (operands[1], temp)));
+ DONE;
+}")
+
+;; Parallel some of the LVE* and STV* patterns with unspecs because some
+;; have identical RTL but different instructions -- and gcc gets confused.
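+;; For instance, without the unspec altivec_lvx and altivec_lvxl below
+;; would both be just (set (reg:V4SI ...) (mem:V4SI ...)), and the RTL
+;; matcher could not tell them apart.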
+
+;; APPLE LOCAL begin 4708231
+(define_insn "altivec_lve<VI_char>x"
+ [(parallel
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (match_operand:VI 1 "indexed_or_indirect_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
+ "TARGET_ALTIVEC"
+ "lve<VI_char>x %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "*altivec_lvesfx"
+ [(parallel
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (match_operand:V4SF 1 "indexed_or_indirect_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
+ "TARGET_ALTIVEC"
+ "lvewx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V4SI 1 "indexed_or_indirect_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
+ "TARGET_ALTIVEC"
+ "lvxl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V4SI 1 "indexed_or_indirect_operand" "Z"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_stvx"
+ [(parallel
+ [(set (match_operand:V4SI 0 "indexed_or_indirect_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVX)])]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "indexed_or_indirect_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVXL)])]
+ "TARGET_ALTIVEC"
+ "stvxl %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stve<VI_char>x"
+ [(parallel
+ [(set (match_operand:VI 0 "indexed_or_indirect_operand" "=Z")
+ (match_operand:VI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
+ "TARGET_ALTIVEC"
+ "stve<VI_char>x %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "*altivec_stvesfx"
+ [(parallel
+ [(set (match_operand:V4SF 0 "indexed_or_indirect_operand" "=Z")
+ (match_operand:V4SF 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
+ "TARGET_ALTIVEC"
+ "stvewx %1,%y0"
+ [(set_attr "type" "vecstore")])
+;; APPLE LOCAL end 4708231
+
+(define_expand "vec_init<mode>"
+ [(match_operand:V 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv4si"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv8hi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4si"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv8hi"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv16qi"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubu?m SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "abs<mode>2"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (set (match_dup 3)
+ (minus:VI (match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")))
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
+
+;; Generate
+;; vspltisw SCRATCH1,-1
+;; vslw SCRATCH2,SCRATCH1,SCRATCH1
+;; vandc %0,%1,SCRATCH2
+(define_expand "absv4sf2"
+ [(set (match_dup 2)
+ (vec_duplicate:V4SI (const_int -1)))
+ (set (match_dup 3)
+ (unspec:V4SI [(match_dup 2) (match_dup 2)] UNSPEC_VSL))
+ (set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (V4SImode);
+ operands[3] = gen_reg_rtx (V4SImode);
+})
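+
+;; The vspltisw/vslw pair builds 0x80000000 in every lane (each -1
+;; element is shifted left by 31, the low five bits of itself), so the
+;; final vandc clears exactly the IEEE sign bit of each element.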
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubs?s SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "altivec_abss_<mode>"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (parallel [(set (match_dup 3)
+ (unspec:VI [(match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
+
+;; Vector shift left in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vslo + vsl. We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shl_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = bitshift_val >> 3;
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+;; Vector shift right in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vsro + vsr. We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shr_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = 16 - (bitshift_val >> 3);
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+(define_insn "altivec_vsumsws_nomode"
+ [(set (match_operand 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
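+
+;; Operand 0 above is deliberately modeless so that the reduc_*
+;; expanders below can write the V4SI sum into a destination of
+;; whatever vector mode the reduction was requested in.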
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:VIshort 0 "register_operand" "=v")
+ (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
+
+(define_expand "reduc_uplus_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4ubs (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
+
+(define_insn "vec_realign_load_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_REALIGN_LOAD))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_expand "neg<mode>2"
+ [(use (match_operand:VI 0 "register_operand" ""))
+ (use (match_operand:VI 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero;
+
+ vzero = gen_reg_rtx (GET_MODE (operands[0]));
+ emit_insn (gen_altivec_vspltis<VI_char> (vzero, const0_rtx));
+ emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
+
+ DONE;
+}")
+
+(define_expand "udot_prod<mode>"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "sdot_prodv8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "widen_usum<mode>3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
+
+ emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv16qi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VMSUMM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V16QImode);
+
+ emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv8hi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vspltish (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "negv4sf2"
+ [(use (match_operand:V4SF 0 "register_operand" ""))
+ (use (match_operand:V4SF 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx neg0;
+
+ /* Generate [-0.0, -0.0, -0.0, -0.0]. */
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_altivec_vslw (neg0, neg0, neg0));
+
+ /* XOR */
+ emit_insn (gen_xorv4sf3 (operands[0],
+ gen_lowpart (V4SFmode, neg0), operands[1]));
+
+ DONE;
+}")
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/builtin.ops b/gcc-4.2.1-5666.3/gcc/config/rs6000/builtin.ops
new file mode 100644
index 000000000..a28e35654
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/builtin.ops
@@ -0,0 +1,297 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# @ betype betype-code type-spelling
+@ @ float BETYPE_R4 float
+@ @ ushort BETYPE_U4 unsigned=short
+@ @ uint BETYPE_U4 unsigned=int
+@ @ ulong BETYPE_U4 unsigned=long
+@ @ immed_u2 U2 0..3
+@ @ immed_u4 U4 0..15
+@ @ immed_s5 I5 -16..15
+@ @ immed_u5 U5 0..31
+@ @ int BETYPE_I4 int
+@ @ long BETYPE_I4 long
+@ @ ptr PTR void=*
+@ @ v16 BETYPE_V16 vec_type
+@ @ void BETYPE_I4 void
+# fetype betype [code [spelling]]
+@ float_ptr ptr i float=*
+@ const_float_ptr ptr i float=*
+@ const_volatile_float_ptr ptr i float=*
+@ int int i
+@ int_ptr ptr i int=*
+@ long_ptr ptr i long=*
+@ const_int_ptr ptr i int=*
+@ const_long_ptr ptr i long=*
+@ const_volatile_int_ptr ptr i int=*
+@ const_volatile_long_ptr ptr i long=*
+@ immed_s5 immed_s5 A
+@ immed_u5 immed_u5 B
+@ immed_u4 immed_u4 C
+@ immed_u2 immed_u2 D
+@ cc24f int j=24=f
+@ cc24fd int j=24=f=d
+@ cc24fr int j=24=f=r
+@ cc24t int j=24=t
+@ cc24td int j=24=t=d
+@ cc24tr int j=24=t=r
+@ cc26f int j=26=f
+@ cc26fd int j=26=f=d
+@ cc26fr int j=26=f=r
+@ cc26t int j=26=t
+@ cc26td int j=26=t=d
+@ cc26tr int j=26=t=r
+@ short_ptr ptr i short=*
+@ signed_char_ptr ptr i signed=char=*
+@ unsigned_char_ptr ptr i unsigned=char=*
+@ unsigned_short_ptr ptr i unsigned=short=*
+@ unsigned_int_ptr ptr i unsigned=int=*
+@ unsigned_long_ptr ptr i unsigned=long=*
+@ const_short_ptr ptr i short=*
+@ const_signed_char_ptr ptr i signed=char=*
+@ const_unsigned_char_ptr ptr i unsigned=char=*
+@ const_unsigned_short_ptr ptr i unsigned=short=*
+@ const_unsigned_int_ptr ptr i unsigned=int=*
+@ const_unsigned_long_ptr ptr i unsigned=long=*
+@ const_volatile_short_ptr ptr i short=*
+@ const_volatile_signed_char_ptr ptr i signed=char=*
+@ const_volatile_unsigned_char_ptr ptr i unsigned=char=*
+@ const_volatile_unsigned_short_ptr ptr i unsigned=short=*
+@ const_volatile_unsigned_int_ptr ptr i unsigned=int=*
+@ const_volatile_unsigned_long_ptr ptr i unsigned=long=*
+@ vec_b16 v16 x vec_b16
+@ vec_b16_load_op v16 xl vec_b16
+@ vec_b16_ptr ptr i vec_b16=*
+@ const_vec_b16_ptr ptr i vec_b16=*
+@ vec_b32 v16 x vec_b32
+@ vec_b32_load_op v16 xl vec_b32
+@ vec_b32_ptr ptr i vec_b32=*
+@ const_vec_b32_ptr ptr i vec_b32=*
+@ vec_b8 v16 x vec_b8
+@ vec_b8_load_op v16 xl vec_b8
+@ vec_b8_ptr ptr i vec_b8=*
+@ const_vec_b8_ptr ptr i vec_b8=*
+@ vec_f32 v16 x vec_f32
+@ vec_f32_load_op v16 xl vec_f32
+@ vec_f32_ptr ptr i vec_f32=*
+@ const_vec_f32_ptr ptr i vec_f32=*
+@ vec_p16 v16 x vec_p16
+@ vec_p16_load_op v16 xl vec_p16
+@ vec_p16_ptr ptr i vec_p16=*
+@ const_vec_p16_ptr ptr i vec_p16=*
+@ vec_s16 v16 x vec_s16
+@ vec_s16_load_op v16 xl vec_s16
+@ vec_s16_ptr ptr i vec_s16=*
+@ const_vec_s16_ptr ptr i vec_s16=*
+@ vec_s32 v16 x vec_s32
+@ vec_s32_load_op v16 xl vec_s32
+@ vec_s32_ptr ptr i vec_s32=*
+@ const_vec_s32_ptr ptr i vec_s32=*
+@ vec_s8 v16 x vec_s8
+@ vec_s8_load_op v16 xl vec_s8
+@ vec_s8_ptr ptr i vec_s8=*
+@ const_vec_s8_ptr ptr i vec_s8=*
+@ vec_u16 v16 x vec_u16
+@ vec_u16_load_op v16 xl vec_u16
+@ vec_u16_ptr ptr i vec_u16=*
+@ const_vec_u16_ptr ptr i vec_u16=*
+@ vec_u32 v16 x vec_u32
+@ vec_u32_load_op v16 xl vec_u32
+@ vec_u32_ptr ptr i vec_u32=*
+@ const_vec_u32_ptr ptr i vec_u32=*
+@ vec_u8 v16 x vec_u8
+@ vec_u8_load_op v16 xl vec_u8
+@ vec_u8_ptr ptr i vec_u8=*
+@ const_vec_u8_ptr ptr i vec_u8=*
+@ void_store_op void s
+@ volatile_void void v
+@ volatile_void_load_op void vl
+@ volatile_void_store_op void vs
+@ volatile_vec_u16 v16 vx vec_u16
+@ char_ptr ptr i char=*
+@ const_char_ptr ptr i char=*
+# @ @ instruction type
+@ @ @ MOP_mfvscr fxu
+@ @ @ MOP_mtvscr fxu
+@ @ @ MOP_dss load
+@ @ @ MOP_dssall load
+@ @ @ MOP_dst load
+@ @ @ MOP_dstst load
+@ @ @ MOP_dststt load
+@ @ @ MOP_dstt load
+@ @ @ MOP_lvebx load
+@ @ @ MOP_lvehx load
+@ @ @ MOP_lvewx load
+@ @ @ MOP_lvsl load
+@ @ @ MOP_lvsr load
+@ @ @ MOP_lvx load
+@ @ @ MOP_lvxl load
+@ @ @ MOP_stvebx store
+@ @ @ MOP_stvehx store
+@ @ @ MOP_stvewx store
+@ @ @ MOP_stvx store
+@ @ @ MOP_stvxl store
+@ @ @ MOP_vaddcuw simple
+@ @ @ MOP_vaddfp fp
+@ @ @ MOP_vaddsbs simple
+@ @ @ MOP_vaddshs simple
+@ @ @ MOP_vaddsws simple
+@ @ @ MOP_vaddubm simple
+@ @ @ MOP_vaddubs simple
+@ @ @ MOP_vadduhm simple
+@ @ @ MOP_vadduhs simple
+@ @ @ MOP_vadduwm simple
+@ @ @ MOP_vadduws simple
+@ @ @ MOP_vand simple
+@ @ @ MOP_vandc simple
+@ @ @ MOP_vavgsb simple
+@ @ @ MOP_vavgsh simple
+@ @ @ MOP_vavgsw simple
+@ @ @ MOP_vavgub simple
+@ @ @ MOP_vavguh simple
+@ @ @ MOP_vavguw simple
+@ @ @ MOP_vcfsx fp
+@ @ @ MOP_vcfux fp
+@ @ @ MOP_vcmpbfp simple
+@ @ @ MOP_vcmpbfpD simple
+@ @ @ MOP_vcmpeqfp simple
+@ @ @ MOP_vcmpeqfpD simple
+@ @ @ MOP_vcmpequb simple
+@ @ @ MOP_vcmpequbD simple
+@ @ @ MOP_vcmpequh simple
+@ @ @ MOP_vcmpequhD simple
+@ @ @ MOP_vcmpequw simple
+@ @ @ MOP_vcmpequwD simple
+@ @ @ MOP_vcmpgefp simple
+@ @ @ MOP_vcmpgefpD simple
+@ @ @ MOP_vcmpgtfp simple
+@ @ @ MOP_vcmpgtfpD simple
+@ @ @ MOP_vcmpgtsb simple
+@ @ @ MOP_vcmpgtsbD simple
+@ @ @ MOP_vcmpgtsh simple
+@ @ @ MOP_vcmpgtshD simple
+@ @ @ MOP_vcmpgtsw simple
+@ @ @ MOP_vcmpgtswD simple
+@ @ @ MOP_vcmpgtub simple
+@ @ @ MOP_vcmpgtubD simple
+@ @ @ MOP_vcmpgtuh simple
+@ @ @ MOP_vcmpgtuhD simple
+@ @ @ MOP_vcmpgtuw simple
+@ @ @ MOP_vcmpgtuwD simple
+@ @ @ MOP_vctsxs fp
+@ @ @ MOP_vctuxs fp
+@ @ @ MOP_vexptefp fp
+@ @ @ MOP_vlogefp fp
+@ @ @ MOP_vmaddfp fp
+@ @ @ MOP_vmaxfp simple
+@ @ @ MOP_vmaxsb simple
+@ @ @ MOP_vmaxsh simple
+@ @ @ MOP_vmaxsw simple
+@ @ @ MOP_vmaxub simple
+@ @ @ MOP_vmaxuh simple
+@ @ @ MOP_vmaxuw simple
+@ @ @ MOP_vmhaddshs complex
+@ @ @ MOP_vmhraddshs complex
+@ @ @ MOP_vminfp simple
+@ @ @ MOP_vminsb simple
+@ @ @ MOP_vminsh simple
+@ @ @ MOP_vminsw simple
+@ @ @ MOP_vminub simple
+@ @ @ MOP_vminuh simple
+@ @ @ MOP_vminuw simple
+@ @ @ MOP_vmladduhm complex
+@ @ @ MOP_vmrghb perm
+@ @ @ MOP_vmrghh perm
+@ @ @ MOP_vmrghw perm
+@ @ @ MOP_vmrglb perm
+@ @ @ MOP_vmrglh perm
+@ @ @ MOP_vmrglw perm
+@ @ @ MOP_vmsummbm complex
+@ @ @ MOP_vmsumshm complex
+@ @ @ MOP_vmsumshs complex
+@ @ @ MOP_vmsumubm complex
+@ @ @ MOP_vmsumuhm complex
+@ @ @ MOP_vmsumuhs complex
+@ @ @ MOP_vmulesb complex
+@ @ @ MOP_vmulesh complex
+@ @ @ MOP_vmuleub complex
+@ @ @ MOP_vmuleuh complex
+@ @ @ MOP_vmulosb complex
+@ @ @ MOP_vmulosh complex
+@ @ @ MOP_vmuloub complex
+@ @ @ MOP_vmulouh complex
+@ @ @ MOP_vnmsubfp fp
+@ @ @ MOP_vnor simple
+@ @ @ MOP_vor simple
+@ @ @ MOP_vperm perm
+@ @ @ MOP_vpkpx perm
+@ @ @ MOP_vpkshss perm
+@ @ @ MOP_vpkshus perm
+@ @ @ MOP_vpkswss perm
+@ @ @ MOP_vpkswus perm
+@ @ @ MOP_vpkuhum perm
+@ @ @ MOP_vpkuhus perm
+@ @ @ MOP_vpkuwum perm
+@ @ @ MOP_vpkuwus perm
+@ @ @ MOP_vrefp fp
+@ @ @ MOP_vrfim fp
+@ @ @ MOP_vrfin fp
+@ @ @ MOP_vrfip fp
+@ @ @ MOP_vrfiz fp
+@ @ @ MOP_vrlb simple
+@ @ @ MOP_vrlh simple
+@ @ @ MOP_vrlw simple
+@ @ @ MOP_vrsqrtefp fp
+@ @ @ MOP_vsel simple
+@ @ @ MOP_vsl simple
+@ @ @ MOP_vslb simple
+@ @ @ MOP_vsldoi perm
+@ @ @ MOP_vslh simple
+@ @ @ MOP_vslo perm_bug
+@ @ @ MOP_vslw simple
+@ @ @ MOP_vspltb perm
+@ @ @ MOP_vsplth perm
+@ @ @ MOP_vspltisb perm
+@ @ @ MOP_vspltish perm
+@ @ @ MOP_vspltisw perm
+@ @ @ MOP_vspltw perm
+@ @ @ MOP_vsr simple
+@ @ @ MOP_vsrab simple
+@ @ @ MOP_vsrah simple
+@ @ @ MOP_vsraw simple
+@ @ @ MOP_vsrb simple
+@ @ @ MOP_vsrh simple
+@ @ @ MOP_vsro perm_bug
+@ @ @ MOP_vsrw simple
+@ @ @ MOP_vsubcuw simple
+@ @ @ MOP_vsubfp fp
+@ @ @ MOP_vsubsbs simple
+@ @ @ MOP_vsubshs simple
+@ @ @ MOP_vsubsws simple
+@ @ @ MOP_vsububm simple
+@ @ @ MOP_vsububs simple
+@ @ @ MOP_vsubuhm simple
+@ @ @ MOP_vsubuhs simple
+@ @ @ MOP_vsubuwm simple
+@ @ @ MOP_vsubuws simple
+@ @ @ MOP_vsum2sws complex
+@ @ @ MOP_vsum4sbs complex
+@ @ @ MOP_vsum4shs complex
+@ @ @ MOP_vsum4ubs complex
+@ @ @ MOP_vsumsws complex
+@ @ @ MOP_vupkhpx perm
+@ @ @ MOP_vupkhsb perm
+@ @ @ MOP_vupkhsh perm
+@ @ @ MOP_vupklpx perm
+@ @ @ MOP_vupklsb perm
+@ @ @ MOP_vupklsh perm
+@ @ @ MOP_vxor simple
+# The vec_abs and vec_abss operations identify their variants with insn_name.
+# Map these into a valid insn code (xfx_perm).
+@ @ @ 1 perm
+@ @ @ 2 perm
+@ @ @ 3 perm
+@ @ @ 4 perm
+@ @ @ 5 perm
+@ @ @ 6 perm
+@ @ @ 7 perm
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/constraints.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/constraints.md
new file mode 100644
index 000000000..a7d466118
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/constraints.md
@@ -0,0 +1,162 @@
+;; Constraint definitions for RS6000
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT && TARGET_FPRS
+ ? FLOAT_REGS : NO_REGS"
+ "@internal")
+
+(define_register_constraint "b" "BASE_REGS"
+ "@internal")
+
+(define_register_constraint "h" "SPECIAL_REGS"
+ "@internal")
+
+(define_register_constraint "q" "MQ_REGS"
+ "@internal")
+
+(define_register_constraint "c" "CTR_REGS"
+ "@internal")
+
+(define_register_constraint "l" "LINK_REGS"
+ "@internal")
+
+(define_register_constraint "v" "ALTIVEC_REGS"
+ "@internal")
+
+(define_register_constraint "x" "CR0_REGS"
+ "@internal")
+
+(define_register_constraint "y" "CR_REGS"
+ "@internal")
+
+(define_register_constraint "z" "XER_REGS"
+ "@internal")
+
+;; Integer constraints
+
+(define_constraint "I"
+ "A signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) (ival + 0x8000) < 0x10000")))
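+
+;; The single unsigned comparison above is the usual range-check idiom:
+;; adding 0x8000 maps [-32768, 32767] onto [0, 0xffff], so one compare
+;; against 0x10000 covers the whole signed 16-bit range.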
+
+(define_constraint "J"
+  "constant with only the high-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0")))
+
+(define_constraint "K"
+  "constant with only the low-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (HOST_WIDE_INT) 0xffff)) == 0")))
+
+(define_constraint "L"
+ "signed 16-bit constant shifted left 16 bits"
+ (and (match_code "const_int")
+ (match_test "((ival & 0xffff) == 0
+ && (ival >> 31 == -1 || ival >> 31 == 0))")))
+
+(define_constraint "M"
+ "constant greater than 31"
+ (and (match_code "const_int")
+ (match_test "ival > 31")))
+
+(define_constraint "N"
+ "positive constant that is an exact power of two"
+ (and (match_code "const_int")
+ (match_test "ival > 0 && exact_log2 (ival) >= 0")))
+
+(define_constraint "O"
+ "constant zero"
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "P"
+ "constant whose negation is signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ((- ival) + 0x8000) < 0x10000")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Constant that can be copied into GPR with two insns for DF/DI
+ and one for SF."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode)
+ == (mode == SFmode ? 1 : 2)")))
+
+(define_constraint "H"
+ "DF/DI constant that takes three insns."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode) == 3")))
+
+;; Memory constraints
+
+(define_memory_constraint "Q"
+ "Memory operand that is just an offset from a reg"
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_memory_constraint "Y"
+ "Indexed or word-aligned displacement memory operand"
+ (match_operand 0 "word_offset_memref_operand"))
+
+(define_memory_constraint "Z"
+ "Indexed or indirect memory operand"
+ (match_operand 0 "indexed_or_indirect_operand"))
+
+;; Address constraints
+
+(define_address_constraint "a"
+ "Indexed or indirect address operand"
+ (match_operand 0 "indexed_or_indirect_address"))
+
+(define_constraint "R"
+ "AIX TOC entry"
+ (match_test "legitimate_constant_pool_address_p (op)"))
+
+;; General constraints
+
+(define_constraint "S"
+ "Constant that can be placed into a 64-bit mask operand"
+ (match_operand 0 "mask64_operand"))
+
+(define_constraint "T"
+ "Constant that can be placed into a 32-bit mask operand"
+ (match_operand 0 "mask_operand"))
+
+(define_constraint "U"
+ "V.4 small data reference"
+ (and (match_test "DEFAULT_ABI == ABI_V4")
+ (match_operand 0 "small_data_operand")))
+
+(define_constraint "t"
+ "AND masks that can be performed by two rldic{l,r} insns
+ (but excluding those that could match other constraints of anddi3)"
+ (and (and (and (match_operand 0 "mask64_2_operand")
+ (match_test "(fixed_regs[CR0_REGNO]
+ || !logical_operand (op, DImode))"))
+ (not (match_operand 0 "mask_operand")))
+ (not (match_operand 0 "mask64_operand"))))
+
+(define_constraint "W"
+ "vector constant that does not require memory"
+ (match_operand 0 "easy_vector_constant"))
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-asm.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-asm.h
new file mode 100644
index 000000000..401b4f81b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-asm.h
@@ -0,0 +1,61 @@
+/* Macro definitions used to support 32/64-bit code in Darwin's
+ * assembly files.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* These are donated from /usr/include/architecture/ppc.  */
+
+#if defined(__ppc64__)
+#define MODE_CHOICE(x, y) y
+#else
+#define MODE_CHOICE(x, y) x
+#endif
+
+#define cmpg MODE_CHOICE(cmpw, cmpd)
+#define lg MODE_CHOICE(lwz, ld)
+#define stg MODE_CHOICE(stw, std)
+#define lgx MODE_CHOICE(lwzx, ldx)
+#define stgx MODE_CHOICE(stwx, stdx)
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define stgu MODE_CHOICE(stwu, stdu)
+#define lgux MODE_CHOICE(lwzux, ldux)
+#define stgux MODE_CHOICE(stwux, stdux)
+#define lgwa MODE_CHOICE(lwz, lwa)
+
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+#define GPR_BYTES MODE_CHOICE(4,8) /* size of a GPR in bytes */
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* position of saved
+ LR in frame */
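+
+/* Example (illustrative): a prologue written once for both ABIs,
+
+	mflr r0
+	stg  r0,SAVED_LR_OFFSET(r1)
+
+   assembles as "stw r0,8(r1)" for 32-bit and "std r0,16(r1)" for
+   64-bit, with no #ifdef at the use site.  */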
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fallback.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fallback.c
new file mode 100644
index 000000000..5d3de3224
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fallback.c
@@ -0,0 +1,471 @@
+/* Fallback frame-state unwinder for Darwin.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combined
+ executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "dwarf2.h"
+#include "unwind.h"
+#include "unwind-dw2.h"
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <signal.h>
+
+typedef unsigned long reg_unit;
+
+/* Place in GPRS the parameters to the first 'sc' instruction that would
+ have been executed if we were returning from this CONTEXT, or
+ return false if an unexpected instruction is encountered. */
+
+static bool
+interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context)
+{
+ uint32_t *pc = (uint32_t *)_Unwind_GetIP (context);
+ uint32_t cr;
+ reg_unit lr = (reg_unit) pc;
+ reg_unit ctr = 0;
+ uint32_t *invalid_address = NULL;
+
+ int i;
+
+ for (i = 0; i < 13; i++)
+ gprs[i] = 1;
+ gprs[1] = _Unwind_GetCFA (context);
+ for (; i < 32; i++)
+ gprs[i] = _Unwind_GetGR (context, i);
+ cr = _Unwind_GetGR (context, CR2_REGNO);
+
+ /* For each supported Libc, we have to track the code flow
+ all the way back into the kernel.
+
+ This code is believed to support all released Libc/Libsystem builds since
+ Jaguar 6C115, including all the security updates. To be precise,
+
+ Libc Libsystem Build(s)
+ 262~1 60~37 6C115
+ 262~1 60.2~4 6D52
+ 262~1 61~3 6F21-6F22
+ 262~1 63~24 6G30-6G37
+ 262~1 63~32 6I34-6I35
+ 262~1 63~64 6L29-6L60
+ 262.4.1~1 63~84 6L123-6R172
+
+ 320~1 71~101 7B85-7D28
+ 320~1 71~266 7F54-7F56
+ 320~1 71~288 7F112
+ 320~1 71~289 7F113
+ 320.1.3~1 71.1.1~29 7H60-7H105
+ 320.1.3~1 71.1.1~30 7H110-7H113
+ 320.1.3~1 71.1.1~31 7H114
+
+ That's a big table! It would be insane to try to keep track of
+ every little detail, so we just read the code itself and do what
+ it would do.
+ */
+
+ for (;;)
+ {
+ uint32_t ins = *pc++;
+
+ if ((ins & 0xFC000003) == 0x48000000) /* b instruction */
+ {
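+	  /* The AND extracts the 26-bit LI field; the XOR/subtract pair
+	     sign-extends it, and the extra 4 compensates for pc having
+	     already advanced past this instruction.  */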
+ pc += ((((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000004) / 4;
+ continue;
+ }
+ if ((ins & 0xFC600000) == 0x2C000000) /* cmpwi */
+ {
+ int32_t val1 = (int16_t) ins;
+ int32_t val2 = gprs[ins >> 16 & 0x1F];
+ /* Only beq and bne instructions are supported, so we only
+ need to set the EQ bit. */
+ uint32_t mask = 0xF << ((ins >> 21 & 0x1C) ^ 0x1C);
+ if (val1 == val2)
+ cr |= mask;
+ else
+ cr &= ~mask;
+ continue;
+ }
+ if ((ins & 0xFEC38003) == 0x40820000) /* forwards beq/bne */
+ {
+ if ((cr >> ((ins >> 16 & 0x1F) ^ 0x1F) & 1) == (ins >> 24 & 1))
+ pc += (ins & 0x7FFC) / 4 - 1;
+ continue;
+ }
+ if ((ins & 0xFC0007FF) == 0x7C000378) /* or, including mr */
+ {
+ gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F]
+ | gprs [ins >> 21 & 0x1F]);
+ continue;
+ }
+ if (ins >> 26 == 0x0E) /* addi, including li */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + (int16_t) ins;
+ continue;
+ }
+ if (ins >> 26 == 0x0F) /* addis, including lis */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + ((int16_t) ins << 16);
+ continue;
+ }
+ if (ins >> 26 == 0x20) /* lwz */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x21) /* lwzu */
+ {
+ uint32_t *p = (uint32_t *)(gprs [ins >> 16 & 0x1F] += (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x24) /* stw */
+ /* What we hope this is doing is '--in_sigtramp'. We don't want
+ to actually store to memory, so just make a note of the
+ address and refuse to load from it. */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == NULL || invalid_address != NULL)
+ return false;
+ invalid_address = p;
+ continue;
+ }
+ if (ins >> 26 == 0x2E) /* lmw */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ int i;
+
+ for (i = (ins >> 21 & 0x1F); i < 32; i++)
+ {
+ if (p == invalid_address)
+ return false;
+ gprs[i] = *p++;
+ }
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0803a6) /* mtlr */
+ {
+ lr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0802a6) /* mflr */
+ {
+ gprs [ins >> 21 & 0x1F] = lr;
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0903a6) /* mtctr */
+ {
+ ctr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ /* The PowerPC User's Manual says that bit 11 of the mtcrf
+ instruction is reserved and should be set to zero, but it
+ looks like the Darwin assembler doesn't do that... */
+ if ((ins & 0xFC000FFF) == 0x7c000120) /* mtcrf */
+ {
+ int i;
+ uint32_t mask = 0;
+ for (i = 0; i < 8; i++)
+ mask |= ((-(ins >> (12 + i) & 1)) & 0xF) << 4 * i;
+ cr = (cr & ~mask) | (gprs [ins >> 21 & 0x1F] & mask);
+ continue;
+ }
+ if (ins == 0x429f0005) /* bcl- 20,4*cr7+so,.+4, loads pc into LR */
+ {
+ lr = (reg_unit) pc;
+ continue;
+ }
+ if (ins == 0x4e800420) /* bctr */
+ {
+ pc = (uint32_t *) ctr;
+ continue;
+ }
+ if (ins == 0x44000002) /* sc */
+ return true;
+
+ return false;
+ }
+}
+
+/* We used to include <ucontext.h> and <mach/thread_status.h>,
+ but they change so much between different Darwin system versions
+ that it's much easier to just write the structures involved here
+ directly. */
+
+/* These defines are from the kernel's bsd/dev/ppc/unix_signal.c. */
+#define UC_TRAD 1
+#define UC_TRAD_VEC 6
+#define UC_TRAD64 20
+#define UC_TRAD64_VEC 25
+#define UC_FLAVOR 30
+#define UC_FLAVOR_VEC 35
+#define UC_FLAVOR64 40
+#define UC_FLAVOR64_VEC 45
+#define UC_DUAL 50
+#define UC_DUAL_VEC 55
+
+struct gcc_ucontext
+{
+ int onstack;
+ sigset_t sigmask;
+ void * stack_sp;
+ size_t stack_sz;
+ int stack_flags;
+ struct gcc_ucontext *link;
+ size_t mcsize;
+ struct gcc_mcontext32 *mcontext;
+};
+
+struct gcc_float_vector_state
+{
+ double fpregs[32];
+ uint32_t fpscr_pad;
+ uint32_t fpscr;
+ uint32_t save_vr[32][4];
+ uint32_t save_vscr[4];
+};
+
+struct gcc_mcontext32 {
+ uint32_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[5];
+ uint32_t srr0;
+ uint32_t srr1;
+ uint32_t gpr[32];
+ uint32_t cr;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t ctr;
+ uint32_t mq;
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+/* These are based on /usr/include/ppc/ucontext.h and
+ /usr/include/mach/ppc/thread_status.h, but rewritten to be more
+ convenient, to compile on Jaguar, and to work around Radar 3712064
+ on Panther, which is that the 'es' field of 'struct mcontext64' has
+ the wrong type (doh!). */
+
+struct gcc_mcontext64 {
+ uint64_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[4];
+ uint64_t srr0;
+ uint64_t srr1;
+ uint32_t gpr[32][2];
+ uint32_t cr;
+ uint32_t xer[2]; /* These are arrays because the original structure has them misaligned. */
+ uint32_t lr[2];
+ uint32_t ctr[2];
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+#define UC_FLAVOR_SIZE \
+ (sizeof (struct gcc_mcontext32) - 33*16)
+
+#define UC_FLAVOR_VEC_SIZE (sizeof (struct gcc_mcontext32))
+
+#define UC_FLAVOR64_SIZE \
+ (sizeof (struct gcc_mcontext64) - 33*16)
+
+#define UC_FLAVOR64_VEC_SIZE (sizeof (struct gcc_mcontext64))
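+
+/* The 33*16 subtracted above is the trailing vector state: 32 save_vr
+   entries plus save_vscr, 16 bytes each, which the non-VEC context
+   flavors omit.  */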
+
+/* Given GPRS as input to a 'sc' instruction, and OLD_CFA, update FS
+ to represent the execution of a signal return; or, if not a signal
+ return, return false. */
+
+static bool
+handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
+ _Unwind_Ptr old_cfa)
+{
+ struct gcc_ucontext *uctx;
+ bool is_64, is_vector;
+ struct gcc_float_vector_state * float_vector_state;
+ _Unwind_Ptr new_cfa;
+ int i;
+ static _Unwind_Ptr return_addr;
+
+ /* Yay! We're in a Libc that we understand, and it's made a
+ system call. It'll be one of two kinds: either a Jaguar-style
+ SYS_sigreturn, or a Panther-style 'syscall' call with 184, which
+ is also SYS_sigreturn. */
+
+ if (gprs[0] == 0x67 /* SYS_SIGRETURN */)
+ {
+ uctx = (struct gcc_ucontext *) gprs[3];
+ is_vector = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR_VEC_SIZE);
+ is_64 = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR64_SIZE);
+ }
+ else if (gprs[0] == 0 && gprs[3] == 184)
+ {
+ int ctxstyle = gprs[5];
+ uctx = (struct gcc_ucontext *) gprs[4];
+ is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC
+ || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC);
+ is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC
+ || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64);
+ }
+ else
+ return false;
+
+#define set_offset(r, addr) \
+ (fs->regs.reg[r].how = REG_SAVED_OFFSET, \
+ fs->regs.reg[r].loc.offset = (_Unwind_Ptr)(addr) - new_cfa)
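+
+/* (REG_SAVED_OFFSET tells the DWARF unwinder that a register was
+   saved at CFA + offset, so each address is recorded relative to
+   NEW_CFA, the CFA that this frame state reports.)  */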
+
+  /* Restore even the registers that are not call-saved, since they
+     may be in use in the prologue to save other registers;
+     for instance, GPR0 is sometimes used to save LR.  */
+
+ /* Handle the GPRs, and produce the information needed to do the rest. */
+ if (is_64)
+ {
+ /* The context is 64-bit, but it doesn't carry any extra information
+ for us because only the low 32 bits of the registers are
+ call-saved. */
+ struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->mcontext;
+ int i;
+
+ float_vector_state = &m64->fvs;
+
+ new_cfa = m64->gpr[1][1];
+
+ set_offset (CR2_REGNO, &m64->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m64->gpr[i] + 1);
+ set_offset (XER_REGNO, m64->xer + 1);
+ set_offset (LINK_REGISTER_REGNUM, m64->lr + 1);
+ set_offset (COUNT_REGISTER_REGNUM, m64->ctr + 1);
+ if (is_vector)
+ set_offset (VRSAVE_REGNO, &m64->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m64->exception == 3 || m64->exception == 4
+ || m64->exception == 6
+ || (m64->exception == 7 && !(m64->srr1 & 0x10000)))
+ return_addr = m64->srr0 + 4;
+ else
+ return_addr = m64->srr0;
+ }
+ else
+ {
+ struct gcc_mcontext32 *m = uctx->mcontext;
+ int i;
+
+ float_vector_state = &m->fvs;
+
+ new_cfa = m->gpr[1];
+
+ set_offset (CR2_REGNO, &m->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m->gpr + i);
+ set_offset (XER_REGNO, &m->xer);
+ set_offset (LINK_REGISTER_REGNUM, &m->lr);
+ set_offset (COUNT_REGISTER_REGNUM, &m->ctr);
+
+ if (is_vector)
+ set_offset (VRSAVE_REGNO, &m->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m->exception == 3 || m->exception == 4
+ || m->exception == 6
+ || (m->exception == 7 && !(m->srr1 & 0x10000)))
+ return_addr = m->srr0 + 4;
+ else
+ return_addr = m->srr0;
+ }
+
+ fs->cfa_how = CFA_REG_OFFSET;
+ fs->cfa_reg = STACK_POINTER_REGNUM;
+  fs->cfa_offset = new_cfa - old_cfa;
+
+ /* The choice of column for the return address is somewhat tricky.
+ Fortunately, the actual choice is private to this file, and
+ the space it's reserved from is the GCC register space, not the
+ DWARF2 numbering. So any free element of the right size is an OK
+ choice. Thus: */
+ fs->retaddr_column = ARG_POINTER_REGNUM;
+ /* FIXME: this should really be done using a DWARF2 location expression,
+ not using a static variable. In fact, this entire file should
+ be implemented in DWARF2 expressions. */
+ set_offset (ARG_POINTER_REGNUM, &return_addr);
+
+ for (i = 0; i < 32; i++)
+ set_offset (32 + i, float_vector_state->fpregs + i);
+ set_offset (SPEFSCR_REGNO, &float_vector_state->fpscr);
+
+ if (is_vector)
+ {
+ for (i = 0; i < 32; i++)
+ set_offset (FIRST_ALTIVEC_REGNO + i, float_vector_state->save_vr + i);
+ set_offset (VSCR_REGNO, float_vector_state->save_vscr);
+ }
+
+ return true;
+}
+
+/* This is also prototyped in rs6000/darwin.h, inside the
+ MD_FALLBACK_FRAME_STATE_FOR macro. */
+extern bool _Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs);
+
+/* Implement the MD_FALLBACK_FRAME_STATE_FOR macro,
+ returning true iff the frame was a sigreturn() frame that we
+ can understand. */
+
+bool
+_Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ reg_unit gprs[32];
+
+ if (!interpret_libc (gprs, context))
+ return false;
+ return handle_syscall (fs, gprs, _Unwind_GetCFA (context));
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fpsave.asm b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fpsave.asm
new file mode 100644
index 000000000..86d4760b0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-fpsave.asm
@@ -0,0 +1,102 @@
+/* This file contains the floating-point save and restore routines.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+   required (8 bytes).
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
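+
+/* For example: saveFP's first store is for f14, and each FP store is
+   one 4-byte instruction, so the entry that begins with f29 is 15
+   instructions in, at saveFP + 15*4 = saveFP+60.  */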
+
+#include "darwin-asm.h"
+
+.text
+ .align 2
+
+/* saveFP saves R0 -- assumed to be the caller's LR -- to 8/16(R1). */
+
+.private_extern saveFP
+saveFP:
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stg r0,SAVED_LR_OFFSET(r1)
+ blr
+
+/* restFP restores the caller's LR from 8/16(R1).  Note that the code for
+ this starts at the offset of F30 restoration, so calling this
+ routine in an attempt to restore only F31 WILL NOT WORK (it would
+ be a stupid thing to do, anyway.) */
+
+.private_extern restFP
+restFP:
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+	/* <OFFSET OF F30 RESTORE> restore caller's LR */
+ lg r0,SAVED_LR_OFFSET(r1)
+ lfd f30,-16(r1)
+ /* and prepare for return to caller */
+ mtlr r0
+ lfd f31,-8(r1)
+ blr
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble-format b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble-format
new file mode 100644
index 000000000..0012a332d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble-format
@@ -0,0 +1,84 @@
+Long double format
+==================
+
+ Each long double is made up of two IEEE doubles. The value of the
+long double is the sum of the values of the two parts (except for
+-0.0). The most significant part is required to be the value of the
+long double rounded to the nearest double, as specified by IEEE. For
+Inf values, the least significant part is required to be one of +0.0
+or -0.0. No other requirements are made; so, for example, 1.0 may be
+represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a NaN
+is don't-care.
+
+Classification
+--------------
+
+A long double can represent any value of the form
+ s * 2^e * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, 'e' is between 1022 and -968 inclusive, f_0 is
+1, and f_k for k>0 is 0 or 1. These are the 'normal' long doubles.
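+
+For example, the pair (1.0, 2^-100) is a normal long double: it
+represents 1 + 2^-100 (e = 0, f_0 = 1, f_100 = 1), a value that needs
+101 significand bits and so is not representable as a single double,
+and its high part is indeed the value rounded to the nearest double.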
+
+A long double can also represent any value of the form
+ s * 2^-968 * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, f_0 is 0, and f_k for k>0 is 0 or 1. These are
+the 'subnormal' long doubles.
+
+There are four long doubles that represent zero, two that represent
++0.0 and two that represent -0.0. The sign of the high part is the
+sign of the long double, and the sign of the low part is ignored.
+
+Likewise, there are four long doubles that represent infinities, two
+for +Inf and two for -Inf.
+
+Each NaN, quiet or signalling, that can be represented as a 'double'
+can be represented as a 'long double'.  In fact, since the 64-bit low
+part of a NaN is don't-care, there are 2^64 equivalent representations
+for each one.
+
+There are certain other valid long doubles where both parts are
+nonzero but the low part represents a value which has a bit set below
+2^(e-105). These, together with the subnormal long doubles, make up
+the denormal long doubles.
+
+Many possible long double bit patterns are not valid long doubles.
+These do not represent any value.
+
+Limits
+------
+
+The maximum representable long double is 2^1024-2^918. The smallest
+*normal* positive long double is 2^-968. The smallest denormalised
+positive long double is 2^-1074 (this is the same as for 'double').
+
+Conversions
+-----------
+
+A double can be converted to a long double by adding a zero low part.
+
+A long double can be converted to a double by removing the low part.
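+
+A sketch of both conversions in C (assuming the big-endian two-double
+layout used by the support code in this patch; the union and function
+names here are illustrative, not the libgcc entry points):
+
+  typedef union { long double ld; double d[2]; } ldu;
+
+  long double double_to_ldouble (double x)
+  {
+    ldu u;
+    u.d[0] = x;   /* high part: the value itself */
+    u.d[1] = 0.0; /* low part: zero */
+    return u.ld;
+  }
+
+  double ldouble_to_double (long double x)
+  {
+    ldu u;
+    u.ld = x;
+    return u.d[0]; /* keep only the high part */
+  }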
+
+Comparisons
+-----------
+
+Two long doubles can be compared by comparing the high parts, and if
+those compare equal, comparing the low parts.
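+
+A sketch of less-than in C under the same layout assumptions (again
+illustrative only):
+
+  static int ldouble_lt (ldu a, ldu b)
+  {
+    if (a.d[0] != b.d[0])       /* high parts differ: they decide */
+      return a.d[0] < b.d[0];
+    return a.d[1] < b.d[1];     /* high parts equal: low parts decide */
+  }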
+
+Arithmetic
+----------
+
+The unary negate operation works by negating both the low and high parts.
+
+An absolute or absolute-negate operation must be done by comparing
+against zero and negating if necessary.
+
+Addition and subtraction are performed using library routines.  They
+are not at present performed perfectly accurately: the result produced
+will be within 1ulp of the range generated by adding or subtracting
+1ulp from the input values, where a 'ulp' is 2^(e-106) given the
+exponent 'e'.  In the presence of cancellation, this may be
+arbitrarily inaccurate.  Subtraction is done by negation and addition.
+
+Multiplication is also performed using a library routine. Its result
+will be within 2ulp of the correct result.
+
+Division is also performed using a library routine. Its result will
+be within 3ulp of the correct result.
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble.c
new file mode 100644
index 000000000..c30a98cc9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-ldouble.c
@@ -0,0 +1,438 @@
+/* 128-bit long double support routines for Darwin.
+ Copyright (C) 1993, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* Implementations of floating-point long double basic arithmetic
+ functions called by the IBM C compiler when generating code for
+ PowerPC platforms. In particular, the following functions are
+ implemented: __gcc_qadd, __gcc_qsub, __gcc_qmul, and __gcc_qdiv.
+ Double-double algorithms are based on the paper "Doubled-Precision
+ IEEE Standard 754 Floating-Point Arithmetic" by W. Kahan, February 26,
+ 1987. An alternative published reference is "Software for
+ Doubled-Precision Floating-Point Computations", by Seppo Linnainmaa,
+ ACM TOMS vol 7 no 3, September 1981, pages 272-283. */
+
+/* Each long double is made up of two IEEE doubles. The value of the
+ long double is the sum of the values of the two parts. The most
+ significant part is required to be the value of the long double
+ rounded to the nearest double, as specified by IEEE. For Inf
+ values, the least significant part is required to be one of +0.0 or
+ -0.0. No other requirements are made; so, for example, 1.0 may be
+ represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a
+ NaN is don't-care.
+
+ This code currently assumes big-endian. */
+
+#if ((!defined (__NO_FPRS__) || defined (_SOFT_FLOAT)) \
+ && !defined (__LITTLE_ENDIAN__) \
+ && (defined (__MACH__) || defined (__powerpc__) || defined (_AIX)))
+
+#define fabs(x) __builtin_fabs(x)
+#define isless(x, y) __builtin_isless (x, y)
+#define inf() __builtin_inf()
+
+#define unlikely(x) __builtin_expect ((x), 0)
+
+#define nonfinite(a) unlikely (! isless (fabs (a), inf ()))
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+
+/* All these routines actually take two long doubles as parameters,
+ but GCC currently generates poor code when a union is used to turn
+ a long double into a pair of doubles. */
+
+long double __gcc_qadd (double, double, double, double);
+long double __gcc_qsub (double, double, double, double);
+long double __gcc_qmul (double, double, double, double);
+long double __gcc_qdiv (double, double, double, double);
+
+#if defined __ELF__ && defined SHARED \
+ && (defined __powerpc64__ || !(defined __linux__ || defined __gnu_hurd__))
+/* Provide definitions of the old symbol names to satisfy apps and
+ shared libs built against an older libgcc. To access the _xlq
+ symbols an explicit version reference is needed, so these won't
+ satisfy an unadorned reference like _xlqadd. If dot symbols are
+ not needed, the assembler will remove the aliases from the symbol
+ table. */
+__asm__ (".symver __gcc_qadd,_xlqadd@GCC_3.4\n\t"
+ ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t"
+ ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t"
+ ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t"
+ ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t"
+ ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t"
+ ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t"
+ ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4");
+#endif
+
+typedef union
+{
+ long double ldval;
+ double dval[2];
+} longDblUnion;
+
+/* Add two 'long double' values and return the result. */
+long double
+__gcc_qadd (double a, double aa, double c, double cc)
+{
+ longDblUnion x;
+ double z, q, zz, xh;
+
+ z = a + c;
+
+ if (nonfinite (z))
+ {
+ z = cc + aa + c + a;
+ if (nonfinite (z))
+ return z;
+ x.dval[0] = z; /* Will always be DBL_MAX. */
+ zz = aa + cc;
+ if (fabs(a) > fabs(c))
+ x.dval[1] = a - z + c + zz;
+ else
+ x.dval[1] = c - z + a + zz;
+ }
+ else
+ {
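+      /* TwoSum-style step: q + c + (a - (q + z)) reconstructs exactly
+         the rounding error of z = a + c; the two low parts are then
+         folded in.  */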
+ q = a - z;
+ zz = q + c + (a - (q + z)) + aa + cc;
+
+ /* Keep -0 result. */
+ if (zz == 0.0)
+ return z;
+
+ xh = z + zz;
+ if (nonfinite (xh))
+ return xh;
+
+ x.dval[0] = xh;
+ x.dval[1] = z - xh + zz;
+ }
+ return x.ldval;
+}
+
+long double
+__gcc_qsub (double a, double b, double c, double d)
+{
+ return __gcc_qadd (a, b, -c, -d);
+}
+
+#ifdef _SOFT_FLOAT
+static double fmsub (double, double, double);
+#endif
+
+long double
+__gcc_qmul (double a, double b, double c, double d)
+{
+ longDblUnion z;
+ double t, tau, u, v, w;
+
+ t = a * c; /* Highest order double term. */
+
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
+ return t;
+
+ /* Sum terms of two highest orders. */
+
+ /* Use fused multiply-add to get low part of a * c. */
+#ifndef _SOFT_FLOAT
+ asm ("fmsub %0,%1,%2,%3" : "=f"(tau) : "f"(a), "f"(c), "f"(t));
+#else
+ tau = fmsub (a, c, t);
+#endif
+ v = a*d;
+ w = b*c;
+ tau += v + w; /* Add in other second-order terms. */
+ u = t + tau;
+
+ /* Construct long double result. */
+ if (nonfinite (u))
+ return u;
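+  /* Fast2Sum-style renormalization: (t - u) + tau recovers the
+     rounding error of u = t + tau (t carries the higher order),
+     giving the low double of the result.  */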
+ z.dval[0] = u;
+ z.dval[1] = (t - u) + tau;
+ return z.ldval;
+}
+
+long double
+__gcc_qdiv (double a, double b, double c, double d)
+{
+ longDblUnion z;
+ double s, sigma, t, tau, u, v, w;
+
+ t = a / c; /* highest order double term */
+
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
+ return t;
+
+ /* Finite nonzero result requires corrections to the highest order term. */
+
+ s = c * t; /* (s,sigma) = c*t exactly. */
+ w = -(-b + d * t); /* Written to get fnmsub for speed, but not
+ numerically necessary. */
+
+ /* Use fused multiply-add to get low part of c * t. */
+#ifndef _SOFT_FLOAT
+ asm ("fmsub %0,%1,%2,%3" : "=f"(sigma) : "f"(c), "f"(t), "f"(s));
+#else
+ sigma = fmsub (c, t, s);
+#endif
+ v = a - s;
+
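+  /* (v - sigma) + w approximates the numerator residual
+     (a + b) - (c + d)*t, since s + sigma == c*t exactly and
+     w == b - d*t; dividing by c yields the correction to t.  */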
+ tau = ((v-sigma)+w)/c; /* Correction to t. */
+ u = t + tau;
+
+ /* Construct long double result. */
+ if (nonfinite (u))
+ return u;
+ z.dval[0] = u;
+ z.dval[1] = (t - u) + tau;
+ return z.ldval;
+}
+
+#if defined (_SOFT_FLOAT) && defined (__LONG_DOUBLE_128__)
+
+long double __gcc_qneg (double, double);
+int __gcc_qeq (double, double, double, double);
+int __gcc_qne (double, double, double, double);
+int __gcc_qge (double, double, double, double);
+int __gcc_qle (double, double, double, double);
+int __gcc_qunord (double, double, double, double);
+long double __gcc_stoq (float);
+long double __gcc_dtoq (double);
+float __gcc_qtos (double, double);
+double __gcc_qtod (double, double);
+int __gcc_qtoi (double, double);
+unsigned int __gcc_qtou (double, double);
+long double __gcc_itoq (int);
+long double __gcc_utoq (unsigned int);
+
+extern int __eqdf2 (double, double);
+extern int __ledf2 (double, double);
+extern int __gedf2 (double, double);
+extern int __unorddf2 (double, double);
+
+/* Negate 'long double' value and return the result. */
+long double
+__gcc_qneg (double a, double aa)
+{
+ longDblUnion x;
+
+ x.dval[0] = -a;
+ x.dval[1] = -aa;
+ return x.ldval;
+}
+
+/* Compare two 'long double' values for equality. */
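+/* (libgcc convention: __eqdf2 returns zero iff its arguments are
+   equal, so a zero high-part comparison defers to the low parts.)  */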
+int
+__gcc_qeq (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __eqdf2 (aa, cc);
+ return 1;
+}
+
+strong_alias (__gcc_qeq, __gcc_qne);
+
+/* Compare two 'long double' values for less than or equal. */
+int
+__gcc_qle (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __ledf2 (aa, cc);
+ return __ledf2 (a, c);
+}
+
+strong_alias (__gcc_qle, __gcc_qlt);
+
+/* Compare two 'long double' values for greater than or equal. */
+int
+__gcc_qge (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __gedf2 (aa, cc);
+ return __gedf2 (a, c);
+}
+
+strong_alias (__gcc_qge, __gcc_qgt);
+
+/* Compare two 'long double' values for unordered. */
+int
+__gcc_qunord (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __unorddf2 (aa, cc);
+ return __unorddf2 (a, c);
+}
+
+/* Convert single to long double. */
+long double
+__gcc_stoq (float a)
+{
+ longDblUnion x;
+
+ x.dval[0] = (double) a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert double to long double. */
+long double
+__gcc_dtoq (double a)
+{
+ longDblUnion x;
+
+ x.dval[0] = a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert long double to single. */
+float
+__gcc_qtos (double a, double aa __attribute__ ((__unused__)))
+{
+ return (float) a;
+}
+
+/* Convert long double to double. */
+double
+__gcc_qtod (double a, double aa __attribute__ ((__unused__)))
+{
+ return a;
+}
+
+/* Convert long double to int. */
+int
+__gcc_qtoi (double a, double aa)
+{
+ double z = a + aa;
+ return (int) z;
+}
+
+/* Convert long double to unsigned int. */
+unsigned int
+__gcc_qtou (double a, double aa)
+{
+ double z = a + aa;
+ return (unsigned int) z;
+}
+
+/* Convert int to long double. */
+long double
+__gcc_itoq (int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+/* Convert unsigned int to long double. */
+long double
+__gcc_utoq (unsigned int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+#include "config/soft-fp/quad.h"
+
+/* Compute floating point multiply-subtract with higher (quad) precision. */
+static double
+fmsub (double a, double b, double c)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ FP_DECL_D(B);
+ FP_DECL_D(C);
+ FP_DECL_Q(X);
+ FP_DECL_Q(Y);
+ FP_DECL_Q(Z);
+ FP_DECL_Q(U);
+ FP_DECL_Q(V);
+ FP_DECL_D(R);
+ double r;
+ long double u, v, x, y, z;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_D (A, a);
+ FP_UNPACK_RAW_D (B, b);
+ FP_UNPACK_RAW_D (C, c);
+
+ /* Extend double to quad. */
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_EXTEND(Q,D,4,2,X,A);
+ FP_EXTEND(Q,D,4,2,Y,B);
+ FP_EXTEND(Q,D,4,2,Z,C);
+#else
+ FP_EXTEND(Q,D,2,1,X,A);
+ FP_EXTEND(Q,D,2,1,Y,B);
+ FP_EXTEND(Q,D,2,1,Z,C);
+#endif
+ FP_PACK_RAW_Q(x,X);
+ FP_PACK_RAW_Q(y,Y);
+ FP_PACK_RAW_Q(z,Z);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Multiply. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q(X,x);
+ FP_UNPACK_Q(Y,y);
+ FP_MUL_Q(U,X,Y);
+ FP_PACK_Q(u,U);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Subtract. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(U,u);
+ FP_UNPACK_SEMIRAW_Q(Z,z);
+ FP_SUB_Q(V,U,Z);
+ FP_PACK_SEMIRAW_Q(v,V);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Truncate quad to double. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(V,v);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_TRUNC(D,Q,2,4,R,V);
+#else
+ FP_TRUNC(D,Q,1,2,R,V);
+#endif
+ FP_PACK_SEMIRAW_D(r,R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+#endif
+
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.4.ver b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.4.ver
new file mode 100644
index 000000000..019218dd6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.4.ver
@@ -0,0 +1,76 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdi3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldi3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.5.ver b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.5.ver
new file mode 100644
index 000000000..7e0dd52b0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-libgcc.10.5.ver
@@ -0,0 +1,89 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divtc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___floatundidf
+___floatundisf
+___floatunditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___multc3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powitf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-tramp.asm b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-tramp.asm
new file mode 100644
index 000000000..62522b9e3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-tramp.asm
@@ -0,0 +1,135 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000, 2004, 2005 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+#include "darwin-asm.h"
+
+/* Set up trampolines. */
+
+.text
+ .align LOG2_GPR_BYTES
+Ltrampoline_initial:
+ mflr r0
+ bl 1f
+Lfunc = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with function address */
+Lchain = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with static chain */
+1: mflr r11
+ lg r12,0(r11) /* function address */
+ mtlr r0
+ mtctr r12
+ lg r11,GPR_BYTES(r11) /* static chain */
+ bctr
+
+trampoline_size = .-Ltrampoline_initial
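+
+/* Once copied and patched, the trampoline above behaves roughly as:
+     r12 = *Lfunc; CTR = r12; r11 = *Lchain; goto *CTR;
+   i.e. it loads the patched-in target address and static chain and
+   tail-jumps, with the mflr/mtlr pair keeping the original LR intact.  */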
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+ .globl ___trampoline_setup
+___trampoline_setup:
+ mflr r0 /* save return address */
+ bcl 20,31,LCF0 /* load up __trampoline_initial into r7 */
+LCF0:
+ mflr r11
+ addis r7,r11,ha16(LTRAMP-LCF0)
+ lg r7,lo16(LTRAMP-LCF0)(r7)
+ subi r7,r7,4
+ li r8,trampoline_size /* verify trampoline big enough */
+ cmpg cr1,r8,r4
+ srwi r4,r4,2 /* # words to move (insns always 4-byte) */
+ addi r9,r3,-4 /* adjust pointer for lgu */
+ mtctr r4
+ blt cr1,Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz Lmove
+
+ /* Store correct function and static chain */
+ stg r5,Lfunc(r3)
+ stg r6,Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz Lcache
+
+ /* Ensure cache-flushing has finished. */
+ sync
+ isync
+
+	/* Make stack executable. */
+ b ___enable_execute_stack
+
+Labort:
+#ifdef __DYNAMIC__
+ bl L_abort$stub
+.data
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 2
+L_abort$stub:
+ .indirect_symbol _abort
+ mflr r0
+ bcl 20,31,L0$_abort
+L0$_abort:
+ mflr r11
+ addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
+ mtlr r0
+ lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
+ mtctr r12
+ bctr
+.data
+.lazy_symbol_pointer
+L_abort$lazy_ptr:
+ .indirect_symbol _abort
+ .g_long dyld_stub_binding_helper
+#else
+ bl _abort
+#endif
+.data
+ .align LOG2_GPR_BYTES
+LTRAMP:
+ .g_long Ltrampoline_initial
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-unwind.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-unwind.h
new file mode 100644
index 000000000..9218c5ad4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-unwind.h
@@ -0,0 +1,35 @@
+/* DWARF2 EH unwinding support for Darwin.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combined
+ executable.)
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+extern bool _Unwind_fallback_frame_state_for
+ (struct _Unwind_Context *context, _Unwind_FrameState *fs);
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS) \
+ (_Unwind_fallback_frame_state_for (CONTEXT, FS) \
+ ? _URC_NO_REASON : _URC_END_OF_STACK)
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-vecsave.asm b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-vecsave.asm
new file mode 100644
index 000000000..693879f82
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-vecsave.asm
@@ -0,0 +1,165 @@
+/* This file contains the vector save and restore routines.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* Vector save/restore routines for Darwin. Note that each vector
+ save/restore requires 2 instructions (8 bytes.)
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+   required (8 bytes). */
+
+ .machine ppc7400
+.text
+ .align 2
+
+.private_extern saveVEC
+saveVEC:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ blr
+
+.private_extern restVEC
+restVEC:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ blr
+
+/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
+
+.private_extern saveVEC_vr11
+saveVEC_vr11:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ mfspr r11,VRsave
+ blr
+
+/* As restVEC, but the original VRsave value is passed in R10. */
+
+.private_extern restVEC_vr10
+restVEC_vr10:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ /* restore VRsave from R10. */
+ mtspr VRsave,r10
+ blr
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-world.asm b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-world.asm
new file mode 100644
index 000000000..7ff51b51f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin-world.asm
@@ -0,0 +1,269 @@
+/* This file contains the exception-handling save_world and
+ * restore_world routines, which need to do a run-time check to see if
+ * they should save and restore the vector registers.
+ *
+ * Copyright (C) 2004 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+ .machine ppc7400
+.data
+ .align 2
+
+#ifdef __DYNAMIC__
+
+.non_lazy_symbol_pointer
+L_has_vec$non_lazy_ptr:
+ .indirect_symbol __cpu_has_altivec
+#ifdef __ppc64__
+ .quad 0
+#else
+ .long 0
+#endif
+
+#else
+
+/* For static, "pretend" we have a non-lazy-pointer. */
+
+L_has_vec$non_lazy_ptr:
+ .long __cpu_has_altivec
+
+#endif
+
+
+.text
+ .align 2
+
+/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
+ (assuming you have a CPU with vector registers; we use a global var
+ provided by the System Framework to determine this.)
+
+   SAVE_WORLD takes R0 (the caller's caller's return address) and R11
+   (the stack frame size) as parameters.  It returns VRsave in R0 if
+   we're on a CPU with vector regs.
+
+ With gcc3, we now need to save and restore CR as well, since gcc3's
+ scheduled prologs can cause comparisons to be moved before calls to
+ save_world!
+
+ USES: R0 R11 R12 */
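+
+/* Stack layout relative to the caller's SP, as stored below: the
+   incoming R0 (return address) at +8, CR at +4, R13-R31 from -220,
+   F14-F31 at -144..-8, VRsave at -224, and V20-V31 at -416..-240
+   (16 bytes apiece).  */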
+
+.private_extern save_world
+save_world:
+ stw r0,8(r1)
+ mflr r0
+ bcl 20,31,Ls$pb
+Ls$pb: mflr r12
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
+ mtlr r0
+ lwz r12,0(r12)
+ /* grab CR */
+ mfcr r0
+ /* test HAS_VEC */
+ cmpwi r12,0
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stmw r13,-220(r1)
+ /* stash CR */
+ stw r0,4(r1)
+ /* set R12 pointing at Vector Reg save area */
+ addi r12,r1,-224
+ /* allocate stack frame */
+ stwux r1,r1,r11
+ /* ...but return if HAS_VEC is zero */
+ bne+ L$saveVMX
+ /* Not forgetting to restore CR. */
+ mtcr r0
+ blr
+
+L$saveVMX:
+ /* We're saving Vector regs too. */
+ /* Restore CR from R0. No More Branches! */
+ mtcr r0
+
+ /* We should really use VRSAVE to figure out which vector regs
+ we actually need to save and restore. Some other time :-/ */
+
+ li r11,-192
+ stvx v20,r11,r12
+ li r11,-176
+ stvx v21,r11,r12
+ li r11,-160
+ stvx v22,r11,r12
+ li r11,-144
+ stvx v23,r11,r12
+ li r11,-128
+ stvx v24,r11,r12
+ li r11,-112
+ stvx v25,r11,r12
+ li r11,-96
+ stvx v26,r11,r12
+ li r11,-80
+ stvx v27,r11,r12
+ li r11,-64
+ stvx v28,r11,r12
+ li r11,-48
+ stvx v29,r11,r12
+ li r11,-32
+ stvx v30,r11,r12
+ mfspr r0,VRsave
+ li r11,-16
+ stvx v31,r11,r12
+ /* VRsave lives at -224(R1) */
+ stw r0,0(r12)
+ blr
+
+
+/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
+   R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
+
+ USES: R0 R10 R11 R12 and R7 R8
+ RETURNS: C++ EH Data registers (R3 - R6.)
+
+ We now set up R7/R8 and jump to rest_world_eh_r7r8.
+
+ rest_world doesn't use the R10 stack adjust parameter, nor does it
+ pick up the R3-R6 exception handling stuff. */
+
+.private_extern rest_world
+rest_world:
+	/* Pick up previous SP */
+ lwz r11, 0(r1)
+ li r7, 0
+ lwz r8, 8(r11)
+ li r10, 0
+ b rest_world_eh_r7r8
+
+.private_extern eh_rest_world_r10
+eh_rest_world_r10:
+	/* Pick up previous SP */
+ lwz r11, 0(r1)
+ mr r7,r10
+ lwz r8, 8(r11)
+	/* pick up the C++ EH data regs (R3 - R6.) */
+ lwz r6,-420(r11)
+ lwz r5,-424(r11)
+ lwz r4,-428(r11)
+ lwz r3,-432(r11)
+
+ b rest_world_eh_r7r8
+
+/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing
+ the exception-handling epilog. R7 contains the offset to add to
+ the SP, and R8 contains the 'real' return address.
+
+ USES: R0 R11 R12 [R7/R8]
+ RETURNS: C++ EH Data registers (R3 - R6.) */
+
+rest_world_eh_r7r8:
+ bcl 20,31,Lr7r8$pb
+Lr7r8$pb: mflr r12
+ lwz r11,0(r1)
+ /* R11 := previous SP */
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12)
+ lwz r0,4(r11)
+ /* R0 := old CR */
+ lwz r12,0(r12)
+ /* R12 := HAS_VEC */
+ mtcr r0
+ cmpwi r12,0
+ lmw r13,-220(r11)
+ beq L.rest_world_fp_eh
+ /* restore VRsave and V20..V31 */
+ lwz r0,-224(r11)
+ li r12,-416
+ mtspr VRsave,r0
+ lvx v20,r11,r12
+ li r12,-400
+ lvx v21,r11,r12
+ li r12,-384
+ lvx v22,r11,r12
+ li r12,-368
+ lvx v23,r11,r12
+ li r12,-352
+ lvx v24,r11,r12
+ li r12,-336
+ lvx v25,r11,r12
+ li r12,-320
+ lvx v26,r11,r12
+ li r12,-304
+ lvx v27,r11,r12
+ li r12,-288
+ lvx v28,r11,r12
+ li r12,-272
+ lvx v29,r11,r12
+ li r12,-256
+ lvx v30,r11,r12
+ li r12,-240
+ lvx v31,r11,r12
+
+L.rest_world_fp_eh:
+ lfd f14,-144(r11)
+ lfd f15,-136(r11)
+ lfd f16,-128(r11)
+ lfd f17,-120(r11)
+ lfd f18,-112(r11)
+ lfd f19,-104(r11)
+ lfd f20,-96(r11)
+ lfd f21,-88(r11)
+ lfd f22,-80(r11)
+ lfd f23,-72(r11)
+ lfd f24,-64(r11)
+ lfd f25,-56(r11)
+ lfd f26,-48(r11)
+ lfd f27,-40(r11)
+ lfd f28,-32(r11)
+ lfd f29,-24(r11)
+ lfd f30,-16(r11)
+ /* R8 is the exception-handler's address */
+ mtctr r8
+ lfd f31,-8(r11)
+ /* set SP to original value + R7 offset */
+ add r1,r11,r7
+ bctr
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.h
new file mode 100644
index 000000000..672fd2889
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.h
@@ -0,0 +1,605 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC)");
+
+/* The "Darwin ABI" is mostly like AIX, but with some key differences. */
+
+#define DEFAULT_ABI ABI_DARWIN
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __powerpc64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+/* The object file format is Mach-O. */
+
+#define TARGET_OBJECT_FORMAT OBJECT_MACHO
+
+/* Size of the Obj-C jump buffer. */
+#define OBJC_JBLEN ((TARGET_64BIT) ? (26*2 + 18*2 + 129 + 1) : (26 + 18*2 + 129 + 1))
+
+/* We're not ever going to do TOCs. */
+
+#define TARGET_TOC 0
+#define TARGET_NO_TOC 1
+
+/* Override the default rs6000 definition. */
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+
+/* APPLE LOCAL begin mdynamic-no-pic */
+/* This was a stupid idea in the first place. */
+#if 0
+/* Translate config/rs6000/darwin.opt to config/darwin.h. */
+#define TARGET_DYNAMIC_NO_PIC (TARGET_MACHO_DYNAMIC_NO_PIC)
+#endif
+/* APPLE LOCAL end mdynamic-no-pic */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ if (!TARGET_64BIT) builtin_define ("__ppc__"); \
+ if (TARGET_64BIT) builtin_define ("__ppc64__"); \
+ builtin_define ("__POWERPC__"); \
+ builtin_define ("__NATURAL_ALIGNMENT__"); \
+ darwin_cpp_builtins (pfile); \
+ } \
+ while (0)
+
+/* APPLE LOCAL begin mainline 2007-02-20 5005743 */ \
+#define SUBTARGET_OVERRIDE_OPTIONS darwin_rs6000_override_options ()
+
+/* APPLE LOCAL end mainline 2007-02-20 5005743 */ \
+/* APPLE LOCAL begin mainline */
+#define C_COMMON_OVERRIDE_OPTIONS do { \
+ /* On powerpc, __cxa_get_exception_ptr is available starting in the \
+ 10.4.6 libstdc++.dylib. */ \
+/* APPLE LOCAL begin ARM 5683689 */ \
+ if (!darwin_iphoneos_version_min \
+ && (!darwin_macosx_version_min \
+ || strverscmp (darwin_macosx_version_min, "10.4.6") < 0) \
+/* APPLE LOCAL end 5683689 */ \
+ && flag_use_cxa_get_exception_ptr == 2) \
+ flag_use_cxa_get_exception_ptr = 0; \
+ /* APPLE LOCAL begin 5731065 */ \
+ /* moved flag_no_builtin to darwin.h */ \
+ /* APPLE LOCAL end 5731065 */ \
+ SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \
+} while (0)
+/* APPLE LOCAL end mainline */
+
+/* Darwin has 128-bit long double support in libc in 10.4 and later.
+ Default to 128-bit long doubles even on earlier platforms for ABI
+ consistency; arithmetic will work even if libc and libm support is
+ not available. */
+
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
+
+
+/* We want -fPIC by default, unless we're using -static to compile for
+ the kernel or some such. */
+
+#define CC1_SPEC "\
+ "/* APPLE LOCAL ARM ignore -mthumb and -mno-thumb */"\
+ %<mthumb %<mno-thumb \
+ "/* APPLE LOCAL ignore -msse and -msse2 and other x86 options */"\
+ %<msse %<msse2 %<msse3 %<march=pentium4 %<mcpu=pentium4 \
+ %{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }} \
+ %{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
+ "/* APPLE LOCAL ARM 5683689 */"\
+ %{!mmacosx-version-min=*: %{!miphoneos-version-min=*: %(darwin_cc1_minversion)}} \
+ "/* APPLE LOCAL -fast or -fastf or -fastcp */"\
+ %{!mkernel:%{!static:%{!fast:%{!fastf:%{!fastcp:%{!mdynamic-no-pic:-fPIC}}}}}}"
+
+#define DARWIN_ARCH_SPEC "%{m64:ppc64;:ppc}"
+
+#define DARWIN_SUBARCH_SPEC " \
+ %{m64: ppc64} \
+ %{!m64: \
+ %{mcpu=601:ppc601; \
+ mcpu=603:ppc603; \
+ mcpu=603e:ppc603; \
+ mcpu=604:ppc604; \
+ mcpu=604e:ppc604e; \
+ mcpu=740:ppc750; \
+ mcpu=750:ppc750; \
+ mcpu=G3:ppc750; \
+ mcpu=7400:ppc7400; \
+ mcpu=G4:ppc7400; \
+ mcpu=7450:ppc7450; \
+ mcpu=970:ppc970; \
+ mcpu=power4:ppc970; \
+ mcpu=G5:ppc970; \
+ :ppc}}"
+
+/* crt2.o is at least partially required for 10.3.x and earlier. */
+/* APPLE LOCAL begin ARM 5683689 */
+#define DARWIN_CRT2_SPEC \
+ "%{!m64: %{mmacosx-version-min=*: \
+ %:version-compare(!> 10.4 mmacosx-version-min= crt2.o%s)}}"
+/* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL begin mainline 2007-03-13 5005743 5040758 */ \
+/* Determine a minimum version based on compiler options. */
+#define DARWIN_MINVERSION_SPEC \
+ "%{m64:%{fgnu-runtime:10.4; \
+ ,objective-c|,objc-cpp-output:10.5; \
+ ,objective-c-header:10.5; \
+ ,objective-c++|,objective-c++-cpp-output:10.5; \
+ ,objective-c++-header|,objc++-cpp-output:10.5; \
+ :10.4}; \
+ shared-libgcc:10.3; \
+ :10.1}"
+
+/* APPLE LOCAL end mainline 2007-03-13 5040758 5005743 */
+/* APPLE LOCAL begin ARM 5683689 */
+/* Default cc1 option for specifying minimum version number. */
+#define DARWIN_CC1_MINVERSION_SPEC "-mmacosx-version-min=%(darwin_minversion)"
+
+/* Default ld option for specifying minimum version number. */
+#define DARWIN_LD_MINVERSION_SPEC "-macosx_version_min %(darwin_minversion)"
+
+/* Use macosx version numbers by default. */
+#define DARWIN_DEFAULT_VERSION_TYPE DARWIN_VERSION_MACOSX
+/* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL ARM 5681645 8307333 */
+#define DARWIN_IPHONEOS_LIBGCC_SPEC "-lgcc"
+
+/* APPLE LOCAL begin mainline */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ DARWIN_EXTRA_SPECS \
+ { "darwin_arch", DARWIN_ARCH_SPEC }, \
+ { "darwin_crt2", DARWIN_CRT2_SPEC }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC },
+/* APPLE LOCAL end mainline */
+
+/* Output a .machine directive. */
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rs6000_darwin_file_start
+
+/* The "-faltivec" option should have been called "-maltivec" all
+   along.  -ffix-and-continue and -findirect-data are for compatibility
+   with old compilers. */
+
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "-ffix-and-continue", "-mfix-and-continue" }, \
+ { "-findirect-data", "-mfix-and-continue" }, \
+ /* APPLE LOCAL AltiVec */ \
+ { "-faltivec", "-faltivec -mpim-altivec" }, \
+ { "-fno-altivec", "-mno-altivec" }, \
+ { "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
+ { "-Wno-altivec-long-deprecated", "-mno-warn-altivec-long" }
+
+/* Make both r2 and r13 available for allocation. */
+#define FIXED_R2 0
+#define FIXED_R13 0
+
+/* Base register for access to local variables of the function. */
+
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 30
+
+#undef RS6000_PIC_OFFSET_TABLE_REGNUM
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
+
+/* APPLE LOCAL begin -pg fix */
+/* -pg has a problem which is normally concealed by -fPIC;
+ either -mdynamic-no-pic or -static exposes the -pg problem, causing the
+ crash. FSF gcc for Darwin also has this bug. The problem is that -pg
+ causes several int registers to be saved and restored although they may
+ not actually be used (config/rs6000/rs6000.c:first_reg_to_save()). In the
+ rare case where none of them is actually used, a consistency check fails
+ (correctly). This cannot happen with -fPIC because the PIC register (R31)
+ is always "used" in the sense checked by the consistency check. The
+ easy fix, here, is therefore to mark R31 always "used" whenever -pg is on.
+ A better, but harder, fix would be to improve -pg's register-use
+ logic along the lines suggested by comments in the function listed above. */
+#undef PIC_OFFSET_TABLE_REGNUM
+#define PIC_OFFSET_TABLE_REGNUM ((flag_pic || profile_flag) \
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM \
+ : INVALID_REGNUM)
+/* APPLE LOCAL end -pg fix */
+
+/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
+
+#undef STARTING_FRAME_OFFSET
+#define STARTING_FRAME_OFFSET \
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
+ + RS6000_SAVE_AREA))
+
+#undef STACK_DYNAMIC_OFFSET
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
+ + (STACK_POINTER_OFFSET))
+
+/* These are used by -fbranch-probabilities */
+#define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions"
+#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
+ "__TEXT,__unlikely,regular,pure_instructions"
+
+/* APPLE LOCAL begin long call hot cold */
+/* The following is used by hot/cold partitioning to determine whether
+   unconditional branches are "long enough" to span the distance between
+ hot and cold sections (otherwise we have to use indirect jumps). It
+ is set based on the -mlongcall flag.
+ If -mlongcall is set, we use the indirect jumps (the macro below gets '0');
+ otherwise we use unconditional branches (the macro below gets '1'). */
+#define HAS_LONG_UNCOND_BRANCH (rs6000_default_long_calls ? 0 : 1)
+/* APPLE LOCAL end long call hot cold */
+
+/* APPLE LOCAL begin long-branch */
+/* Define cutoff for using external functions to save floating point.
+ For Darwin, use the function for more than a few registers. */
+
+/* APPLE LOCAL begin inline FP save/restore (radar 3414605) */
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) \
+(optimize >= 3 \
+|| ((FIRST_REG) > 60 && (FIRST_REG) < 64) \
+|| rs6000_default_long_calls)
+/* APPLE LOCAL end inline FP save/restore (radar 3414605) */
+
+/* Define cutoff for using external functions to save vector registers. */
+
+#undef VECTOR_SAVE_INLINE
+#define VECTOR_SAVE_INLINE(FIRST_REG) \
+ (((FIRST_REG) >= LAST_ALTIVEC_REGNO - 1 && (FIRST_REG) <= LAST_ALTIVEC_REGNO) \
+ || rs6000_default_long_calls)
+/* APPLE LOCAL end long-branch */
+
+/* Darwin uses a function call if everything needs to be saved/restored. */
+#undef WORLD_SAVE_P
+#define WORLD_SAVE_P(INFO) ((INFO)->world_save_p)
+
+/* The assembler wants the alternate register names, but without
+ leading percent sign. */
+#undef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
+ "mq", "lr", "ctr", "ap", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "xer", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
+ "vrsave", "vscr", \
+ "spe_acc", "spefscr", \
+ "sfp" \
+ /* APPLE LOCAL 3399553 */ \
+ , "fpscr" \
+}
+
+/* This outputs NAME to FILE. */
+
+#undef RS6000_OUTPUT_BASENAME
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name (FILE, NAME)
+
+/* Globalizing directive for a label. */
+#undef GLOBAL_ASM_OP
+#define GLOBAL_ASM_OP "\t.globl "
+#undef TARGET_ASM_GLOBALIZE_LABEL
+
+/* This is how to output an internal label prefix. rs6000.c uses this
+ when generating traceback tables. */
+/* Not really used for Darwin? */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL_PREFIX
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ fprintf (FILE, "%s", PREFIX)
+
+/* This says how to output an assembler line to define a global common
+ symbol. */
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ fputs (".comm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", _new_size); \
+ } while (0)
+
+/* Override the standard rs6000 definition. */
+
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START ";"
+
+/* APPLE LOCAL reduce code size */
+/* Don't define SAVE_FP_PREFIX and friends */
+
+/* This is how to output an assembler line that says to advance
+ the location counter to a multiple of 2**LOG bytes using the
+ "nop" instruction as padding. */
+
+#define ASM_OUTPUT_ALIGN_WITH_NOP(FILE,LOG) \
+ do \
+ { \
+ if ((LOG) < 3) \
+ { \
+ ASM_OUTPUT_ALIGN (FILE,LOG); \
+ } \
+ else /* nop == ori r0,r0,0 */ \
+ fprintf (FILE, "\t.align32 %d,0x60000000\n", (LOG)); \
+ } while (0)
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* This is supported in cctools 465 and later. The macro test
+ above prevents using it in earlier build environments. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
+
+/* Generate insns to call the profiler. */
+
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+
+/* Function name to call to do profiling. */
+
+#define RS6000_MCOUNT "*mcount"
+
+/* APPLE LOCAL begin 4298879. */
+/* Default processor (for -mtune): G5 when not optimizing for size, otherwise G4.
+ It is G5 by default for 64-bit in all cases. */
+/* APPLE LOCAL end 4298879. */
+#undef PROCESSOR_DEFAULT
+/* APPLE LOCAL 4298879. */
+#define PROCESSOR_DEFAULT (optimize_size ? PROCESSOR_PPC7400 : PROCESSOR_POWER4)
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
+
+/* Default target flag settings. Despite the fact that STMW/LMW
+ serializes, it's still a big code size win to use them. Use FSEL by
+ default as well. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_MULTIPLE | MASK_NEW_MNEMONICS \
+ | MASK_PPC_GFXOPT)
+
+/* Darwin only runs on PowerPC, so short-circuit POWER patterns. */
+#undef TARGET_POWER
+#define TARGET_POWER 0
+#undef TARGET_IEEEQUAD
+#define TARGET_IEEEQUAD 0
+
+/* Since Darwin doesn't do TOCs, stub this out. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) ((void)X, (void)MODE, 0)
+
+/* Unlike most other PowerPC targets, chars are signed, for
+ consistency with other Darwin architectures. */
+
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR (1)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ Don't allow R0 when loading the address of, or otherwise furtling with,
+ a SYMBOL_REF. */
+
+#undef PREFERRED_RELOAD_CLASS
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
+ ? NO_REGS \
+ : ((GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == HIGH) \
+ && reg_class_subset_p (BASE_REGS, (CLASS))) \
+ ? BASE_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS))
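+
+/* For illustration only: reloading a floating-point CONST_DOUBLE into
+ FLOAT_REGS hits the first arm above and returns NO_REGS, forcing the
+ constant to memory; reloading the HIGH part of a SYMBOL_REF into a
+ class that contains BASE_REGS returns BASE_REGS, so r0 (which cannot
+ serve as a base register) is never used for the address. */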
+
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+#define darwin_alignment_flags rs6000_alignment_flags
+#define OPTION_ALIGN_NATURAL TARGET_ALIGN_NATURAL
+#define OPTION_MASK_ALIGN_NATURAL MASK_ALIGN_NATURAL
+/* APPLE LOCAL begin mainline 2006-10-31 PR 23067, radar 4869885 */
+/* This now supports the Macintosh power, mac68k, and natural
+ alignment modes.
+
+ Compute field alignment. This is similar to the version of the
+ macro in the Apple version of GCC, except that version supports
+ 'mac68k' alignment, and that version uses the computed alignment
+ always for the first field of a structure. The first-field
+ behavior is dealt with by
+ darwin_rs6000_special_round_type_align. */
+/* APPLE LOCAL end mainline 2006-10-31 PR 23067, radar 4869885 */
+#undef ADJUST_FIELD_ALIGN
+/* APPLE LOCAL begin mainline 2006-10-31 PR 23067, radar 4869885 */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ (TARGET_ALIGN_NATURAL ? (COMPUTED) \
+ : (((COMPUTED) == RS6000_VECTOR_ALIGNMENT) \
+ ? RS6000_VECTOR_ALIGNMENT \
+ : (MIN ((COMPUTED), \
+ (OPTION_ALIGN_MAC68K ? 16 \
+ : 32)))))
+/* APPLE LOCAL end mainline 2006-10-31 PR 23067, radar 4869885 */
+
+/* When adjusting (lowering) the alignment of fields when in the
+ mac68k alignment mode, the 128-bit alignment of vectors *MUST*
+ be preserved. */
+#undef PEG_ALIGN_FOR_MAC68K
+#define PEG_ALIGN_FOR_MAC68K(DESIRED) \
+ ((DESIRED) == RS6000_VECTOR_ALIGNMENT ? RS6000_VECTOR_ALIGNMENT \
+ : MIN ((DESIRED), 16))
+
+/* APPLE LOCAL begin mainline 2006-10-31 PR 23067, radar 4869885 */
+/* Darwin increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0 \
+ ? darwin_rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
+ : (TREE_CODE (STRUCT) == VECTOR_TYPE \
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (STRUCT))) \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
+ : MAX ((COMPUTED), (SPECIFIED)))
+/* APPLE LOCAL end mainline 2006-10-31 PR 23067, radar 4869885 */
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
+
+/* APPLE LOCAL begin alignment */
+/* Make sure local alignments come from the type node, not the mode;
+ mode-based alignments are wrong for vectors. */
+#undef LOCAL_ALIGNMENT
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ (MIN (BIGGEST_ALIGNMENT, \
+ MAX ((unsigned) ALIGN, TYPE_ALIGN (TYPE))))
+/* APPLE LOCAL end alignment */
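+
+/* For illustration only: a local variable of an AltiVec vector type
+ gets TYPE_ALIGN (128 bits) from LOCAL_ALIGNMENT above even when its
+ mode alignment is smaller, capped at BIGGEST_ALIGNMENT. */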
+
+/* Specify padding for the last element of a block move between
+ registers and memory. FIRST is nonzero if this is the only
+ element. */
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE))
+
+/* XXX: Darwin supports neither .quad nor .llong, but it also doesn't
+ support 64-bit PowerPC, so this just keeps things happy. */
+#define DOUBLE_INT_ASM_OP "\t.quad\t"
+
+/* APPLE LOCAL begin branch cost */
+#undef BRANCH_COST
+/* Better code is generated by saying conditional branches take 1 tick. */
+#define BRANCH_COST 1
+/* APPLE LOCAL end branch cost */
+
+/* APPLE LOCAL begin indirect calls in R12 */
+/* Address of indirect call must be computed here */
+#define MAGIC_INDIRECT_CALL_REG 12
+/* APPLE LOCAL end indirect calls in R12 */
+
+/* For binary compatibility with 2.95: Darwin C APIs use bool from
+ <stdbool.h>, which was an int-sized enum in 2.95. Users can explicitly
+ choose to have sizeof (bool) == 1 with the -mone-byte-bool switch. */
+#define BOOL_TYPE_SIZE (darwin_one_byte_bool ? CHAR_TYPE_SIZE : INT_TYPE_SIZE)
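+
+/* For illustration only: by default sizeof (bool) is therefore 4
+ (INT_TYPE_SIZE == 32); with -mone-byte-bool it is 1. */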
+
+#undef REGISTER_TARGET_PRAGMAS
+#define REGISTER_TARGET_PRAGMAS() \
+ do \
+ { \
+ DARWIN_REGISTER_TARGET_PRAGMAS(); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
+ } \
+ while (0)
+
+#ifdef IN_LIBGCC2
+#include <stdbool.h>
+#endif
+
+#define MD_UNWIND_SUPPORT "config/rs6000/darwin-unwind.h"
+
+#define HAS_MD_FALLBACK_FRAME_STATE_FOR 1
+
+/* True iff we're generating fast-turnaround debugging code. When
+ true, we arrange for function prologues to start with 5 nops so
+ that gdb may insert code to redirect them, and for data to be
+ accessed indirectly. The runtime uses this indirection to forward
+ references for data to the original instance of that data. */
+
+#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue)
+
+/* APPLE LOCAL begin radar 4590221 */
+/* Nonzero iff the reserved Objective-C direct-dispatch addresses below
+ may be used; controlled by flag_objc_direct_dispatch. */
+#define HAVE_OFFS_MSGSEND_FAST \
+ (flag_next_runtime \
+ && flag_objc_direct_dispatch != 0 \
+ && !TARGET_64BIT \
+/* APPLE LOCAL begin ARM 5683689 */ \
+ && (darwin_iphoneos_version_min \
+ || strverscmp (darwin_macosx_version_min, "10.4") >= 0 \
+/* APPLE LOCAL end ARM 5683689 */ \
+ || flag_objc_direct_dispatch == 1))
+
+/* This is the reserved direct dispatch address for Objective-C. */
+#define OFFS_MSGSEND_FAST \
+ (HAVE_OFFS_MSGSEND_FAST ? 0xFFFEFF00 : 0)
+
+/* This is the reserved ivar-assignment address for Objective-C. */
+#define OFFS_ASSIGNIVAR_FAST \
+ (HAVE_OFFS_MSGSEND_FAST ? 0xFFFEFEC0 : 0)
+/* APPLE LOCAL end radar 4590221 */
+
+/* Old versions of Mac OS/Darwin don't have C99 functions available. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS \
+ (TARGET_64BIT \
+ /* APPLE LOCAL begin ARM 5683689 */ \
+ || darwin_iphoneos_version_min \
+ || strverscmp (darwin_macosx_version_min, "10.3") >= 0)
+ /* APPLE LOCAL end ARM 5683689 */
+
+/* APPLE LOCAL begin track initialization status 4964532 */
+/* APPLE LOCAL begin ARM 5683689 */
+#undef TARGET_DWARF_UNINIT_VARS
+#define TARGET_DWARF_UNINIT_VARS \
+ (darwin_iphoneos_version_min \
+ || (strverscmp (darwin_macosx_version_min, "10.4") >= 0))
+/* APPLE LOCAL end ARM 5683689 */
+/* APPLE LOCAL end track initialization status 4964532 */
+
+/* When generating kernel code or kexts, we don't use Altivec by
+ default, as kernel code doesn't save/restore those registers. */
+#define OS_MISSING_ALTIVEC (flag_mkernel || flag_apple_kext)
+
+/* APPLE LOCAL begin x86_64 */
+#define ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX(ASM_OUT_FILE, ENCODING, SIZE, ADDR, DONE) \
+ if (ENCODING == ASM_PREFERRED_EH_DATA_FORMAT (2, 1)) \
+ { \
+ darwin_non_lazy_pcrel (ASM_OUT_FILE, ADDR); \
+ goto DONE; \
+ }
+/* APPLE LOCAL end x86_64 */
+
+/* APPLE LOCAL KEXT */
+#define TARGET_SUPPORTS_KEXTABI1 (! TARGET_64BIT)
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.md
new file mode 100644
index 000000000..6d1942f17
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.md
@@ -0,0 +1,532 @@
+/* Machine description patterns for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+(define_insn "adddi3_high"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (high:DI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+(define_insn "movdf_low_si"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ {
+ if (TARGET_POWERPC64 && TARGET_32BIT)
+ /* Note, old assemblers didn't support relocation here. */
+ return \"ld %0,lo16(%2)(%1)\";
+ else
+ {
+ output_asm_insn (\"{cal|la} %0,lo16(%2)(%1)\", operands);
+ output_asm_insn (\"{l|lwz} %L0,4(%0)\", operands);
+ return (\"{l|lwz} %0,0(%0)\");
+ }
+ }
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,12")])
+
+
+(define_insn "movdf_low_di"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ return \"ld %0,lo16(%2)(%1)\";
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,4")])
+
+(define_insn "movdf_low_st_si"
+ [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdf_low_st_di"
+ [(set (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_si"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_di"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_si"
+ [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_di"
+ [(set (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; 64-bit MachO load/store support
+(define_insn "movdi_low"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{l|ld} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_low_st"
+ [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:SI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdi_low_st"
+ [(set (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{st|std} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; Mach-O PIC trickery.
+(define_expand "macho_high"
+ [(set (match_operand 0 "" "")
+ (high (match_operand 1 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_high_di (operands[0], operands[1]));
+ else
+ emit_insn (gen_macho_high_si (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "macho_high_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+
+(define_insn "macho_high_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b*r")
+ (high:DI (match_operand 1 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+(define_expand "macho_low"
+ [(set (match_operand 0 "" "")
+ (lo_sum (match_operand 1 "" "")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_low_di (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_macho_low_si (operands[0], operands[1], operands[2]));
+
+ DONE;
+})
+
+(define_insn "macho_low_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
+
+(define_insn "macho_low_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
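+
+;; For illustration only: the high/low patterns above are normally
+;; paired to materialize a symbol address, e.g. for a hypothetical
+;; symbol _foo:
+;; lis r3,ha16(_foo)
+;; la r3,lo16(_foo)(r3)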
+
+(define_split
+ [(set (mem:V4SI (plus:DI (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "short_cint_operand" "")))
+ (match_operand:V4SI 2 "register_operand" ""))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" ""))]
+ "TARGET_MACHO && TARGET_64BIT"
+ [(set (match_dup 3) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (mem:V4SI (match_dup 3))
+ (match_dup 2))]
+ "")
+
+(define_expand "load_macho_picbase"
+ [(set (match_operand 0 "" "")
+ (unspec [(match_operand 1 "" "")]
+ UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_load_macho_picbase_si (operands[0], operands[1]));
+ else
+ emit_insn (gen_load_macho_picbase_di (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "load_macho_picbase_si"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+ "bcl 20,31,%1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_macho_picbase_di"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (unspec:DI [(match_operand:DI 1 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic && TARGET_64BIT"
+ "bcl 20,31,%1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
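+
+;; For illustration: "bcl 20,31,%1\n%1:" is the canonical Mach-O PIC
+;; idiom. BO=20/BI=31 encodes "branch always", and the target is the
+;; immediately following label, so the instruction's only effect is to
+;; deposit that label's address in LR, which becomes the PIC base.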
+
+;; APPLE LOCAL begin special ObjC method use of R12
+
+(define_expand "load_macho_picbase_label"
+ [(set (match_operand 0 "" "")
+ (unspec [(match_operand 1 "" "")]
+ UNSPEC_LD_MPIC_L))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_load_macho_picbase_label_si (operands[0], operands[1]));
+ else
+ emit_insn (gen_load_macho_picbase_label_di (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "load_macho_picbase_label_si"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec_volatile:SI [(match_operand:SI 1 "immediate_operand" "s")]
+ UNSPEC_LD_MPIC_L))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+ ";bcl 20,31,%1\\n%1:"
+ [(set_attr "length" "0")])
+
+(define_insn "load_macho_picbase_label_di"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (unspec_volatile:DI [(match_operand:DI 1 "immediate_operand" "s")]
+ UNSPEC_LD_MPIC_L))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic && TARGET_64BIT"
+ ";bcl 20,31,%1\\n%1:"
+ [(set_attr "length" "0")])
+
+;; APPLE LOCAL end special ObjC method use of R12
+
+(define_expand "macho_correct_pic"
+ [(set (match_operand 0 "" "")
+ (plus (match_operand 1 "" "")
+ (unspec [(match_operand 2 "" "")
+ (match_operand 3 "" "")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_macho_correct_pic_si (operands[0], operands[1], operands[2],
+ operands[3]));
+ else
+ emit_insn (gen_macho_correct_pic_di (operands[0], operands[1], operands[2],
+ operands[3]));
+
+ DONE;
+})
+
+(define_insn "macho_correct_pic_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "macho_correct_pic_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "immediate_operand" "s")
+ (match_operand:DI 3 "immediate_operand" "s")]
+ 16)))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "*call_indirect_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+{
+ return "b%T0l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 0, 2);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_indirect_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ return "b%T1l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 1, 3);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+ return "b %z0";
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ return \"b %z1\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*sibcall_symbolic_64"
+ [(call (mem:SI (match_operand:DI 0 "call_operand" "s,c")) ; 64
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic_64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+/* APPLE LOCAL begin 64-bit */
+(define_insn "*save_fpregs_with_label_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (use (match_operand:DI 3 "" ""))
+ (set (match_operand:DF 4 "memory_operand" "=m")
+ (match_operand:DF 5 "gpc_reg_operand" "f"))])]
+ "TARGET_64BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*save_vregs_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (set (match_operand:V4SI 3 "any_operand" "=m")
+ (match_operand:V4SI 4 "register_operand" "v"))])]
+ "TARGET_64BIT"
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_vregs_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" "=r"))
+ (set (match_operand:V4SI 4 "register_operand" "=v")
+ (match_operand:V4SI 5 "any_operand" "m"))])]
+ "TARGET_64BIT"
+ "bl %z2")
+
+(define_insn "*save_vregs_with_label_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (use (match_operand:DI 3 "" ""))
+ (set (match_operand:V4SI 4 "any_operand" "=m")
+ (match_operand:V4SI 5 "register_operand" "v"))])]
+ "TARGET_64BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+/* APPLE LOCAL end 64-bit */
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.opt b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.opt
new file mode 100644
index 000000000..f67250296
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin.opt
@@ -0,0 +1,31 @@
+; Darwin options for PPC port.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+m64
+Target RejectNegative Mask(64BIT)
+Generate 64-bit code
+
+m32
+Target RejectNegative InverseMask(64BIT)
+Generate 32-bit code
+; APPLE LOCAL begin mdynamic-no-pic
+; APPLE LOCAL end mdynamic-no-pic
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin64.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin64.h
new file mode 100644
index 000000000..80e802d89
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin64.h
@@ -0,0 +1,36 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC64)");
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT \
+ | MASK_MULTIPLE | MASK_NEW_MNEMONICS | MASK_PPC_GFXOPT)
+
+#undef DARWIN_ARCH_SPEC
+#define DARWIN_ARCH_SPEC "ppc64"
+
+#undef DARWIN_SUBARCH_SPEC
+#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC
+
+#undef DARWIN_CRT2_SPEC
+#define DARWIN_CRT2_SPEC ""
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin7.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin7.h
new file mode 100644
index 000000000..4c1cda3ca
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin7.h
@@ -0,0 +1,31 @@
+/* Target definitions for Darwin 7.x (Mac OS X) systems.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Machine dependent libraries. Include libmx when compiling for
+ Darwin 7.0 and above, but before libSystem, since the functions are
+ actually in libSystem but for 7.x compatibility we want them to be
+ looked for in libmx first. Include libmx by default because otherwise
+ libstdc++ isn't usable. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %:version-compare(!< 10.3 mmacosx-version-min= -lmx)\
+ -lSystem}"
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin8.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin8.h
new file mode 100644
index 000000000..ee583a2cc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/darwin8.h
@@ -0,0 +1,33 @@
+/* Target definitions for Darwin 8.0 and above (Mac OS X) systems.
+ Copyright (C) 2004, 2005
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Machine dependent libraries. Include libmx when compiling on
+ Darwin 7.0 and above, but before libSystem, since the functions are
+ actually in libSystem but for 7.x compatibility we want them to be
+ looked for in libmx first---but only do this if 7.x compatibility
+ is a concern, which it's not in 64-bit mode. Include
+ libSystemStubs when compiling on (not necessarily for) 8.0 and
+ above and not 64-bit long double. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %{!mlong-double-64:%{pg:-lSystemStubs_profile;:-lSystemStubs}} \
+ %{!m64:%:version-compare(>< 10.3 10.4 mmacosx-version-min= -lmx)} -lSystem}"
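+
+/* For illustration, reading the spec above (stated as an assumption
+ about the version-compare spec function): ">< 10.3 10.4" succeeds only
+ when mmacosx-version-min is at least 10.3 and below 10.4, so -lmx is
+ linked just for that compatibility window, and only for 32-bit links
+ because of the %{!m64:...} guard. */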
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/host-darwin.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/host-darwin.c
new file mode 100644
index 000000000..754816be1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/host-darwin.c
@@ -0,0 +1,157 @@
+/* APPLE LOCAL file mainline 2006-06-02 4508814 */
+/* Darwin/powerpc host-specific hook definitions.
+ Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <signal.h>
+#include <sys/ucontext.h>
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "toplev.h"
+#include "diagnostic.h"
+#include "config/host-darwin.h"
+
+static void segv_crash_handler (int);
+static void segv_handler (int, siginfo_t *, void *);
+static void darwin_rs6000_extra_signals (void);
+
+#ifndef HAVE_DECL_SIGALTSTACK
+/* This doesn't have a prototype in signal.h in 10.2.x and earlier,
+ fixed in later releases. */
+extern int sigaltstack(const struct sigaltstack *, struct sigaltstack *);
+#endif
+
+/* The fields of the mcontext_t type have acquired underscores in later
+ OS versions. */
+#ifdef HAS_MCONTEXT_T_UNDERSCORES
+#define MC_FLD(x) __ ## x
+#else
+#define MC_FLD(x) x
+#endif
+
+#undef HOST_HOOKS_EXTRA_SIGNALS
+#define HOST_HOOKS_EXTRA_SIGNALS darwin_rs6000_extra_signals
+
+/* On Darwin/powerpc, hitting the stack limit turns into a SIGSEGV.
+ This code detects the difference between hitting the stack limit and
+ a true wild pointer dereference by looking at the instruction that
+ faulted; only a few kinds of instruction are used to access below
+ the previous bottom of the stack. */
+
+static void
+segv_crash_handler (int sig ATTRIBUTE_UNUSED)
+{
+ internal_error ("Segmentation Fault (code)");
+}
+
+static void
+segv_handler (int sig ATTRIBUTE_UNUSED,
+ siginfo_t *sip ATTRIBUTE_UNUSED,
+ void *scp)
+{
+ ucontext_t *uc = (ucontext_t *)scp;
+ sigset_t sigset;
+ unsigned faulting_insn;
+
+ /* The fault might have happened when trying to run some instruction, in
+ which case the next line will segfault _again_. Handle this case. */
+ signal (SIGSEGV, segv_crash_handler);
+ sigemptyset (&sigset);
+ sigaddset (&sigset, SIGSEGV);
+ sigprocmask (SIG_UNBLOCK, &sigset, NULL);
+
+ faulting_insn = *(unsigned *)uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0);
+
+ /* Note that this only has to work for GCC, so we don't have to deal
+ with all the possible cases (GCC has no AltiVec code, for
+ instance). It's complicated because Darwin allows stores to
+ below the stack pointer, and the prologue code takes advantage of
+ this. */
+
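+ /* For illustration: in the first test below, the mask 0xFFFF8000
+ keeps the 6-bit opcode, both 5-bit register fields, and the sign bit
+ of the 16-bit displacement; 0x94218000 is "stwu r1,-N(r1)" with that
+ sign bit set, i.e. a store below the current stack pointer. */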
+ if ((faulting_insn & 0xFFFF8000) == 0x94218000 /* stwu %r1, -xxx(%r1) */
+ || (faulting_insn & 0xFC1F03FF) == 0x7C01016E /* stwux xxx, %r1, xxx */
+ || (faulting_insn & 0xFC1F8000) == 0x90018000 /* stw xxx, -yyy(%r1) */
+ || (faulting_insn & 0xFC1F8000) == 0xD8018000 /* stfd xxx, -yyy(%r1) */
+ || (faulting_insn & 0xFC1F8000) == 0xBC018000 /* stmw xxx, -yyy(%r1) */)
+ {
+ char *shell_name;
+
+ fnotice (stderr, "Out of stack space.\n");
+ shell_name = getenv ("SHELL");
+ if (shell_name != NULL)
+ shell_name = strrchr (shell_name, '/');
+ if (shell_name != NULL)
+ {
+ static const char * shell_commands[][2] = {
+ { "sh", "ulimit -S -s unlimited" },
+ { "bash", "ulimit -S -s unlimited" },
+ { "tcsh", "limit stacksize unlimited" },
+ { "csh", "limit stacksize unlimited" },
+ /* zsh doesn't have "unlimited"; this will work under the
+ default configuration. */
+ { "zsh", "limit stacksize 32m" }
+ };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE (shell_commands); i++)
+ if (strcmp (shell_commands[i][0], shell_name + 1) == 0)
+ {
+ fnotice (stderr,
+ "Try running '%s' in the shell to raise its limit.\n",
+ shell_commands[i][1]);
+ }
+ }
+
+ if (global_dc->abort_on_error)
+ fancy_abort (__FILE__, __LINE__, __FUNCTION__);
+
+ exit (FATAL_EXIT_CODE);
+ }
+
+ fprintf (stderr, "[address=%08lx pc=%08x]\n",
+ uc->uc_mcontext->MC_FLD(es).MC_FLD(dar),
+ uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0));
+ internal_error ("Segmentation Fault");
+ exit (FATAL_EXIT_CODE);
+}
+
+static void
+darwin_rs6000_extra_signals (void)
+{
+ struct sigaction sact;
+ stack_t sigstk;
+
+ sigstk.ss_sp = xmalloc (SIGSTKSZ);
+ sigstk.ss_size = SIGSTKSZ;
+ sigstk.ss_flags = 0;
+ if (sigaltstack (&sigstk, NULL) < 0)
+ fatal_error ("While setting up signal stack: %m");
+
+ sigemptyset(&sact.sa_mask);
+ sact.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ sact.sa_sigaction = segv_handler;
+ if (sigaction (SIGSEGV, &sact, 0) < 0)
+ fatal_error ("While setting up signal handler: %m");
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/host-ppc64-darwin.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/host-ppc64-darwin.c
new file mode 100644
index 000000000..ec7f9b3f1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/host-ppc64-darwin.c
@@ -0,0 +1,31 @@
+/* ppc64-darwin host-specific hook definitions.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "config/host-darwin.h"
+
+/* Darwin doesn't do anything special for ppc64 hosts; this file exists just
+ to include config/host-darwin.h. */
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/libgcc-ppc64.ver b/gcc-4.2.1-5666.3/gcc/config/rs6000/libgcc-ppc64.ver
new file mode 100644
index 000000000..b27b4b492
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/libgcc-ppc64.ver
@@ -0,0 +1,7 @@
+GCC_3.4.4 {
+ # long double support
+ __gcc_qadd
+ __gcc_qsub
+ __gcc_qmul
+ __gcc_qdiv
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/mpc.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/mpc.md
new file mode 100644
index 000000000..75e475208
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/mpc.md
@@ -0,0 +1,110 @@
+;; Scheduling description for Motorola PowerPC processor cores.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "mpc,mpcfp")
+(define_cpu_unit "iu_mpc,mciu_mpc" "mpc")
+(define_cpu_unit "fpu_mpc" "mpcfp")
+(define_cpu_unit "lsu_mpc,bpu_mpc" "mpc")
+
+;; MPCCORE 32-bit SCIU, MCIU, LSU, FPU, BPU
+;; 505/801/821/823
+
+(define_insn_reservation "mpccore-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc")
+
+(define_insn_reservation "mpccore-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc")
+
+(define_insn_reservation "mpccore-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc,iu_mpc")
+
+(define_insn_reservation "mpccore-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "mpccore"))
+ "mciu_mpc")
+
+; Divide latency varies greatly, from 2 to 11 cycles; use 6 as an average.
+(define_insn_reservation "mpccore-idiv" 6
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "mpccore"))
+ "mciu_mpc*6")
+
+(define_insn_reservation "mpccore-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,nothing,bpu_mpc")
+
+(define_insn_reservation "mpccore-fpcompare" 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc,bpu_mpc")
+
+(define_insn_reservation "mpccore-fp" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*2")
+
+(define_insn_reservation "mpccore-dmul" 5
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*5")
+
+(define_insn_reservation "mpccore-sdiv" 10
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*10")
+
+(define_insn_reservation "mpccore-ddiv" 17
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*17")
+
+(define_insn_reservation "mpccore-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "mpccore"))
+ "bpu_mpc")
+
+(define_insn_reservation "mpccore-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr,mfcr,mtcr,isync")
+ (eq_attr "cpu" "mpccore"))
+ "bpu_mpc")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/ops-to-gp b/gcc-4.2.1-5666.3/gcc/config/rs6000/ops-to-gp
new file mode 100755
index 000000000..becb40674
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/ops-to-gp
@@ -0,0 +1,620 @@
+#!/bin/sh
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# Creates vec.h used by rs6000.c
+
+arg0=`basename $0`
+err() {
+ echo "$arg0: $*" 1>&2
+ exit 2
+}
+
+if [ $# -eq 0 ] ; then
+ echo "Usage: $arg0 [ -mcc | -gcc ] builtin-ops ..." 1>&2
+ exit 1
+fi
+
+MCC=1
+GCC=0
+suffix="gp"
+if [ "$1" = "-mcc" ] ; then
+ shift;
+elif [ "$1" = "-gcc" ] ; then
+ GCC=1
+ MCC=0
+ suffix="h"
+ shift;
+fi
+
+output=`basename $1 .ops`
+gperf="gperf -G -a -o -k1-15 -p -t -D -T -N Is_Builtin_Function $output.gp";
+
+# Lines in the ops file have the form
+# @ @ betype betype-code type-spelling
+# @ fetype betype [code]
+# @ @ @ instruction type
+# generic op1 op2 ... opn = result specific when configured [addressible
+# [instruction [const_ptr_ok [volatile_ptr_ok [transform [predicate]]]]]]
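+#
+# For illustration only, a hypothetical (made-up) entry in that format:
+#   vec_foo v4si v4si = v4si vec_foo_v4si WHEN CONFIGURED
+# The names and field values here are placeholders, not real types or
+# conditions; actual rows live in vec.ops and builtin.ops.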
+
+# Sort the ops file to put it in a canonical order.
+sort -u $* | \
+
+# Add specific function uid's, make generic functions from specific
+# functions, validate the types used, compute default parameters, and
+# compute parts of the default transform and predicate functions.
+awk 'BEGIN {
+ i = 0
+ EQ = i++
+ RESULT = i++
+ SPECIFIC = i++
+ WHEN = i++
+ CONFIGURED = i++
+ ADDRESSIBLE = i++
+ INSTRUCTION = i++
+ CONST_PTR_OK = i++
+ VOLATILE_PTR_OK = i++
+ TRANSFORM = i++
+ PREDICATE = i++
+ n_lines = 1;
+ tree[3] = "Make_Folded_4tree";
+ tree[2] = "Make_Folded_3tree";
+ tree[1] = "Make_Folded_Btree";
+ tree[0] = "Make_Utree";
+ optimize["vec_sub"] = 1;
+ optimize["vec_subs"] = 1;
+ optimize["vec_xor"] = 1;
+ optimize["vec_andc"] = 1;
+ optimize["vec_avg"] = 2;
+ optimize["vec_or"] = 2;
+ optimize["vec_and"] = 2;
+ optimize["vec_max"] = 2;
+ optimize["vec_min"] = 2;
+ optimize["vec_sld"] = 3;
+ optimize["vec_splat_s8"] = 4;
+ optimize["vec_splat_s16"] = 5;
+ optimize["vec_splat_s32"] = 6;
+ optimize["vec_splat_u8"] = 4;
+ optimize["vec_splat_u16"] = 5;
+ optimize["vec_splat_u32"] = 6;
+ optimize["vec_cmpeq"] = 7;
+ optimize["vec_lvsl"] = 8;
+ optimize["vec_lvsr"] = 9;
+ # These operations need additional transformation. Key off the
+ # optimize attribute to identify them.
+ optimize["vec_cmplt"] = 10;
+ optimize["vec_cmple"] = 10;
+ optimize["vec_abs"] = 11;
+ optimize["vec_abss"] = 11;
+ }
+ function no_type(t) {
+ printf "%% Error: type %s not declared.\n", t;
+ status = 1;
+ exit;
+ }
+ # Record the type.
+ $1 == "@" {
+ if ($2 == "@") {
+ if ($3 == "@") {
+ # Definition of an instruction.
+ insn_type[$4] = $5; # type
+ } else {
+ # Definition of a betype.
+ becode[$3] = $4; # betype-code
+ bespell[$3] = $5; # type-spelling
+ gsub(/\=/, " ", bespell[$3]);
+ }
+ } else {
+ # Definition of a fetype.
+ print $0;
+ if (!becode[$3]) no_type($3); # Must have defined the betype.
+ betype[$2] = $3; # betype;
+ if (NF == 3)
+ code[$2] = "";
+ else
+ code[$2] = $4; # code
+ }
+ }
+ function no_equal(i,l) {
+ printf "%% Syntax error %d: %s\n", i, l;
+ status = 1;
+ exit;
+ }
+ function error(f,a) {
+ printf( ("%% error: " f), a);
+ status = 1;
+ exit;
+ }
+ # Ignore comment lines.
+ $1 != "#" && $1 != "@" {
+ # Generate the signature of the specific function, the predicate,
+ # the transform, the arguments to the transform function, the
+ # arguments to the predicate function, and the spelling of the
+ # function type.
+ signature = "";
+ predicate = "";
+ transform = "";
+ insn_code = "";
+ transform_args = "";
+ predicate_args = "";
+ function_type = "";
+ # First, consider the parameter types.
+ for (i = 2; $i != "=" && i < NF; i++) {
+ if ($i != "...") {
+ if (!betype[$i]) no_type($i);
+ signature = (signature " " $i);
+ predicate = (predicate "_" betype[$i]);
+ transform = (transform code[$i]);
+ transform_args = (transform_args ", ND_kid(t," i-1 ")");
+ predicate_args = (predicate_args " " becode[betype[$i]]);
+ if (function_type)
+ function_type = (function_type ", " bespell[betype[$i]]);
+ else
+ function_type = bespell[betype[$i]];
+ }
+ }
+ constraints = (transform "@");
+ # Check the syntax of the ops file.
+ if ($i != "=" || NF > i+PREDICATE || NF < i+CONFIGURED) no_equal(i,$0);
+ if (!betype[$(i+RESULT)]) no_type($(i+RESULT));
+ # Incorporate the result type.
+ if (i == 2) {
+ predicate = "_void";
+ function_type = "void";
+ }
+ signature = ($(i+SPECIFIC) signature);
+ predicate = sprintf("is_%s_func%s", betype[$(i+RESULT)], predicate);
+ predicate_args = (becode[betype[$(i+RESULT)]] predicate_args);
+ function_type = sprintf("(%s (*)(%s))", bespell[betype[$(i+RESULT)]], \
+ function_type);
+ if (substr(code[$(i+RESULT)], 1, 1) == "j") {
+ # Handle a jump asm. The code is expected to be
+ # j={cc-bit-num}={cc-bit-value}[={r|d}]. The operation must have
+ # one operand if the code d is used and two operands otherwise.
+ # The transform function can implement the r code by reversing the
+ # two operands. In all cases, the first operand is a computed
+ # constant encoding both the bit number and the test.
+ n = split(code[$(i+RESULT)], jmp, "=");
+ if (jmp[n] == "d" && i != 3) error("%d operands", i-2);
+ if (jmp[n] != "d" && i != 4) error("%d operands", i-2);
+ if (jmp[n] == "r")
+ transform_args = ", ND_kid(t,2), ND_kid(t,1)";
+ transform_args = sprintf("%s(OP_VCMP%s%s", tree[i-2], \
+ toupper(jmp[3]), transform_args);
+ if (jmp[n] == "r")
+ transform = ("r" transform);
+ insn_code = sprintf("CODE_FOR_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ transform = sprintf("transform_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ } else {
+ transform_args = sprintf("%s(OP_%sASM%s%s", tree[i-2], \
+ toupper(code[$(i+RESULT)]), \
+ toupper(transform), transform_args);
+ insn_code = sprintf("CODE_FOR_%sf%s", code[$(i+RESULT)], transform);
+ transform = sprintf("transform_%sf%s", code[$(i+RESULT)], transform);
+ }
+ # Give a unique id to the signature
+ if (count[signature] == 0)
+ count[signature] = ++uid[$(i+SPECIFIC)];
+
+ # Compute the default instruction name
+ nf = split($(i+SPECIFIC), part, "_");
+ instruction = ("MOP_" part[nf]);
+
+ # Compute the insn_code, but use the instruction override if given.
+ if (NF >= i+INSTRUCTION)
+ instruction = $(i+INSTRUCTION);
+ if (insn_type[instruction])
+ insn_code = (insn_code "_" insn_type[instruction]);
+
+ # Allow the user to override the addressibility, instruction,
+ # const_ptr_ok, volatile_ptr_ok, transform, and predicate.
+ if (NF >= i+ADDRESSIBLE)
+ addressible = "";
+ else
+ addressible = "FALSE";
+
+ if (NF >= i+INSTRUCTION)
+ instruction = "";
+ else if (substr($1, 1, 4) == "vec_")
+ print "@ @3", instruction;
+
+ if (NF >= i+CONST_PTR_OK)
+ const_ptr_ok = "";
+ else
+ const_ptr_ok = "FALSE";
+
+ if (NF >= i+VOLATILE_PTR_OK)
+ volatile_ptr_ok = "";
+ else
+ volatile_ptr_ok = "FALSE";
+
+ if (NF >= i+TRANSFORM)
+ transform = "";
+ else
+ print "@ @1", transform, transform_args;
+
+ if (NF >= i+PREDICATE)
+ predicate = "";
+ else
+ print "@ @2", i-2, predicate, predicate_args, function_type;
+
+ if (optimize[$1])
+ optimize_method = optimize[$1];
+ else
+ optimize_method = "0";
+
+ # Record the line, addressibility, instruction, transform,
+ # predicate, and unique id.
+ line[n_lines++] = ($0 " " addressible " " instruction " " \
+ const_ptr_ok " " volatile_ptr_ok " " transform " " \
+ predicate " " insn_code " " constraints " " \
+ optimize_method " " count[signature]);
+ }
+ END {
+ if (status) exit;
+ # generic op1 op2 ... opn = result specific when configured
+ # addressable instruction const_ptr_ok volatile_ptr_ok
+ # transform predicate insn_code constraints optimize uid
+ SPECIFIC = 12
+ for (i = 1; i < n_lines; i++) {
+ nf = split(line[i], part);
+ specific = part[nf-SPECIFIC];
+
+ # Print the generic form.
+ printf "%s", part[1];
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+
+ # Print the specific form.
+ printf "%s", specific;
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+ }
+ }' | \
+
+# Strip out load and store qualifiers.
+sed -e 's/_load_op//g' -e 's/_store_op//g' | \
+
+# Sort the processed file and eliminate duplicates.
+sort -u | \
+
+# Append the count of each generic function to each line.
+awk 'function push() {
+ if (num)
+ for (i = 0; i < num; i++)
+ print line[i], num;
+ num = 0;
+ }
+ $1 == "@" {
+ print $0;
+ }
+ $1 != "@" {
+ if (last != $1)
+ push();
+ last = $1;
+ line[num++] = $0;
+ }
+ END {
+ push();
+ }' | \
+
+# Now compute the gperf input file.
+# Lines now have a fixed format
+# generic op1 ... opn = result specific instruction when configured
+# addressible const_ptr_ok volatile_ptr_ok transform predicate
+# insn_code constraints optimize count
+awk 'BEGIN {
+ MCC = '$MCC'
+ GCC = '$GCC'
+ i = 0;
+ COUNT = i++
+ OPTIMIZE = i++
+ CONSTRAINTS = i++
+ INSN_CODE = i++
+ PREDICATE = i++
+ TRANSFORM = i++
+ VOLATILE_PTR_OK = i++
+ CONST_PTR_OK = i++
+ INSTRUCTION = i++
+ ADDRESSIBLE = i++
+ CONFIGURED = i++
+ WHEN = i++
+ SPECIFIC = i++
+ RESULT = i++
+ EQ = i++
+ OPN = i++
+ NARGS = i++
+ if (MCC) {
+ print "%{";
+ print "/* Command-line: '"$gperf"' */";
+ MAXARGS = 5
+ }
+ if (GCC)
+ MAXARGS = 3
+ }
+ function write_test(tree, type, num) {
+ if (type == "PTR") {
+ printf "\n && TY_kind(%s) == KIND_POINTER", tree;
+ } else if (type == "I5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && ((UINT32)Targ_To_Host(tc) + 16) < 32";
+ } else if (type == "U5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 32";
+ } else if (type == "U4") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 16";
+ } else if (type == "U2") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 4";
+ } else if (type == "BETYPE_U4" || type == "BETYPE_I4") {
+ printf "\n && is_integer_type(%s)", tree;
+ } else {
+ printf "\n && Similar_Types(%s,", tree;
+ printf "\n\t\t Be_Type_Tbl(%s), IGNORE_QUALIFIERS)", type;
+ }
+ }
+ $1 == "@" {
+ if (MCC) {
+ if ($2 == "@1") {
+ # Write the predicate function from the given parameters.
+ # The format is:
+ # @ @1 transform_ifii Make_3tree(OP_IASMII, ND_kid(t,1), ND_kid(t,2)
+ print "";
+ print "/*ARGSUSED*/";
+ print "static void";
+ print $3 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ printf " *t = *%s", $4;
+ for (i = 5; i <= NF; i++) printf " %s", $i;
+ print ",";
+ if (split($3,jmp,"_") == 5 && jmp[2] == "j")
+ printf "\t\t MK_I4CONST_ND((self->data << 5) + %d));\n", \
+ jmp[3];
+ else
+ print "\t\t MK_I4CONST_ND(self->data));";
+
+ print " Is_True(self->data > 0, (\"No implementation for %s\", self->name));";
+ print "}";
+ } else if ($2 == "@2") {
+ # Write the transform function from the given parameters.
+ # The format is:
+ # @ @2 2 is_int_func_int_int BETYPE_I4 BETYPE_I4 BETYPE_I4
+ # (int (*)(int, int))
+ print "";
+ print "/*ARGSUSED*/";
+ print "static BOOL";
+ print $4 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ print " TCON tc;";
+ printf " if (ND_nkids(t) == %d", $3+1;
+ write_test("ST_type(ND_dec(func))", $5, "");
+ for (i = 1; i <= $3; i++) {
+ printf "\n && ND_name(ND_kid(t,%d)) == TO_VAL", i;
+ write_test(sprintf("The_Tree_Type(ND_kid(t,%d))", i), $(i+5), i);
+ }
+ print ")";
+ print " return TRUE;";
+ print " Error_Prt_Line (ND_linenum(t), ec_builtin_function_type, self->name,";
+ i = $3+6;
+ printf "\t\t \"%s", $i;
+ while (++i <= NF) printf " %s", $i;
+ print "\");";
+ print " return FALSE;";
+ print "}";
+ } else if ($2 == "@3") {
+ if (once++ == 0) printf "\n#ifndef HAVE_ALTIVEC\n";
+ printf "#define %s -1\n", $3;
+ } else {
+ if (once && twice++ == 0) printf "#endif /* HAVE_ALTIVEC */\n\n";
+ printf "extern struct a_type *T_%s;\n", $2;
+ }
+ }
+ next;
+ }
+ $1 == "%" {
+ print $0;
+ status = 1;
+ exit;
+ }
+ {
+ # Compute the signature of the generic function.
+ signature=$1;
+ for (i = 2; i <= NF-OPN; i++) {
+ if ($i != "...")
+ signature=(signature " " $i);
+ }
+
+ # Ensure that the signature is unique.
+ if (signature_line[signature]) {
+ print "Ambiguous signatures:";
+ print $0;
+ print line[signature_line[signature]];
+ }
+ signature_line[signature] = n_lines;
+
+ # Require that overloaded functions have the same attributes:
+ # number of arguments, when, configured, and addressible.
+ if (same_arg_count[$1] && same_arg_count[$1] != NF)
+ printf "%% number of arguments for %s varies: %d and %d\n", \
+ $1, NF-NARGS, same_arg_count[$1]-NARGS;
+ same_arg_count[$1] = NF;
+
+ if (same_when[$1] && same_when[$1] != $(NF-WHEN))
+ printf "%% when for %s varies: %s and %s\n", \
+ $1, $(NF-WHEN), same_when[$1];
+ same_when[$1] = $(NF-WHEN);
+
+ if (same_configured[$1] && same_configured[$1] != $(NF-CONFIGURED))
+ printf "%% configured for %s varies: %s and %s\n", \
+ $1, $(NF-CONFIGURED), same_configured[$1];
+ same_configured[$1] = $(NF-CONFIGURED);
+
+ if (same_addressible[$1] && same_addressible[$1] != $(NF-ADDRESSIBLE))
+ printf "%% addressible for %s varies: %s and %s\n", \
+ $1, $(NF-ADDRESSIBLE), same_addressible[$1];
+ else if (same_addressible[$1] && same_addressible[$1] != "FALSE")
+ printf "%% Overloaded function %s is addressible\n", $1
+ same_addressible[$1] = $(NF-ADDRESSIBLE);
+
+ # Record the line.
+ line[n_lines++] = $0;
+ }
+ function push(fcn, n) {
+ if (last) printf "};\n";
+ # Gcc3: declare as arrays of const pointers
+ if (fcn) printf "static const struct builtin *const O_%s[%d] = {\n", fcn, n;
+ last = fcn;
+ }
+ function mangle(name) {
+ if (split(name, names, ":") == 1)
+ return ("B_" names[1]);
+ return ("B" names[2] "_" names[1]);
+ }
+ END {
+ if (status) exit;
+
+ # Gcc3: Mark file as Apple local
+ printf "/* APPLE LOCAL file AltiVec */\n";
+ printf "/* This file is generated by ops-to-gp. Do not edit. */\n\n";
+ printf "/* To regenerate execute:\n";
+ printf " ops-to-gp -gcc vec.ops builtin.ops\n";
+ printf " with the current directory being gcc/config/rs6000. */\n\n";
+
+ # Output the description of each specific function.
+ uid = 0;
+ if (MCC) print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf "static const struct builtin %s = {", mangle(fcn);
+ if (GCC) printf " {";
+ ellipsis = 1;
+ for (j = 2; j <= nf-OPN; j++)
+ if (part[j] != "...") {
+ printf " &T_%s,", part[j];
+ } else {
+ ellipsis = -1;
+ printf " NULL,";
+ }
+ while (j++ <= MAXARGS+1)
+ printf " NULL,";
+ instruction = part[nf-INSTRUCTION];
+ if (substr(instruction, 1, 4) == "MOP_")
+ instruction = substr(instruction, 5);
+ if (substr(instruction, length(instruction)) == "D")
+ instruction = (substr(instruction, 1, length(instruction) - 1) ".");
+ # Gcc3: Prefix each specific instruction with a "*"
+ if (match (instruction, "^[a-zA-Z]") > 0)
+ instruction = "*" instruction;
+ if (GCC) printf " },";
+ if (GCC) printf " \"%s\",", substr(part[nf-CONSTRAINTS], 1, length(part[nf-CONSTRAINTS]) - 1);
+ printf " &T_%s,", part[nf-RESULT];
+ if (MCC) printf " \"%s\",", part[nf-SPECIFIC];
+ printf " %d,", ellipsis * (nf - NARGS);
+ if (MCC) {
+ printf " %s,", part[nf-WHEN];
+ printf " %s,", part[nf-ADDRESSIBLE];
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-CONFIGURED];
+ printf " %s,", part[nf-INSTRUCTION];
+ printf " %s,", part[nf-TRANSFORM];
+ printf " %s", part[nf-PREDICATE];
+ } else if (GCC) {
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-OPTIMIZE];
+ printf " \"%s\",", part[nf-SPECIFIC];
+ printf " \"%s\",", instruction;
+ printf " %s,", part[nf-INSN_CODE];
+ printf " B_UID(%d)", uid++;
+ }
+ printf " };\n";
+ }
+ done[fcn] = 1;
+ }
+
+ if (GCC) printf "#define LAST_B_UID B_UID(%d)\n", uid;
+
+ if (GCC) {
+ # Output the description of each specific function.
+ print "";
+ uid = 0;
+ for (i in done)
+ done[i] = "";
+ print "const struct builtin * const Builtin[] = {"
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf " &%s,\n", mangle(fcn);
+ }
+ done[fcn] = 1;
+ }
+ print "};"
+ }
+
+ # Output the overload tables for each generic function.
+ print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ if (last != fcn)
+ push(fcn, part[nf]);
+ printf " &%s,\n", mangle(part[nf-SPECIFIC]);
+ }
+ push("", 0);
+
+ # Output the builtin function structure.
+ print "";
+ if (MCC) {
+ print "%}";
+ print "struct overloadx {";
+ print " char *name;";
+ print " int fcns;";
+ print " int args;";
+ print " struct builtin **functions;";
+ print "};";
+ print "%%";
+ } else if (GCC) {
+ print "const struct overloadx Overload[] = {";
+ }
+
+ # Output the builtin function list and data.
+ uid = 0;
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ args = nf - NARGS;
+ if (part[nf-OPN] == "...") args = -args;
+ if (last != fcn) {
+ if (MCC) printf "%s, %d, %d, O_%s\n", fcn, part[nf], args, fcn;
+ if (GCC) printf " { \"%s\", %d, %d, O_%s, O_UID(%d) },\n", \
+ fcn, part[nf], args, fcn, uid++;
+ }
+ last = fcn;
+ }
+
+ if (GCC) {
+ print " { NULL, 0, 0, NULL, 0 }"
+ print "};";
+
+ printf "#define LAST_O_UID O_UID(%d)\n", uid;
+ }
+
+ }' > $output.$suffix
+
+if [ "$MCC" = "1" ] ; then
+ $gperf > $output.h
+fi
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/power4.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/power4.md
new file mode 100644
index 000000000..53ac066d8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/power4.md
@@ -0,0 +1,410 @@
+;; Scheduling description for IBM Power4 and PowerPC 970 processors.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+;; Sources: IBM Red Book and White Paper on POWER4
+
+;; The POWER4 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip).
+;; Instructions that update more than one register get broken into two
+;; (split) or more internal ops. The chip can issue up to 5
+;; internal ops per cycle.
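+;; (Illustrative note, not in the original: a load-with-update writes
+;; both its destination and its base register, so "power4-load-update"
+;; below claims two dispatch slots through the lsuq_power4 reservation.)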
+
+(define_automaton "power4iu,power4fpu,power4vec,power4misc")
+
+(define_cpu_unit "iu1_power4,iu2_power4" "power4iu")
+(define_cpu_unit "lsu1_power4,lsu2_power4" "power4misc")
+(define_cpu_unit "fpu1_power4,fpu2_power4" "power4fpu")
+(define_cpu_unit "bpu_power4,cru_power4" "power4misc")
+(define_cpu_unit "vec_power4,vecperm_power4" "power4vec")
+(define_cpu_unit "du1_power4,du2_power4,du3_power4,du4_power4,du5_power4"
+ "power4misc")
+
+(define_reservation "lsq_power4"
+ "(du1_power4,lsu1_power4)\
+ |(du2_power4,lsu2_power4)\
+ |(du3_power4,lsu2_power4)\
+ |(du4_power4,lsu1_power4)")
+
+(define_reservation "lsuq_power4"
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4)")
+
+(define_reservation "iq_power4"
+ "(du1_power4,iu1_power4)\
+ |(du2_power4,iu2_power4)\
+ |(du3_power4,iu2_power4)\
+ |(du4_power4,iu1_power4)")
+
+(define_reservation "fpq_power4"
+ "(du1_power4,fpu1_power4)\
+ |(du2_power4,fpu2_power4)\
+ |(du3_power4,fpu2_power4)\
+ |(du4_power4,fpu1_power4)")
+
+(define_reservation "vq_power4"
+ "(du1_power4,vec_power4)\
+ |(du2_power4,vec_power4)\
+ |(du3_power4,vec_power4)\
+ |(du4_power4,vec_power4)")
+
+(define_reservation "vpq_power4"
+ "(du1_power4,vecperm_power4)\
+ |(du2_power4,vecperm_power4)\
+ |(du3_power4,vecperm_power4)\
+ |(du4_power4,vecperm_power4)")
+
+
+; Dispatch slots are allocated in order conforming to program order.
+(absence_set "du1_power4" "du2_power4,du3_power4,du4_power4,du5_power4")
+(absence_set "du2_power4" "du3_power4,du4_power4,du5_power4")
+(absence_set "du3_power4" "du4_power4,du5_power4")
+(absence_set "du4_power4" "du5_power4")
+
+
+; Load/store
+(define_insn_reservation "power4-load" 4 ; 3
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-load-ext" 5
+ (and (eq_attr "type" "load_ext")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4,nothing,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,lsu2_power4,nothing,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4,nothing,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-load-ext-update" 5
+ (and (eq_attr "type" "load_ext_u")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4+iu2_power4,nothing,nothing,iu2_power4")
+
+(define_insn_reservation "power4-load-ext-update-indexed" 5
+ (and (eq_attr "type" "load_ext_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu1_power4,nothing,nothing,iu2_power4")
+
+(define_insn_reservation "power4-load-update-indexed" 3
+ (and (eq_attr "type" "load_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu2_power4")
+
+(define_insn_reservation "power4-load-update" 4 ; 3
+ (and (eq_attr "type" "load_u")
+ (eq_attr "cpu" "power4"))
+ "lsuq_power4")
+
+(define_insn_reservation "power4-fpload" 6 ; 5
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-fpload-update" 6 ; 5
+ (and (eq_attr "type" "fpload_u,fpload_ux")
+ (eq_attr "cpu" "power4"))
+ "lsuq_power4")
+
+(define_insn_reservation "power4-vecload" 6 ; 5
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-store" 12
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,iu1_power4)\
+ |(du2_power4,lsu2_power4,iu2_power4)\
+ |(du3_power4,lsu2_power4,iu2_power4)\
+ |(du4_power4,lsu1_power4,iu1_power4)")
+
+(define_insn_reservation "power4-store-update" 12
+ (and (eq_attr "type" "store_u")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4,iu1_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4,iu1_power4,iu2_power4)")
+
+(define_insn_reservation "power4-store-update-indexed" 12
+ (and (eq_attr "type" "store_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu2_power4,iu2_power4")
+
+(define_insn_reservation "power4-fpstore" 12
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,fpu1_power4)\
+ |(du2_power4,lsu2_power4,fpu2_power4)\
+ |(du3_power4,lsu2_power4,fpu2_power4)\
+ |(du4_power4,lsu1_power4,fpu1_power4)")
+
+(define_insn_reservation "power4-fpstore-update" 12
+ (and (eq_attr "type" "fpstore_u,fpstore_ux")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4,fpu1_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4,fpu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4,fpu2_power4)")
+
+(define_insn_reservation "power4-vecstore" 12
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,vec_power4)\
+ |(du2_power4,lsu2_power4,vec_power4)\
+ |(du3_power4,lsu2_power4,vec_power4)\
+ |(du4_power4,lsu1_power4,vec_power4)")
+
+(define_insn_reservation "power4-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
+
+
+; Integer latency is 2 cycles
+(define_insn_reservation "power4-integer" 2
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "power4"))
+ "iq_power4")
+
+(define_insn_reservation "power4-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4,iu1_power4,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4+du3_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4+du4_power4,\
+ iu2_power4,nothing,iu2_power4,nothing,iu1_power4)\
+ |(du3_power4+du4_power4+du1_power4,\
+ iu2_power4,nothing,iu1_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4+du2_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)")
+
+(define_insn_reservation "power4-insert" 4
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-cmp" 3
+ (and (eq_attr "type" "cmp,fast_compare")
+ (eq_attr "cpu" "power4"))
+ "iq_power4")
+
+(define_insn_reservation "power4-compare" 2
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,iu1_power4)")
+
+(define_bypass 4 "power4-compare" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-lmul-cmp" 7
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4*6,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4*6,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4*6,iu1_power4)")
+
+(define_bypass 10 "power4-lmul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-imul-cmp" 5
+ (and (eq_attr "type" "imul_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4*4,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4*4,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4*4,iu1_power4)")
+
+(define_bypass 8 "power4-imul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-lmul" 7
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*6)\
+ |(du2_power4,iu2_power4*6)\
+ |(du3_power4,iu2_power4*6)\
+ |(du4_power4,iu1_power4*6)")
+
+(define_insn_reservation "power4-imul" 5
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*4)\
+ |(du2_power4,iu2_power4*4)\
+ |(du3_power4,iu2_power4*4)\
+ |(du4_power4,iu1_power4*4)")
+
+(define_insn_reservation "power4-imul3" 4
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*3)\
+ |(du2_power4,iu2_power4*3)\
+ |(du3_power4,iu2_power4*3)\
+ |(du4_power4,iu1_power4*3)")
+
+
+; SPR move only executes in first IU.
+; Integer division only executes in second IU.
+(define_insn_reservation "power4-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,iu2_power4*35")
+
+(define_insn_reservation "power4-ldiv" 68
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,iu2_power4*67")
+
+
+(define_insn_reservation "power4-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,bpu_power4")
+
+
+; Branches take dispatch Slot 4.  The absence_sets prevent other insns
+; from grabbing previous dispatch slots once this one is assigned.
+(define_insn_reservation "power4-branch" 2
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "power4"))
+ "(du5_power4\
+ |du4_power4+du5_power4\
+ |du3_power4+du4_power4+du5_power4\
+ |du2_power4+du3_power4+du4_power4+du5_power4\
+ |du1_power4+du2_power4+du3_power4+du4_power4+du5_power4),bpu_power4")
+
+
+; Condition Register logical ops are split if non-destructive (RT != RB)
+(define_insn_reservation "power4-crlogical" 2
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,cru_power4")
+
+(define_insn_reservation "power4-delayedcr" 4
+ (and (eq_attr "type" "delayed_cr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,cru_power4,cru_power4")
+
+; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu
+(define_insn_reservation "power4-mfcr" 6
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ du1_power4+du2_power4+du3_power4+du4_power4+cru_power4,\
+ cru_power4,cru_power4,cru_power4")
+
+; mfcrf (1 field)
+(define_insn_reservation "power4-mfcrf" 3
+ (and (eq_attr "type" "mfcrf")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,cru_power4")
+
+; mtcrf (1 field)
+(define_insn_reservation "power4-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,iu1_power4")
+
+; Basic FP latency is 6 cycles
+(define_insn_reservation "power4-fp" 6
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "power4"))
+ "fpq_power4")
+
+(define_insn_reservation "power4-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "power4"))
+ "fpq_power4")
+
+(define_insn_reservation "power4-sdiv" 33
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,fpu1_power4*28)\
+ |(du2_power4,fpu2_power4*28)\
+ |(du3_power4,fpu2_power4*28)\
+ |(du4_power4,fpu1_power4*28)")
+
+(define_insn_reservation "power4-sqrt" 40
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,fpu1_power4*35)\
+ |(du2_power4,fpu2_power4*35)\
+ |(du3_power4,fpu2_power4*35)\
+ |(du4_power4,fpu2_power4*35)")
+
+(define_insn_reservation "power4-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
+
+
+; VMX
+(define_insn_reservation "power4-vecsimple" 2
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-veccomplex" 5
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+; vecfp compare
+(define_insn_reservation "power4-veccmp" 8
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-vecfloat" 8
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "power4"))
+ "vpq_power4")
+
+(define_bypass 4 "power4-vecload" "power4-vecperm")
+
+(define_bypass 3 "power4-vecsimple" "power4-vecperm")
+(define_bypass 6 "power4-veccomplex" "power4-vecperm")
+(define_bypass 3 "power4-vecperm"
+ "power4-vecsimple,power4-veccomplex,power4-vecfloat")
+(define_bypass 9 "power4-vecfloat" "power4-vecperm")
+
+(define_bypass 5 "power4-vecsimple,power4-veccomplex"
+ "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_bypass 4 "power4-vecsimple,power4-vecperm" "power4-vecstore")
+(define_bypass 7 "power4-veccomplex" "power4-vecstore")
+(define_bypass 10 "power4-vecfloat" "power4-vecstore")
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/power5.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/power5.md
new file mode 100644
index 000000000..ce6892605
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/power5.md
@@ -0,0 +1,321 @@
+;; Scheduling description for IBM POWER5 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+;; Sources: IBM Red Book and White Paper on POWER5
+
+;; The POWER5 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip).
+;; Instructions that update more than one register get broken into two
+;; (split) or more internal ops. The chip can issue up to 5
+;; internal ops per cycle.
+
+(define_automaton "power5iu,power5fpu,power5misc")
+
+(define_cpu_unit "iu1_power5,iu2_power5" "power5iu")
+(define_cpu_unit "lsu1_power5,lsu2_power5" "power5misc")
+(define_cpu_unit "fpu1_power5,fpu2_power5" "power5fpu")
+(define_cpu_unit "bpu_power5,cru_power5" "power5misc")
+(define_cpu_unit "du1_power5,du2_power5,du3_power5,du4_power5,du5_power5"
+ "power5misc")
+
+(define_reservation "lsq_power5"
+ "(du1_power5,lsu1_power5)\
+ |(du2_power5,lsu2_power5)\
+ |(du3_power5,lsu2_power5)\
+ |(du4_power5,lsu1_power5)")
+
+(define_reservation "iq_power5"
+ "(du1_power5,iu1_power5)\
+ |(du2_power5,iu2_power5)\
+ |(du3_power5,iu2_power5)\
+ |(du4_power5,iu1_power5)")
+
+(define_reservation "fpq_power5"
+ "(du1_power5,fpu1_power5)\
+ |(du2_power5,fpu2_power5)\
+ |(du3_power5,fpu2_power5)\
+ |(du4_power5,fpu1_power5)")
+
+; Dispatch slots are allocated in order conforming to program order.
+(absence_set "du1_power5" "du2_power5,du3_power5,du4_power5,du5_power5")
+(absence_set "du2_power5" "du3_power5,du4_power5,du5_power5")
+(absence_set "du3_power5" "du4_power5,du5_power5")
+(absence_set "du4_power5" "du5_power5")
+
+
+; Load/store
+(define_insn_reservation "power5-load" 4 ; 3
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "power5"))
+ "lsq_power5")
+
+(define_insn_reservation "power5-load-ext" 5
+ (and (eq_attr "type" "load_ext")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-ext-update" 5
+ (and (eq_attr "type" "load_ext_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5+iu2_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-ext-update-indexed" 5
+ (and (eq_attr "type" "load_ext_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu1_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-update-indexed" 3
+ (and (eq_attr "type" "load_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu2_power5")
+
+(define_insn_reservation "power5-load-update" 4 ; 3
+ (and (eq_attr "type" "load_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5")
+
+(define_insn_reservation "power5-fpload" 6 ; 5
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "power5"))
+ "lsq_power5")
+
+(define_insn_reservation "power5-fpload-update" 6 ; 5
+ (and (eq_attr "type" "fpload_u,fpload_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5")
+
+(define_insn_reservation "power5-store" 12
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,lsu1_power5,iu1_power5)\
+ |(du2_power5,lsu2_power5,iu2_power5)\
+ |(du3_power5,lsu2_power5,iu2_power5)\
+ |(du4_power5,lsu1_power5,iu1_power5)")
+
+(define_insn_reservation "power5-store-update" 12
+ (and (eq_attr "type" "store_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5,iu1_power5")
+
+(define_insn_reservation "power5-store-update-indexed" 12
+ (and (eq_attr "type" "store_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu2_power5,iu2_power5")
+
+(define_insn_reservation "power5-fpstore" 12
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,lsu1_power5,fpu1_power5)\
+ |(du2_power5,lsu2_power5,fpu2_power5)\
+ |(du3_power5,lsu2_power5,fpu2_power5)\
+ |(du4_power5,lsu1_power5,fpu1_power5)")
+
+(define_insn_reservation "power5-fpstore-update" 12
+ (and (eq_attr "type" "fpstore_u,fpstore_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5,fpu1_power5")
+
+(define_insn_reservation "power5-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
+
+
+; Integer latency is 2 cycles
+(define_insn_reservation "power5-integer" 2
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "power5"))
+ "iq_power5")
+
+(define_insn_reservation "power5-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5,iu1_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5,iu2_power5,nothing,iu2_power5)\
+ |(du3_power5+du4_power5,iu2_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5,iu1_power5,nothing,iu1_power5)")
+
+(define_insn_reservation "power5-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5+du3_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5+du4_power5,\
+ iu2_power5,nothing,iu2_power5,nothing,iu1_power5)\
+ |(du3_power5+du4_power5+du1_power5,\
+ iu2_power5,nothing,iu1_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5+du2_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)")
+
+(define_insn_reservation "power5-insert" 4
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5,nothing,iu2_power5")
+
+(define_insn_reservation "power5-cmp" 3
+ (and (eq_attr "type" "cmp,fast_compare")
+ (eq_attr "cpu" "power5"))
+ "iq_power5")
+
+(define_insn_reservation "power5-compare" 2
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5,iu2_power5")
+
+(define_bypass 4 "power5-compare" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-lmul-cmp" 7
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5*6,iu2_power5")
+
+(define_bypass 10 "power5-lmul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-imul-cmp" 5
+ (and (eq_attr "type" "imul_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5*4,iu2_power5")
+
+(define_bypass 8 "power5-imul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-lmul" 7
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*6)\
+ |(du2_power5,iu2_power5*6)\
+ |(du3_power5,iu2_power5*6)\
+ |(du4_power5,iu1_power5*6)")
+
+(define_insn_reservation "power5-imul" 5
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*4)\
+ |(du2_power5,iu2_power5*4)\
+ |(du3_power5,iu2_power5*4)\
+ |(du4_power5,iu1_power5*4)")
+
+(define_insn_reservation "power5-imul3" 4
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*3)\
+ |(du2_power5,iu2_power5*3)\
+ |(du3_power5,iu2_power5*3)\
+ |(du4_power5,iu1_power5*3)")
+
+
+; SPR move only executes in first IU.
+; Integer division only executes in second IU.
+(define_insn_reservation "power5-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu2_power5*35")
+
+(define_insn_reservation "power5-ldiv" 68
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu2_power5*67")
+
+
+(define_insn_reservation "power5-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,bpu_power5")
+
+
+; Branches take dispatch Slot 4.  The absence_sets prevent other insns
+; from grabbing previous dispatch slots once this one is assigned.
+(define_insn_reservation "power5-branch" 2
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "power5"))
+ "(du5_power5\
+ |du4_power5+du5_power5\
+ |du3_power5+du4_power5+du5_power5\
+ |du2_power5+du3_power5+du4_power5+du5_power5\
+ |du1_power5+du2_power5+du3_power5+du4_power5+du5_power5),bpu_power5")
+
+
+; Condition Register logical ops are split if non-destructive (RT != RB)
+(define_insn_reservation "power5-crlogical" 2
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,cru_power5")
+
+(define_insn_reservation "power5-delayedcr" 4
+ (and (eq_attr "type" "delayed_cr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,cru_power5,cru_power5")
+
+; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu
+(define_insn_reservation "power5-mfcr" 6
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ du1_power5+du2_power5+du3_power5+du4_power5+cru_power5,\
+ cru_power5,cru_power5,cru_power5")
+
+; mfcrf (1 field)
+(define_insn_reservation "power5-mfcrf" 3
+ (and (eq_attr "type" "mfcrf")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,cru_power5")
+
+; mtcrf (1 field)
+(define_insn_reservation "power5-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,iu1_power5")
+
+; Basic FP latency is 6 cycles
+(define_insn_reservation "power5-fp" 6
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "power5"))
+ "fpq_power5")
+
+(define_insn_reservation "power5-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "power5"))
+ "fpq_power5")
+
+(define_insn_reservation "power5-sdiv" 33
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,fpu1_power5*28)\
+ |(du2_power5,fpu2_power5*28)\
+ |(du3_power5,fpu2_power5*28)\
+ |(du4_power5,fpu1_power5*28)")
+
+(define_insn_reservation "power5-sqrt" 40
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,fpu1_power5*35)\
+ |(du2_power5,fpu2_power5*35)\
+ |(du3_power5,fpu2_power5*35)\
+ |(du4_power5,fpu2_power5*35)")
+
+(define_insn_reservation "power5-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/ppc64-fp.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/ppc64-fp.c
new file mode 100644
index 000000000..184f34e1d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/ppc64-fp.c
@@ -0,0 +1,243 @@
+/* Functions needed for soft-float on powerpc64-linux, copied from
+ libgcc2.c with macros expanded to force the use of specific types.
+
+ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#if defined(__powerpc64__) || defined (__64BIT__) || defined(__ppc64__)
+#define TMODES
+#include "config/fp-bit.h"
+
+extern DItype __fixtfdi (TFtype);
+extern DItype __fixdfdi (DFtype);
+extern DItype __fixsfdi (SFtype);
+extern USItype __fixunsdfsi (DFtype);
+extern USItype __fixunssfsi (SFtype);
+extern TFtype __floatditf (DItype);
+extern TFtype __floatunditf (UDItype);
+extern DFtype __floatdidf (DItype);
+extern DFtype __floatundidf (UDItype);
+extern SFtype __floatdisf (DItype);
+extern SFtype __floatundisf (UDItype);
+extern DItype __fixunstfdi (TFtype);
+
+static DItype local_fixunssfdi (SFtype);
+static DItype local_fixunsdfdi (DFtype);
+
+DItype
+__fixtfdi (TFtype a)
+{
+ if (a < 0)
+ return - __fixunstfdi (-a);
+ return __fixunstfdi (a);
+}
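+
+/* Illustrative, not in the original source: __fixtfdi (-2.5) returns
+   -2, i.e. truncation toward zero via the unsigned helper.  */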
+
+DItype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - local_fixunsdfdi (-a);
+ return local_fixunsdfdi (a);
+}
+
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - local_fixunssfdi (-a);
+ return local_fixunssfdi (a);
+}
+
+USItype
+__fixunsdfsi (DFtype a)
+{
+ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+USItype
+__fixunssfsi (SFtype a)
+{
+ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+TFtype
+__floatditf (DItype u)
+{
+ DFtype dh, dl;
+
+ dh = (SItype) (u >> (sizeof (SItype) * 8));
+ dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (TFtype) dh + (TFtype) dl;
+}
+
+TFtype
+__floatunditf (UDItype u)
+{
+ DFtype dh, dl;
+
+ dh = (USItype) (u >> (sizeof (SItype) * 8));
+ dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (TFtype) dh + (TFtype) dl;
+}
+
+DFtype
+__floatdidf (DItype u)
+{
+ DFtype d;
+
+ d = (SItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+DFtype
+__floatundidf (UDItype u)
+{
+ DFtype d;
+
+ d = (USItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+SFtype
+__floatdisf (DItype u)
+{
+ DFtype f;
+
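+  /* Clarifying note, not in the original source: the block below folds
+     any bits that would be lost in the DItype -> DFtype step into a
+     "sticky" bit, so that the later DFtype -> SFtype conversion cannot
+     double-round.  */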
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (! (- ((DItype) 1 << 53) < u
+ && u < ((DItype) 1 << 53)))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (SItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+SFtype
+__floatundisf (UDItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (u >= ((UDItype) 1 << 53))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (USItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+DItype
+__fixunstfdi (TFtype a)
+{
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ const TFtype b = (a / (((UDItype) 1) << (sizeof (SItype) * 8)));
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ UDItype v = (USItype) b;
+ v <<= (sizeof (SItype) * 8);
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype) v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (-a);
+ else
+ v += (USItype) a;
+ return v;
+}
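+
+/* Illustrative trace, not in the original source: for a = 2^32 + 5 the
+   quotient b is just over 1.0, so v becomes 1 << 32; subtracting v
+   leaves a = 5.0 and the final result is 0x100000005.  */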
+
+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
+   calls fixdfdi, which in turn calls fixunsdfdi.  */
+
+static DItype
+local_fixunsdfdi (DFtype a)
+{
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+/* This version is needed to prevent recursion; fixunssfdi in libgcc
+   calls fixsfdi, which in turn calls fixunssfdi.  */
+
+static DItype
+local_fixunssfdi (SFtype original_a)
+{
+ DFtype a = original_a;
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+#endif /* __powerpc64__ || __64BIT__ || __ppc64__ */
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/predicates.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/predicates.md
new file mode 100644
index 000000000..1d8967488
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/predicates.md
@@ -0,0 +1,1319 @@
+;; Predicate definitions for POWER and PowerPC.
+;; Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+;; Boston, MA 02110-1301, USA.
+
+;; Return 1 for anything except PARALLEL.
+(define_predicate "any_operand"
+ (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem"))
+
+;; Return 1 for any PARALLEL.
+(define_predicate "any_parallel_operand"
+ (match_code "parallel"))
+
+;; Return 1 if op is the COUNT register.
+(define_predicate "count_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == COUNT_REGISTER_REGNUM
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is an Altivec register.
+(define_predicate "altivec_register_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || ALTIVEC_REGNO_P (REGNO (op))
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is the XER register.
+(define_predicate "xer_operand"
+ (and (match_code "reg")
+ (match_test "XER_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a signed 5-bit constant integer.
+(define_predicate "s5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -16 && INTVAL (op) <= 15")))
+
+;; Return 1 if op is an unsigned 5-bit constant integer.
+(define_predicate "u5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 31")))
+
+;; Return 1 if op is a signed 8-bit constant integer.
+;; Integer multiplication completes more quickly with a signed 8-bit operand.
+(define_predicate "s8bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -128 && INTVAL (op) <= 127")))
+
+;; Return 1 if op is a constant integer that can fit in a D field.
+(define_predicate "short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)")))
+
+;; Return 1 if op is a constant integer that can fit in an unsigned D field.
+(define_predicate "u_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_K (op)")))
+
+;; Return 1 if op is a constant integer that cannot fit in a signed D field.
+(define_predicate "non_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT)
+ (INTVAL (op) + 0x8000) >= 0x10000")))
+
+;; Return 1 if op is a positive constant integer that is an exact power of 2.
+(define_predicate "exact_log2_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) > 0 && exact_log2 (INTVAL (op)) >= 0")))
+
+;; Return 1 if op is a register that is not special.
+(define_predicate "gpc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "(GET_CODE (op) != REG
+ || (REGNO (op) >= ARG_POINTER_REGNUM
+ && !XER_REGNO_P (REGNO (op)))
+ || REGNO (op) < MQ_REGNO)
+ && !((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (op, mode))")))
+
+;; Return 1 if op is a register that is a condition register field.
+(define_predicate "cc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a register that is a condition register field not cr0.
+(define_predicate "cc_reg_not_cr0_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_NOT_CR0_P (REGNO (op))")))
+
+;; Return 1 if op is a constant integer valid for D field
+;; or non-special register.
+(define_predicate "reg_or_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose negation is valid for the
+;; D field, or a non-special register.
+;; Do not allow a constant zero because all patterns that call this
+;; predicate use "addic r1,r2,-const" to set carry when r2 is greater than
+;; or equal to const, which does not work for zero.
+(define_predicate "reg_or_neg_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_P (op)
+ && INTVAL (op) != 0")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for DS field
+;; or non-special register.
+(define_predicate "reg_or_aligned_short_operand"
+ (if_then_else (match_code "const_int")
+ (and (match_operand 0 "short_cint_operand")
+ (match_test "!(INTVAL (op) & 3)"))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose high-order 16 bits are zero
+;; or non-special register.
+(define_predicate "reg_or_u_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "u_short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any constant integer
+;; or non-special register.
+(define_predicate "reg_or_cint_operand"
+ (ior (match_code "const_int")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for addition
+;; or non-special register.
+(define_predicate "reg_or_add_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000)
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for subtraction
+;; or non-special register.
+(define_predicate "reg_or_sub_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || - INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (- INTVAL (op)
+ + (mode == SImode
+ ? 0x80000000 : 0x80008000))
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any 32-bit unsigned constant integer
+;; or non-special register.
+(define_predicate "reg_or_logical_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && INTVAL (op) >= 0)
+ || ((INTVAL (op) & GET_MODE_MASK (mode)
+ & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0)")
+ (if_then_else (match_code "const_double")
+ (match_test "GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && mode == DImode
+ && CONST_DOUBLE_HIGH (op) == 0")
+ (match_operand 0 "gpc_reg_operand"))))
+
+;; Return 1 if operand is a CONST_DOUBLE that can be set in a register
+;; with no more than one instruction per word.
+(define_predicate "easy_fp_constant"
+ (match_code "const_double")
+{
+ long k[4];
+ REAL_VALUE_TYPE rv;
+
+ if (GET_MODE (op) != mode
+ || (!SCALAR_FLOAT_MODE_P (mode) && mode != DImode))
+ return 0;
+
+ /* Consider all constants with -msoft-float to be easy. */
+ if ((TARGET_SOFT_FLOAT || TARGET_E500_SINGLE)
+ && mode != DImode)
+ return 1;
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return 0;
+
+ /* If we are using V.4 style PIC, consider all constants to be hard. */
+ if (flag_pic && DEFAULT_ABI == ABI_V4)
+ return 0;
+
+#ifdef TARGET_RELOCATABLE
+ /* Similarly if we are using -mrelocatable, consider all constants
+ to be hard. */
+ if (TARGET_RELOCATABLE)
+ return 0;
+#endif
+
+ switch (mode)
+ {
+ case TFmode:
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[2]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1);
+
+ case DFmode:
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (TARGET_E500_DOUBLE
+ || (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations))
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1);
+
+ case SFmode:
+ /* The constant 0.f is easy. */
+ if (op == CONST0_RTX (SFmode))
+ return 1;
+
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations)
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, k[0]);
+
+ return num_insns_constant_wide (k[0]) == 1;
+
+ case DImode:
+ return ((TARGET_POWERPC64
+ && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0)
+ || (num_insns_constant (op, DImode) <= 2));
+
+ case SImode:
+ return 1;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+;; Return 1 if the operand is a CONST_VECTOR and can be loaded into a
+;; vector register without using memory.
+(define_predicate "easy_vector_constant"
+ (match_code "const_vector")
+{
+ if (ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (zero_constant (op, mode))
+ return true;
+ return easy_altivec_constant (op, mode);
+ }
+
+ if (SPE_VECTOR_MODE (mode))
+ {
+ int cst, cst2;
+ if (zero_constant (op, mode))
+ return true;
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
+ return false;
+
+ /* Limit SPE vectors to 15 bits signed. These we can generate with:
+ li r0, CONSTANT1
+ evmergelo r0, r0, r0
+ li r0, CONSTANT2
+
+ I don't know how efficient it would be to allow bigger constants,
+ considering we'll have an extra 'ori' for every 'li'. I doubt 5
+ instructions is better than a 64-bit memory load, but I don't
+ have the e500 timing specs. */
+ if (mode == V2SImode)
+ {
+ cst = INTVAL (CONST_VECTOR_ELT (op, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (op, 1));
+ return cst >= -0x7fff && cst <= 0x7fff
+ && cst2 >= -0x7fff && cst2 <= 0x7fff;
+ }
+ }
+
+ return false;
+})
+
+;; Same as easy_vector_constant but only for EASY_VECTOR_15_ADD_SELF.
+(define_predicate "easy_vector_constant_add_self"
+ (and (match_code "const_vector")
+ (and (match_test "TARGET_ALTIVEC")
+ (match_test "easy_altivec_constant (op, mode)")))
+{
+ rtx last = CONST_VECTOR_ELT (op, GET_MODE_NUNITS (mode) - 1);
+ HOST_WIDE_INT val = ((INTVAL (last) & 0xff) ^ 0x80) - 0x80;
+ return EASY_VECTOR_15_ADD_SELF (val);
+})
+
+;; Return 1 if operand is constant zero (scalars and vectors).
+(define_predicate "zero_constant"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Return 1 if operand is 0.0.
+(define_predicate "zero_fp_constant"
+ (and (match_code "const_double")
+ (match_test "SCALAR_FLOAT_MODE_P (mode)
+ && op == CONST0_RTX (mode)")))
+
+;; Return 1 if the operand is in volatile memory. Note that during the
+;; RTL generation phase, memory_operand does not return TRUE for volatile
+;; memory references. So this function allows us to recognize volatile
+;; references where it's safe.
+(define_predicate "volatile_mem_operand"
+ (and (and (match_code "mem")
+ (match_test "MEM_VOLATILE_P (op)"))
+ (if_then_else (match_test "reload_completed")
+ (match_operand 0 "memory_operand")
+ (if_then_else (match_test "reload_in_progress")
+ (match_test "strict_memory_address_p (mode, XEXP (op, 0))")
+ (match_test "memory_address_p (mode, XEXP (op, 0))")))))
+
+;; Return 1 if the operand is an offsettable memory operand.
+(define_predicate "offsettable_mem_operand"
+ (and (match_code "mem")
+ (match_test "offsettable_address_p (reload_completed
+ || reload_in_progress,
+ mode, XEXP (op, 0))")))
+
+;; Return 1 if the operand is a memory operand with an address divisible by 4
+(define_predicate "word_offset_memref_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_test "GET_CODE (XEXP (op, 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (op, 0), 0))
+ || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (op, 0), 1)) % 4 == 0")))
+
+;; Return 1 if the operand is an indexed or indirect memory operand.
+(define_predicate "indexed_or_indirect_operand"
+ (match_code "mem")
+{
+ op = XEXP (op, 0);
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (op) == AND
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == -16)
+ op = XEXP (op, 0);
+
+ return indexed_or_indirect_address (op, mode);
+})
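+;; (Illustrative, not in the original: for an Altivec access such as
+;; (mem:V4SI (and:SI (plus:SI (reg) (reg)) (const_int -16))), the AND
+;; masking is stripped above and (plus (reg) (reg)) is what gets
+;; validated as an indexed address.)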
+
+;; Return 1 if the operand is an indexed or indirect address.
+(define_special_predicate "indexed_or_indirect_address"
+ (and (match_test "REG_P (op)
+ || (GET_CODE (op) == PLUS
+ /* Omit testing REG_P (XEXP (op, 0)). */
+ && REG_P (XEXP (op, 1)))")
+ (match_operand 0 "address_operand")))
+
+;; Used for the destination of the fix_truncdfsi2 expander.
+;; If stfiwx will be used, the result goes to memory; otherwise,
+;; we're going to emit a store and a load of a subreg, so the dest is a
+;; register.
+(define_predicate "fix_trunc_dest_operand"
+ (if_then_else (match_test "! TARGET_E500_DOUBLE && TARGET_PPC_GFXOPT")
+ (match_operand 0 "memory_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is either a non-special register or can be used
+;; as the operand of a `mode' add insn.
+(define_predicate "add_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)
+ || satisfies_constraint_L (op)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if OP is a constant but not a valid add_operand.
+(define_predicate "non_add_cint_operand"
+ (and (match_code "const_int")
+ (match_test "!satisfies_constraint_I (op)
+ && !satisfies_constraint_L (op)")))
+
+;; Return 1 if the operand is a constant that can be used as the operand
+;; of an OR or XOR.
+(define_predicate "logical_const_operand"
+ (match_code "const_int,const_double")
+{
+ HOST_WIDE_INT opl, oph;
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ opl = INTVAL (op) & GET_MODE_MASK (mode);
+
+ if (HOST_BITS_PER_WIDE_INT <= 32
+ && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0)
+ return 0;
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT);
+
+ opl = CONST_DOUBLE_LOW (op);
+ oph = CONST_DOUBLE_HIGH (op);
+ if (oph != 0)
+ return 0;
+ }
+ else
+ return 0;
+
+ return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0
+ || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0);
+})
+
+;; Return 1 if the operand is a non-special register or a constant that
+;; can be used as the operand of an OR or XOR.
+(define_predicate "logical_operand"
+ (ior (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_const_operand")))
+
+;; Return 1 if op is a constant that is not a logical operand, but could
+;; be split into one.
+(define_predicate "non_logical_cint_operand"
+ (and (match_code "const_int,const_double")
+ (and (not (match_operand 0 "logical_operand"))
+ (match_operand 0 "reg_or_logical_cint_operand"))))
+
+;; Return 1 if op is a constant that can be encoded in a 32-bit mask,
+;; suitable for use with rlwinm (no more than two 1->0 or 0->1
+;; transitions). Reject all ones and all zeros, since these should have
+;; been optimized away and confuse the making of MB and ME.
+(define_predicate "mask_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if (TARGET_POWERPC64)
+ {
+ /* Fail if the mask is not 32-bit. */
+ if (mode == DImode && (c & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0)
+ return 0;
+
+ /* Fail if the mask wraps around because the upper 32-bits of the
+ mask will all be 1s, contrary to GCC's internal view. */
+ if ((c & 0x80000001) == 0x80000001)
+ return 0;
+ }
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Reject all zeros or all ones. */
+ if (c == 0)
+ return 0;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
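+;; (Worked examples, not in the original: 0x00ffff00, a single run of
+;; 1s with two transitions, is accepted; 0x00ff00ff, two runs with four
+;; transitions, is rejected.)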
+
+;; Return 1 for the PowerPC64 rlwinm corner case.
+(define_predicate "mask_operand_wrap"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if ((c & 0x80000001) != 0x80000001)
+ return 0;
+
+ c = ~c;
+ if (c == 0)
+ return 0;
+
+ lsb = c & -c;
+ c = ~c;
+ c &= -lsb;
+ lsb = c & -c;
+ return c == -lsb;
+})
+
+;; Return 1 if the operand is a constant that is a PowerPC64 mask
+;; suitable for use with rldicl or rldicr (no more than one 1->0 or 0->1
+;; transition). Reject all zeros, since zero should have been
+;; optimized away and confuses the making of MB and ME.
+(define_predicate "mask64_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Reject all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
+
+;; Like mask64_operand, but allow up to three transitions. This
+;; predicate is used by insn patterns that generate two rldicl or
+;; rldicr machine insns.
+(define_predicate "mask64_2_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Disallow all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a third transition. */
+ c = ~c;
+
+ /* Erase second transition. */
+ c &= -lsb;
+
+ /* Find the third transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
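+;; (Illustrative, not in the original: 0x0000ffff0000ffff has three
+;; transitions and passes this predicate, though it is rejected by
+;; mask64_operand above.)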
+
+;; Like and_operand, but also match constants that can be implemented
+;; with two rldicl or rldicr insns.
+(define_predicate "and64_2_operand"
+ (ior (match_operand 0 "mask64_2_operand")
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand"))))
+
+;; Return 1 if the operand is either a non-special register or a
+;; constant that can be used as the operand of a logical AND.
+(define_predicate "and_operand"
+ (ior (match_operand 0 "mask_operand")
+ (ior (and (match_test "TARGET_POWERPC64 && mode == DImode")
+ (match_operand 0 "mask64_operand"))
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand")))))
+
+;; Return 1 if the operand is either a logical operand or a short cint operand.
+(define_predicate "scc_eq_operand"
+ (ior (match_operand 0 "logical_operand")
+ (match_operand 0 "short_cint_operand")))
+
+;; Return 1 if the operand is a general non-special register or memory operand.
+(define_predicate "reg_or_mem_operand"
+ (ior (match_operand 0 "memory_operand")
+ (ior (and (match_code "mem")
+ (match_test "macho_lo_sum_memory_operand (op, mode)"))
+ (ior (match_operand 0 "volatile_mem_operand")
+ (match_operand 0 "gpc_reg_operand")))))
+
+;; Return 1 if the operand is either an easy FP constant or memory or reg.
+(define_predicate "reg_or_none500mem_operand"
+ (if_then_else (match_code "mem")
+ (and (match_test "!TARGET_E500_DOUBLE")
+ (ior (match_operand 0 "memory_operand")
+ (ior (match_test "macho_lo_sum_memory_operand (op, mode)")
+ (match_operand 0 "volatile_mem_operand"))))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is CONST_DOUBLE 0, register or memory operand.
+(define_predicate "zero_reg_mem_operand"
+ (ior (match_operand 0 "zero_fp_constant")
+ (match_operand 0 "reg_or_mem_operand")))
+
+;; Return 1 if the operand is a general register or memory operand without
+;; pre_inc or pre_dec, which produces invalid form of PowerPC lwa
+;; instruction.
+(define_predicate "lwa_operand"
+ (match_code "reg,subreg,mem")
+{
+ rtx inner = op;
+
+ if (reload_completed && GET_CODE (inner) == SUBREG)
+ inner = SUBREG_REG (inner);
+
+ return gpc_reg_operand (inner, mode)
+ || (memory_operand (inner, mode)
+ && GET_CODE (XEXP (inner, 0)) != PRE_INC
+ && GET_CODE (XEXP (inner, 0)) != PRE_DEC
+ && (GET_CODE (XEXP (inner, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (inner, 0), 1)) != CONST_INT
+ /* APPLE LOCAL begin radar 4805365 */
+ || INTVAL (XEXP (XEXP (inner, 0), 1)) % 4 == 0)
+	   /* The alignment must also be known and a multiple of 32 bits.  */
+ && (MEM_ALIGN (inner) != 0
+ && MEM_ALIGN (inner) % 32 == 0));
+ /* APPLE LOCAL end radar 4805365 */
+})
+
+;; Return 1 if the operand, used inside a MEM, is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(mode == VOIDmode || GET_MODE (op) == mode)
+ && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))")))
+
+;; Return 1 if op is an operand that can be loaded via the GOT.
+(define_predicate "got_operand"
+ (match_code "symbol_ref,const,label_ref"))
+
+;; Return 1 if op is a simple reference that can be loaded via the GOT,
+;; excluding labels involving addition.
+(define_predicate "got_no_const_operand"
+ (match_code "symbol_ref,label_ref"))
+
+;; Return 1 if op is a SYMBOL_REF for a TLS symbol.
+(define_predicate "rs6000_tls_symbol_ref"
+ (and (match_code "symbol_ref")
+ (match_test "RS6000_SYMBOL_REF_TLS_P (op)")))
+
+;; Return 1 if the operand, used inside a MEM, is a valid first argument
+;; to CALL. This is a SYMBOL_REF, a pseudo-register, LR or CTR.
+(define_predicate "call_operand"
+ (if_then_else (match_code "reg")
+ (match_test "REGNO (op) == LINK_REGISTER_REGNUM
+ || REGNO (op) == COUNT_REGISTER_REGNUM
+ /* APPLE LOCAL begin accept hard R12 as target reg */
+#ifdef MAGIC_INDIRECT_CALL_REG
+ || REGNO (op) == MAGIC_INDIRECT_CALL_REG
+#endif
+ /* APPLE LOCAL end accept hard R12 as target reg */
+ || REGNO (op) >= FIRST_PSEUDO_REGISTER")
+ (match_code "symbol_ref")))
+
+;; Return 1 if the operand is a SYMBOL_REF for a function known to be in
+;; this file.
+(define_predicate "current_file_function_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))
+ && ((SYMBOL_REF_LOCAL_P (op)
+ && (DEFAULT_ABI != ABI_AIX
+ || !SYMBOL_REF_EXTERNAL_P (op)))
+ || (op == XEXP (DECL_RTL (current_function_decl),
+ 0)))")))
+
+;; Return 1 if this operand is a valid input for a move insn.
+(define_predicate "input_operand"
+ (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem,
+ const_double,const_vector,const_int,plus")
+{
+ /* Memory is always valid. */
+ if (memory_operand (op, mode))
+ return 1;
+
+ /* For floating-point, easy constants are valid. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && CONSTANT_P (op)
+ && easy_fp_constant (op, mode))
+ return 1;
+
+ /* Allow any integer constant. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && (GET_CODE (op) == CONST_INT
+ || GET_CODE (op) == CONST_DOUBLE))
+ return 1;
+
+ /* Allow easy vector constants. */
+ if (GET_CODE (op) == CONST_VECTOR
+ && easy_vector_constant (op, mode))
+ return 1;
+
+ /* Do not allow invalid E500 subregs. */
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ /* For floating-point or multi-word mode, the only remaining valid type
+ is a register. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ || GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return register_operand (op, mode);
+
+ /* The only cases left are integral modes one word or smaller (we
+ do not get called for MODE_CC values). These can be in any
+ register. */
+ if (register_operand (op, mode))
+ return 1;
+
+ /* A SYMBOL_REF referring to the TOC is valid. */
+ if (legitimate_constant_pool_address_p (op))
+ return 1;
+
+  /* A constant pool expression (relative to the TOC) is valid.  */
+ if (toc_relative_expr_p (op))
+ return 1;
+
+ /* V.4 allows SYMBOL_REFs and CONSTs that are in the small data region
+ to be valid. */
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)
+ && small_data_operand (op, Pmode))
+ return 1;
+
+ return 0;
+})
+
+;; Like nonimmediate_operand, but reject invalid SUBREGs on the e500.
+(define_predicate "rs6000_nonimmediate_operand"
+ (match_code "reg,subreg,mem")
+{
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ return nonimmediate_operand (op, mode);
+})
+
+;; Return true if the operand is a boolean operator.
+(define_predicate "boolean_operator"
+ (match_code "and,ior,xor"))
+
+;; Return true if the operand is an OR-form boolean operator.
+(define_predicate "boolean_or_operator"
+ (match_code "ior,xor"))
+
+;; Return true if the operand is an equality operator.
+(define_special_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+;; Return true if the operand is a MIN or MAX operator.
+(define_predicate "min_max_operator"
+ (match_code "smin,smax,umin,umax"))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; instruction. We check the opcode against the mode of the CC value.
+;; validate_condition_mode is an assertion.
+(define_predicate "branch_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (and (match_test "GET_MODE_CLASS (GET_MODE (XEXP (op, 0))) == MODE_CC")
+ (match_test "validate_condition_mode (GET_CODE (op),
+ GET_MODE (XEXP (op, 0))),
+ 1"))))
+
+;; Return 1 if OP is a comparison operation that is valid for an SCC insn --
+;; it must be a positive comparison.
+(define_predicate "scc_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; insn, which is true if the corresponding bit in the CC register is set.
+(define_predicate "branch_positive_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a trap insn.
+(define_predicate "trap_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (match_code "eq,ne,le,lt,ge,gt,leu,ltu,geu,gtu")))
+
+;; Return 1 if OP is a load multiple operation, known to be a PARALLEL.
+(define_predicate "load_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
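
Abstracting away the RTL accessors, the loop above accepts only the shape an lmw-style instruction produces: element i of the PARALLEL must load register dest_regno + i from src_addr + 4*i in SImode. A standalone model of that consecutive-register, consecutive-offset check (the struct and function are illustrative stand-ins, not GCC types):

    /* Illustrative stand-ins for the SETs inside the PARALLEL.  */
    struct model_set
    {
      unsigned int reg;            /* destination register number */
      unsigned long addr_offset;   /* byte offset from the base address */
    };

    /* Accept only reg, reg+1, reg+2, ... loaded from offsets
       0, 4, 8, ... -- the same shape load_multiple_operation
       verifies on real RTL.  */
    static int
    consecutive_word_loads_p (const struct model_set *sets, int count)
    {
      int i;

      if (count <= 1)
        return 0;
      for (i = 1; i < count; i++)
        if (sets[i].reg != sets[0].reg + i
            || sets[i].addr_offset != sets[0].addr_offset + 4ul * i)
          return 0;
      return 1;
    }
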
+
+;; Return 1 if OP is a store multiple operation, known to be a PARALLEL.
+;; The second vector element is a CLOBBER.
+(define_predicate "store_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0) - 1;
+ unsigned int src_regno;
+ rtx dest_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i + 1);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for a save_world call in prologue, known to be
+;; a PARALLEL.
+(define_predicate "save_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 55)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), DFmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != DFmode)
+ return 0;
+ }
+
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != V4SImode)
+ return 0;
+ }
+
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+ }
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || REGNO (SET_SRC (elt)) != CR2_REGNO
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a restore_world call in epilogue, known to be
+;; a PARALLEL.
+(define_predicate "restore_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 59)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != RETURN
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) != CR2_REGNO
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+
+ for (i=1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+ }
+
+ for (i=1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != V4SImode)
+ return 0;
+ }
+
+ for (i=1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), DFmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != DFmode)
+ return 0;
+ }
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a vrsave call, known to be a PARALLEL.
+(define_predicate "vrsave_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno, src_regno;
+ int i;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE
+ || XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPECV_SET_VRSAVE)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_regno = REGNO (XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 1));
+
+ if (dest_regno != VRSAVE_REGNO || src_regno != VRSAVE_REGNO)
+ return 0;
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != CLOBBER
+ && GET_CODE (elt) != SET)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for mfcr insn, known to be a PARALLEL.
+(define_predicate "mfcr_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+ rtx src_reg;
+
+ src_reg = XVECEXP (SET_SRC (exp), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != CCmode
+ || ! CR_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != SImode
+ || ! INT_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_FROM_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
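
The maskval expression above reconstructs the one-hot FXM field that mfcr/mtcrf use to select a condition-register field: CR0 maps to the most significant of the eight mask bits, CR7 to the least. A sketch of the mapping in terms of field numbers rather than register numbers (crf_mask is our name, not GCC's):

    /* One-hot FXM mask for condition-register field N (0..7):
       CR0 -> 0x80, CR1 -> 0x40, ..., CR7 -> 0x01.  This mirrors
       maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg)) above.  */
    static int
    crf_mask (int cr_field)
    {
      return 1 << (7 - cr_field);
    }
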
+
+;; Return 1 if OP is valid for mtcrf insn, known to be a PARALLEL.
+(define_predicate "mtcrf_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+ rtx src_reg;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+ src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != SImode
+ || ! INT_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != CCmode
+ || ! CR_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
+
+;; Return 1 if OP is valid for lmw insn, known to be a PARALLEL.
+(define_predicate "lmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ if (dest_regno > 31
+ || count != 32 - (int) dest_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (src_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (src_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, src_addr, 0))
+ {
+ offset = INTVAL (XEXP (src_addr, 1));
+ base_regno = REGNO (XEXP (src_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_SRC (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for stmw insn, known to be a PARALLEL.
+(define_predicate "stmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx dest_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ if (src_regno > 31
+ || count != 32 - (int) src_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (dest_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (dest_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, dest_addr, 0))
+ {
+ offset = INTVAL (XEXP (dest_addr, 1));
+ base_regno = REGNO (XEXP (dest_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_DEST (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rios1.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/rios1.md
new file mode 100644
index 000000000..59b34c5f8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rios1.md
@@ -0,0 +1,190 @@
+;; Scheduling description for IBM POWER processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "rios1,rios1fp")
+(define_cpu_unit "iu_rios1" "rios1")
+(define_cpu_unit "fpu_rios1" "rios1fp")
+(define_cpu_unit "bpu_rios1" "rios1")
+
+;; RIOS1 32-bit IU, FPU, BPU
+
+(define_insn_reservation "rios1-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1")
+
+(define_insn_reservation "ppc601-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1+fpu_rios1")
+
+(define_insn_reservation "rios1-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1")
+
+(define_insn_reservation "rios1-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1,iu_rios1")
+
+(define_insn_reservation "rios1-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*5")
+
+(define_insn_reservation "rios1-imul2" 4
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*4")
+
+(define_insn_reservation "rios1-imul3" 3
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*3")
+
+(define_insn_reservation "ppc601-imul" 5
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1*5")
+
+(define_insn_reservation "rios1-idiv" 19
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*19")
+
+(define_insn_reservation "ppc601-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1*36")
+
+; compare executes on integer unit, but feeds insns which
+; execute on the branch unit.
+(define_insn_reservation "rios1-compare" 4
+ (and (eq_attr "type" "cmp,fast_compare,compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,nothing*2,bpu_rios1")
+
+(define_insn_reservation "rios1-delayed_compare" 5
+ (and (eq_attr "type" "delayed_compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,nothing*3,bpu_rios1")
+
+(define_insn_reservation "ppc601-compare" 3
+ (and (eq_attr "type" "cmp,compare,delayed_compare")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1,nothing,bpu_rios1")
+
+(define_insn_reservation "rios1-fpcompare" 9
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1,nothing*3,bpu_rios1")
+
+(define_insn_reservation "ppc601-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc601"))
+ "(fpu_rios1+iu_rios1*2),nothing*2,bpu_rios1")
+
+(define_insn_reservation "rios1-fp" 2
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1")
+
+(define_insn_reservation "ppc601-fp" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1")
+
+(define_insn_reservation "rios1-dmul" 5
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*2")
+
+(define_insn_reservation "rios1-sdiv" 19
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1*19")
+
+(define_insn_reservation "ppc601-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*17")
+
+(define_insn_reservation "ppc601-ddiv" 31
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*31")
+
+(define_insn_reservation "rios1-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-crlogical" 4
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "bpu_rios1")
+
+(define_insn_reservation "rios1-mtjmpr" 5
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "ppc601-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "bpu_rios1")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rios2.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/rios2.md
new file mode 100644
index 000000000..b2f5cb282
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rios2.md
@@ -0,0 +1,128 @@
+;; Scheduling description for IBM Power2 processor.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "rios2,rios2fp")
+(define_cpu_unit "iu1_rios2,iu2_rios2" "rios2")
+(define_cpu_unit "fpu1_rios2,fpu2_rios2" "rios2fp")
+(define_cpu_unit "bpu_rios2" "rios2")
+
+;; RIOS2 32-bit 2xIU, 2xFPU, BPU
+;; IU1 can perform all integer operations
+;; IU2 can perform all integer operations except imul and idiv
+
+(define_insn_reservation "rios2-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2*2")
+
+(define_insn_reservation "rios2-idiv" 13
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2*13")
+
+; compare executes on integer unit, but feeds insns which
+; execute on the branch unit.
+(define_insn_reservation "rios2-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "rios2"))
+ "(iu1_rios2|iu2_rios2),nothing,bpu_rios2")
+
+(define_insn_reservation "rios2-fp" 2
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "rios2"))
+ "fpu1_rios2|fpu2_rios2")
+
+(define_insn_reservation "rios2-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2|fpu2_rios2),nothing*3,bpu_rios2")
+
+(define_insn_reservation "rios2-dmul" 2
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "rios2"))
+ "fpu1_rios2|fpu2_rios2")
+
+(define_insn_reservation "rios2-sdiv" 17
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2*17)|(fpu2_rios2*17)")
+
+(define_insn_reservation "rios2-ssqrt" 26
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2*26)|(fpu2_rios2*26)")
+
+(define_insn_reservation "rios2-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-crlogical" 3
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "rios2"))
+ "bpu_rios2")
+
+(define_insn_reservation "rios2-mtjmpr" 5
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "rios2"))
+ "bpu_rios2")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-c.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-c.c
new file mode 100644
index 000000000..31d77452c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-c.c
@@ -0,0 +1,2738 @@
+/* Subroutines for the C front end on the POWER and PowerPC architectures.
+ Copyright (C) 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+ and Paolo Bonzini <bonzini@gnu.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "c-common.h"
+#include "c-pragma.h"
+#include "c-tree.h"
+#include "toplev.h"
+#include "tm_p.h"
+#include "target.h"
+#include "langhooks.h"
+/* APPLE LOCAL begin AltiVec */
+#include "c-common.h"
+#include "cpplib.h"
+#include "../libcpp/internal.h"
+#include "options.h"
+
+static cpp_hashnode *altivec_categorize_keyword (const cpp_token *);
+static void init_vector_keywords (cpp_reader *pfile);
+/* APPLE LOCAL end AltiVec */
+
+
+/* Handle the machine-specific pragma longcall.  Its syntax is
+
+ # pragma longcall ( TOGGLE )
+
+ where TOGGLE is either 0 or 1.
+
+ rs6000_default_long_calls is set to the value of TOGGLE, changing
+ whether or not new function declarations receive a longcall
+ attribute by default. */
+
+#define SYNTAX_ERROR(gmsgid) do { \
+ warning (OPT_Wpragmas, gmsgid); \
+ warning (OPT_Wpragmas, "ignoring malformed #pragma longcall"); \
+ return; \
+} while (0)
+
+void
+rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree x, n;
+
+ /* If we get here, generic code has already scanned the directive
+ leader and the word "longcall". */
+
+ if (pragma_lex (&x) != CPP_OPEN_PAREN)
+ SYNTAX_ERROR ("missing open paren");
+ if (pragma_lex (&n) != CPP_NUMBER)
+ SYNTAX_ERROR ("missing number");
+ if (pragma_lex (&x) != CPP_CLOSE_PAREN)
+ SYNTAX_ERROR ("missing close paren");
+
+ if (n != integer_zero_node && n != integer_one_node)
+ SYNTAX_ERROR ("number must be 0 or 1");
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma longcall");
+
+ rs6000_default_long_calls = (n == integer_one_node);
+}
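
For reference, this is what the pragma looks like on the user side; with the toggle at 1, later function declarations default to the longcall attribute (a usage sketch, not part of the patch):

    #pragma longcall (1)
    void far_away (void);   /* defaults to __attribute__ ((longcall)) */

    #pragma longcall (0)
    void nearby (void);     /* back to ordinary near calls */
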
+
+/* Handle defining many CPP flags based on TARGET_xxx. As a general
+ policy, rather than trying to guess what flags a user might want a
+ #define for, it's better to define a flag for everything. */
+
+#define builtin_define(TXT) cpp_define (pfile, TXT)
+#define builtin_assert(TXT) cpp_assert (pfile, TXT)
+
+/* APPLE LOCAL begin AltiVec */
+/* Keep the AltiVec keywords handy for fast comparisons. */
+static GTY(()) cpp_hashnode *__vector_keyword;
+static GTY(()) cpp_hashnode *vector_keyword;
+static GTY(()) cpp_hashnode *__pixel_keyword;
+static GTY(()) cpp_hashnode *pixel_keyword;
+static GTY(()) cpp_hashnode *__bool_keyword;
+static GTY(()) cpp_hashnode *bool_keyword;
+static GTY(()) cpp_hashnode *_Bool_keyword;
+
+static GTY(()) cpp_hashnode *expand_bool_pixel; /* Preserved across calls. */
+
+static cpp_hashnode *
+altivec_categorize_keyword (const cpp_token *tok)
+{
+ if (tok->type == CPP_NAME)
+ {
+ cpp_hashnode *ident = tok->val.node;
+
+ if (ident == vector_keyword || ident == __vector_keyword)
+ return __vector_keyword;
+
+ if (ident == pixel_keyword || ident == __pixel_keyword)
+ return __pixel_keyword;
+
+ if (ident == bool_keyword || ident == _Bool_keyword
+ || ident == __bool_keyword)
+ return __bool_keyword;
+
+ return ident;
+ }
+
+ return 0;
+}
+
+/* Called to decide whether a conditional macro should be expanded.
+   Since we have exactly one such macro (i.e., 'vector'), we do not
+ need to examine the 'tok' parameter. */
+
+cpp_hashnode *
+rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
+{
+ static bool vector_keywords_init = false;
+ cpp_hashnode *expand_this = tok->val.node;
+ cpp_hashnode *ident;
+
+ if (!vector_keywords_init)
+ {
+ init_vector_keywords (pfile);
+ vector_keywords_init = true;
+ }
+
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __vector_keyword)
+ {
+ tok = _cpp_peek_token (pfile, 0);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __pixel_keyword || ident == __bool_keyword)
+ {
+ expand_this = __vector_keyword;
+ expand_bool_pixel = ident;
+ }
+ else if (ident)
+ {
+ enum rid rid_code = (enum rid)(ident->rid_code);
+ if (ident->type == NT_MACRO)
+ {
+ (void)cpp_get_token (pfile);
+ tok = _cpp_peek_token (pfile, 0);
+ ident = altivec_categorize_keyword (tok);
+ rid_code = (enum rid)(ident->rid_code);
+ }
+
+ if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
+ || rid_code == RID_SHORT || rid_code == RID_SIGNED
+ || rid_code == RID_INT || rid_code == RID_CHAR
+ || rid_code == RID_FLOAT)
+ {
+ expand_this = __vector_keyword;
+ /* If the next keyword is bool or pixel, it
+ will need to be expanded as well. */
+ tok = _cpp_peek_token (pfile, 1);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __pixel_keyword || ident == __bool_keyword)
+ expand_bool_pixel = ident;
+ }
+ }
+ }
+ else if (expand_bool_pixel
+ && (ident == __pixel_keyword || ident == __bool_keyword))
+ {
+ expand_this = expand_bool_pixel;
+ expand_bool_pixel = 0;
+ }
+
+ return expand_this;
+}
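
The net effect is that 'vector', 'pixel' and 'bool' act as keywords only when the following token makes an AltiVec type plausible, so they remain usable as ordinary identifiers. A sketch of source that relies on this, assuming Apple's AltiVec PIM mode (rs6000_altivec_pim, set up below) is active:

    /* Expanded: the token after 'vector' is a recognized type keyword.  */
    vector unsigned int v = { 1, 2, 3, 4 };
    vector bool int m;      /* 'bool' after 'vector' expands too.  */

    /* Not expanded: '=' is not a type keyword, so 'vector' stays an
       ordinary identifier here.  */
    int vector = 42;
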
+
+static void
+init_vector_keywords (cpp_reader *pfile)
+{
+  /* These keywords are implemented as conditional macros, controlled by
+     the rs6000_macro_to_expand() function above; the variants without
+     leading underscores are the context-sensitive ones.  */
+ __vector_keyword = cpp_lookup (pfile, DSC ("__vector"));
+ __vector_keyword->flags |= NODE_CONDITIONAL;
+
+ __pixel_keyword = cpp_lookup (pfile, DSC ("__pixel"));
+ __pixel_keyword->flags |= NODE_CONDITIONAL;
+
+ __bool_keyword = cpp_lookup (pfile, DSC ("__bool"));
+ __bool_keyword->flags |= NODE_CONDITIONAL;
+
+ vector_keyword = cpp_lookup (pfile, DSC ("vector"));
+ vector_keyword->flags |= NODE_CONDITIONAL;
+
+ pixel_keyword = cpp_lookup (pfile, DSC ("pixel"));
+ pixel_keyword->flags |= NODE_CONDITIONAL;
+
+ _Bool_keyword = cpp_lookup (pfile, DSC ("_Bool"));
+ _Bool_keyword->flags |= NODE_CONDITIONAL;
+
+ bool_keyword = cpp_lookup (pfile, DSC ("bool"));
+ bool_keyword->flags |= NODE_CONDITIONAL;
+ return;
+}
+
+/* APPLE LOCAL end AltiVec */
+
+void
+rs6000_cpu_cpp_builtins (cpp_reader *pfile)
+{
+ if (TARGET_POWER2)
+ builtin_define ("_ARCH_PWR2");
+ else if (TARGET_POWER)
+ builtin_define ("_ARCH_PWR");
+ if (TARGET_POWERPC)
+ builtin_define ("_ARCH_PPC");
+ if (TARGET_PPC_GPOPT)
+ builtin_define ("_ARCH_PPCSQ");
+ if (TARGET_PPC_GFXOPT)
+ builtin_define ("_ARCH_PPCGR");
+ if (TARGET_POWERPC64)
+ builtin_define ("_ARCH_PPC64");
+ if (TARGET_MFCRF)
+ builtin_define ("_ARCH_PWR4");
+ if (TARGET_POPCNTB)
+ builtin_define ("_ARCH_PWR5");
+ if (TARGET_FPRND)
+ builtin_define ("_ARCH_PWR5X");
+ if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
+ builtin_define ("_ARCH_COM");
+ if (TARGET_ALTIVEC)
+ {
+ builtin_define ("__ALTIVEC__");
+ builtin_define ("__VEC__=10206");
+
+ /* Define the AltiVec syntactic elements. */
+ builtin_define ("__vector=__attribute__((altivec(vector__)))");
+ builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
+ builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
+
+ /* APPLE LOCAL begin AltiVec */
+ if (rs6000_altivec_pim)
+ {
+ builtin_define ("vector=vector");
+ builtin_define ("pixel=pixel");
+ builtin_define ("_Bool=_Bool");
+ builtin_define ("bool=bool");
+ init_vector_keywords (pfile);
+
+ /* Indicate that the compiler supports Apple AltiVec syntax,
+ including context-sensitive keywords. */
+ builtin_define ("__APPLE_ALTIVEC__");
+ builtin_define ("vec_step(T)=(sizeof (__typeof__(T)) / sizeof (__typeof__(T) __attribute__((altivec(element__)))))");
+
+ /* Enable context-sensitive macros. */
+ cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
+ }
+ /* APPLE LOCAL end AltiVec */
+ }
+ if (TARGET_SPE)
+ builtin_define ("__SPE__");
+ if (TARGET_SOFT_FLOAT)
+ builtin_define ("_SOFT_FLOAT");
+ /* Used by lwarx/stwcx. errata work-around. */
+ if (rs6000_cpu == PROCESSOR_PPC405)
+ builtin_define ("__PPC405__");
+ /* Used by libstdc++. */
+ if (TARGET_NO_LWSYNC)
+ builtin_define ("__NO_LWSYNC__");
+
+ /* May be overridden by target configuration. */
+ RS6000_CPU_CPP_ENDIAN_BUILTINS();
+
+ if (TARGET_LONG_DOUBLE_128)
+ builtin_define ("__LONG_DOUBLE_128__");
+
+ switch (rs6000_current_abi)
+ {
+ case ABI_V4:
+ builtin_define ("_CALL_SYSV");
+ break;
+ case ABI_AIX:
+ builtin_define ("_CALL_AIXDESC");
+ builtin_define ("_CALL_AIX");
+ break;
+ case ABI_DARWIN:
+ builtin_define ("_CALL_DARWIN");
+ break;
+ default:
+ break;
+ }
+
+ /* Let the compiled code know if 'f' class registers will not be available. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ builtin_define ("__NO_FPRS__");
+}
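
Code targeting this compiler typically keys on the macros defined above; a few representative feature tests (sketch only):

    #ifdef __ALTIVEC__
    /* AltiVec is available; __VEC__ carries the interface version.  */
    #endif

    #ifdef _ARCH_PPC64
    /* 64-bit PowerPC instructions are available.  */
    #endif

    #ifdef __NO_FPRS__
    /* No 'f' class registers; avoid floating-point register code.  */
    #endif
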
+
+
+struct altivec_builtin_types
+{
+ enum rs6000_builtins code;
+ enum rs6000_builtins overloaded_code;
+ signed char ret_type;
+ signed char op1;
+ signed char op2;
+ signed char op3;
+};
+
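
Each row of the table that follows maps one argument signature of an overloaded vec_* intrinsic to the specific machine builtin implementing it; the C front end walks these rows to resolve a call by its operand types. For example, the VEC_ADD rows let a single vec_add spelling dispatch differently (usage sketch, assuming <altivec.h> and AltiVec enabled):

    #include <altivec.h>

    vector signed char qa, qb;
    vector float       fa, fb;

    void
    example (void)
    {
      /* Resolves through the VEC_ADD -> VADDUBM rows (V16QI operands).  */
      vector signed char qs = vec_add (qa, qb);

      /* Resolves through the VEC_ADD -> VADDFP row (V4SF operands).  */
      vector float fs = vec_add (fa, fb);
    }
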
+const struct altivec_builtin_types altivec_overloaded_builtins[] = {
+ /* Unary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_CEIL, ALTIVEC_BUILTIN_VRFIP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_EXPTE, ALTIVEC_BUILTIN_VEXPTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_FLOOR, ALTIVEC_BUILTIN_VRFIM,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_LOGE, ALTIVEC_BUILTIN_VLOGEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RE, ALTIVEC_BUILTIN_VREFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RSQRTE, ALTIVEC_BUILTIN_VRSQRTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_TRUNC, ALTIVEC_BUILTIN_VRFIZ,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+
+ /* Binary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDFP, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
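+ /* vec_avg: rounded average, one instruction per element width and
+    signedness.  */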
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSW, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUW, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSH, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUH, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSB, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUB, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
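+ /* vec_cmpb (float bounds compare) and vec_cmpeq (equality); the
+    compares yield an element-wise mask.  */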
+ { ALTIVEC_BUILTIN_VEC_CMPB, ALTIVEC_BUILTIN_VCMPBFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQFP, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
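+ /* vec_cmpge (float only) and vec_cmpgt.  */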
+ { ALTIVEC_BUILTIN_VEC_CMPGE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTFP, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
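+ /* vec_cmple and vec_cmplt have no instruction of their own: a <= b
+    is b >= a, so they reuse vcmpgefp and the vcmpgt forms with the
+    operands reversed.  */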
+ { ALTIVEC_BUILTIN_VEC_CMPLE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
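+ /* Fixed-point/float conversions vec_ctf, vec_cts and vec_ctu; the
+    INTSI operand is a small literal scale factor.  */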
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
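+ /* vec_ld: aligned vector load (lvx).  A '~' on a type code marks a
+    pointer to that type, so these entries accept both vector and
+    scalar element pointers.  */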
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
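+ /* vec_lde: load one element (lvebx/lvehx/lvewx), chosen by the
+    pointed-to scalar type.  */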
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
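+ /* vec_ldl: load vector, marking the cache line least recently
+    used (lvxl).  */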
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
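+ /* vec_lvsl and vec_lvsr: permute control vectors for unaligned
+    accesses; the result is always vector unsigned char, whatever the
+    pointer type.  */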
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
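+ /* vec_max: element-wise maximum.  */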
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXFP, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
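+ /* vec_mergeh and vec_mergel: interleave the high or low halves of
+    the two operands.  */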
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
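+ /* vec_min: element-wise minimum.  */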
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINFP, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
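+ /* vec_mule and vec_mulo: widening multiplies of the even or odd
+    elements; results are twice the element width.  */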
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUB, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESB, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUH, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESH, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSH, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUH, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSB, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUB, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
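+ /* vec_nor (same-type operands only) and vec_or (mixed combinations
+    as for vec_and).  */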
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
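+ /* Narrowing packs: vec_pack truncates, vec_packs saturates,
+    vec_packsu saturates to unsigned, and vec_packpx packs 32-bit
+    pixels to 1/5/5/5 format.  */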
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKPX, ALTIVEC_BUILTIN_VPKPX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWSS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHSS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
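+ /* vec_rl: rotate left; counts come from an unsigned vector of the
+    same element width.  */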
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
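+ /* Left shifts: vec_sl shifts each element, vec_sll shifts the full
+    register by a bit count, vec_slo by an octet count.  */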
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
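+ /* vec_splat: replicate the element selected by a small literal
+    index.  */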
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBFP, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SHS, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SBS, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4UBS, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM2S, ALTIVEC_BUILTIN_VSUM2SWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+
+ /* Ternary AltiVec builtins. */
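+ /* Each entry pairs an overloaded builtin code with the specific
+    instruction builtin that implements it, followed by the result
+    type and up to three operand types.  A complemented type code
+    (~TYPE) denotes a pointer to TYPE, so the first VEC_DST entry
+    below types vec_dst (const vector unsigned char *, int, int).
+    RS6000_BTI_NOT_OPAQUE (used by the vec_sld entries below) marks
+    a literal operand that is accepted without type checking here.  */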
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_MADDS, ALTIVEC_BUILTIN_VMHADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MRADDS, ALTIVEC_BUILTIN_VMHRADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMMBM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUBM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_NMSUB, ALTIVEC_BUILTIN_VNMSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+
+ /* Predicates. */
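+ /* Each entry below maps an overloaded vec_all_* or vec_any_* predicate
+ to a non-overloaded vcmp*_p builtin: the first operand (RS6000_BTI_INTSI)
+ is the integer condition selector, the remaining two are the vector
+ operands, and the int result is derived from the comparison outcome. */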
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+ /* cmpge resolves to the same builtins as cmpgt for all cases except
+ floating point. There is further code to deal with this special
+ case in altivec_build_resolved_builtin. */
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGEFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+ { 0, 0, 0, 0, 0, 0 }
+};
+
+
+/* Convert a type stored into a struct altivec_builtin_types as ID,
+ into a tree. The types are in rs6000_builtin_types: negative values
+ create a pointer type for the type associated with ~ID. Note that ~ is
+ a bitwise NOT (one's complement) rather than an arithmetic negation;
+ otherwise a pointer type for ID 0 could not be represented. */
+
+static inline tree
+rs6000_builtin_type (int id)
+{
+ tree t;
+ t = rs6000_builtin_types[id < 0 ? ~id : id];
+ return id < 0 ? build_pointer_type (t) : t;
+}
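+
+/* For example, rs6000_builtin_type (RS6000_BTI_V4SF) yields the
+ "vector float" type node, while rs6000_builtin_type (~RS6000_BTI_V4SF)
+ yields "vector float *", matching the ~RS6000_BTI_* entries used for
+ the address operands of the store builtins above. */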
+
+/* Check whether the type of an argument, T, is compatible with a
+ type ID stored into a struct altivec_builtin_types. Integer
+ types are considered compatible; otherwise, the language hook
+ lang_hooks.types_compatible_p makes the decision. */
+
+static inline bool
+rs6000_builtin_type_compatible (tree t, int id)
+{
+ tree builtin_type;
+ builtin_type = rs6000_builtin_type (id);
+ if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
+ return true;
+ else
+ return lang_hooks.types_compatible_p (t, builtin_type);
+}
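+
+/* Under this rule an "int", "long" or even "char" offset argument matches
+ the RS6000_BTI_INTSI operands in the table, since both sides satisfy
+ INTEGRAL_TYPE_P; vector and pointer arguments must instead pass the
+ front end's types_compatible_p check. */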
+
+
+/* Build a tree for a function call to an Altivec non-overloaded builtin.
+ The overloaded builtin that matched the types and args is described
+ by DESC. The N arguments are given in ARGS.
+
+ Actually the only thing this does is call fold_convert on ARGS, with
+ a small exception for vec_{all,any}_{ge,le} predicates. */
+
+static tree
+altivec_build_resolved_builtin (tree *args, int n,
+ const struct altivec_builtin_types *desc)
+{
+ tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
+ tree ret_type = rs6000_builtin_type (desc->ret_type);
+ tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
+ tree arglist = NULL_TREE, arg_type[3];
+
+ int i;
+ for (i = 0; i < n; i++)
+ arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);
+
+ /* The AltiVec overloading implementation is overall gross, but this
+ is particularly disgusting. The vec_{all,any}_{ge,le} builtins
+ are completely different for floating-point vs. integer vector
+ types, because the former has vcmpgefp, but the latter should use
+ vcmpgtXX.
+
+ In practice, the second and third arguments are swapped, and the
+ condition (LT vs. EQ, which is recognizable by bit 1 of the first
+ argument) is reversed. Patch the arguments here before building
+ the resolved CALL_EXPR. */
+ if (desc->code == ALTIVEC_BUILTIN_VCMPGE_P
+ && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P)
+ {
+ tree t;
+ t = args[2], args[2] = args[1], args[1] = t;
+ t = arg_type[2], arg_type[2] = arg_type[1], arg_type[1] = t;
+
+ args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
+ build_int_cst (NULL_TREE, 2));
+ }
+
+ while (--n >= 0)
+ arglist = tree_cons (NULL_TREE,
+ fold_convert (arg_type[n], args[n]),
+ arglist);
+
+ return fold_convert (ret_type,
+ build_function_call_expr (impl_fndecl, arglist));
+}
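+
+/* Concretely, for integer vectors vec_all_ge (a, b) is resolved to the
+ corresponding vcmpgt*_p builtin with B and A swapped, and the XOR with 2
+ above flips the LT/EQ bit of the condition selector, so "all (a >= b)"
+ is computed as "no element satisfies (b > a)". */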
+
+/* Implementation of the resolve_overloaded_builtin target hook, to
+ support Altivec's overloaded builtins. */
+
+tree
+altivec_resolve_overloaded_builtin (tree fndecl, tree arglist)
+{
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ tree types[3], args[3];
+ const struct altivec_builtin_types *desc;
+ int n;
+
+ if (fcode < ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ || fcode > ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ return NULL_TREE;
+
+ for (n = 0;
+ !VOID_TYPE_P (TREE_VALUE (fnargs)) && arglist;
+ fnargs = TREE_CHAIN (fnargs), arglist = TREE_CHAIN (arglist), n++)
+ {
+ tree decl_type = TREE_VALUE (fnargs);
+ tree arg = TREE_VALUE (arglist);
+ tree type;
+
+ if (arg == error_mark_node)
+ return error_mark_node;
+
+ if (n >= 3)
+ abort ();
+
+ arg = default_conversion (arg);
+
+ /* The C++ front-end converts float * to const void * using
+ NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */
+ type = TREE_TYPE (arg);
+ if (POINTER_TYPE_P (type)
+ && TREE_CODE (arg) == NOP_EXPR
+ && lang_hooks.types_compatible_p (TREE_TYPE (arg),
+ const_ptr_type_node)
+ && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
+ ptr_type_node))
+ {
+ arg = TREE_OPERAND (arg, 0);
+ type = TREE_TYPE (arg);
+ }
+
+ /* Remove the const from the pointers to simplify the overload
+ matching further down. */
+ if (POINTER_TYPE_P (decl_type)
+ && POINTER_TYPE_P (type)
+ && TYPE_QUALS (TREE_TYPE (type)) != 0)
+ {
+ if (TYPE_READONLY (TREE_TYPE (type))
+ && !TYPE_READONLY (TREE_TYPE (decl_type)))
+ warning (0, "passing arg %d of %qE discards qualifiers from"
+ "pointer target type", n + 1, fndecl);
+ type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
+ 0));
+ arg = fold_convert (type, arg);
+ }
+
+ args[n] = arg;
+ types[n] = type;
+ }
+
+ /* If the number of arguments did not match the prototype, return NULL
+ and the generic code will issue the appropriate error message. */
+ if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || arglist)
+ return NULL;
+
+ if (n == 0)
+ abort ();
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
+ {
+ if (TREE_CODE (types[0]) != VECTOR_TYPE)
+ goto bad;
+
+ return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
+ }
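+ /* (For instance, vec_step applied to a "vector float" value folds to
+ the constant 4, the number of vector subparts of V4SF.) */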
+
+ for (desc = altivec_overloaded_builtins;
+ desc->code && desc->code != fcode; desc++)
+ continue;
+
+ /* For arguments beyond the last one a builtin accepts, the table
+ holds RS6000_BTI_NOT_OPAQUE in the opX fields, which makes the
+ compatibility check below succeed unconditionally. */
+ for (; desc->code == fcode; desc++)
+ if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[0], desc->op1))
+ && (desc->op2 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[1], desc->op2))
+ && (desc->op3 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[2], desc->op3)))
+ return altivec_build_resolved_builtin (args, n, desc);
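+ /* As an example, vec_stl (v, 0, p) with V a "vector float" and P a
+ "float *" matches the VEC_STL/STVXL rows above and resolves to the
+ non-overloaded __builtin_altivec_stvxl. */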
+
+ bad:
+ error ("invalid parameter combination for AltiVec intrinsic");
+ return error_mark_node;
+}
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-modes.def b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-modes.def
new file mode 100644
index 000000000..c0599b396
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-modes.def
@@ -0,0 +1,47 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* 128-bit floating point. ABI_V4 uses IEEE quad; AIX/Darwin
+ adjust this in rs6000_override_options. */
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* PSImode is used for the XER register. The XER register
+ is not used for anything; perhaps it should be deleted,
+ except that doing so would change register numbers. */
+PARTIAL_INT_MODE (SI);
+
+/* Add any extra modes needed to represent the condition code.
+
+ For the RS/6000, we need separate modes when unsigned (logical) comparisons
+ are being done and we need a separate mode for floating-point. We also
+ use a mode for the case when we are comparing the results of two
+ comparisons, as then only the EQ bit is valid in the register. */
+
+CC_MODE (CCUNS);
+CC_MODE (CCFP);
+CC_MODE (CCEQ);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODE (INT, DI, 1);
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-protos.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-protos.h
new file mode 100644
index 000000000..0db9c8693
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000-protos.h
@@ -0,0 +1,185 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef GCC_RS6000_PROTOS_H
+#define GCC_RS6000_PROTOS_H
+
+/* Declare functions in rs6000.c */
+
+#ifdef RTX_CODE
+
+#ifdef TREE_CODE
+extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, int, int, int);
+extern void rs6000_va_start (tree, rtx);
+#endif /* TREE_CODE */
+
+extern bool easy_altivec_constant (rtx, enum machine_mode);
+extern bool macho_lo_sum_memory_operand (rtx, enum machine_mode);
+extern int num_insns_constant (rtx, enum machine_mode);
+extern int num_insns_constant_wide (HOST_WIDE_INT);
+extern int small_data_operand (rtx, enum machine_mode);
+extern bool toc_relative_expr_p (rtx);
+extern bool invalid_e500_subreg (rtx, enum machine_mode);
+extern void validate_condition_mode (enum rtx_code, enum machine_mode);
+extern bool legitimate_constant_pool_address_p (rtx);
+extern bool legitimate_indirect_address_p (rtx, int);
+
+extern rtx rs6000_got_register (rtx);
+extern rtx find_addr_reg (rtx);
+extern rtx gen_easy_altivec_constant (rtx);
+extern const char *output_vec_const_move (rtx *);
+extern void rs6000_expand_vector_init (rtx, rtx);
+extern void rs6000_expand_vector_set (rtx, rtx, int);
+extern void rs6000_expand_vector_extract (rtx, rtx, int);
+extern void build_mask64_2_operands (rtx, rtx *);
+extern int expand_block_clear (rtx[]);
+extern int expand_block_move (rtx[]);
+extern const char * rs6000_output_load_multiple (rtx[]);
+extern int includes_lshift_p (rtx, rtx);
+extern int includes_rshift_p (rtx, rtx);
+extern int includes_rldic_lshift_p (rtx, rtx);
+extern int includes_rldicr_lshift_p (rtx, rtx);
+extern int insvdi_rshift_rlwimi_p (rtx, rtx, rtx);
+extern int registers_ok_for_quad_peep (rtx, rtx);
+extern int mems_ok_for_quad_peep (rtx, rtx);
+extern bool gpr_or_gpr_p (rtx, rtx);
+extern enum reg_class rs6000_secondary_reload_class (enum reg_class,
+ enum machine_mode, rtx);
+extern int ccr_bit (rtx, int);
+extern int extract_MB (rtx);
+extern int extract_ME (rtx);
+extern void rs6000_output_function_entry (FILE *, const char *);
+extern void print_operand (FILE *, rtx, int);
+extern void print_operand_address (FILE *, rtx);
+extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
+ enum rtx_code);
+extern void rs6000_emit_sCOND (enum rtx_code, rtx);
+extern void rs6000_emit_cbranch (enum rtx_code, rtx);
+extern char * output_cbranch (rtx, const char *, int, rtx);
+extern char * output_e500_flip_gt_bit (rtx, rtx);
+extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
+extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
+extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx);
+extern void rs6000_emit_sync (enum rtx_code, enum machine_mode,
+ rtx, rtx, rtx, rtx, bool);
+extern void rs6000_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swap (rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_expand_compare_and_swapqhi (rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swapqhi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_lock_test_and_set (rtx, rtx, rtx, rtx);
+extern void rs6000_emit_swdivsf (rtx, rtx, rtx);
+extern void rs6000_emit_swdivdf (rtx, rtx, rtx);
+extern void output_toc (FILE *, rtx, int, enum machine_mode);
+extern void rs6000_initialize_trampoline (rtx, rtx, rtx);
+extern rtx rs6000_longcall_ref (rtx);
+extern void rs6000_fatal_bad_address (rtx);
+extern rtx create_TOC_reference (rtx);
+extern void rs6000_split_multireg_move (rtx, rtx);
+extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
+extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
+extern rtx rs6000_legitimize_reload_address (rtx, enum machine_mode,
+ int, int, int, int *);
+extern int rs6000_legitimate_address (enum machine_mode, rtx, int);
+extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int);
+extern bool rs6000_mode_dependent_address (rtx);
+extern bool rs6000_offsettable_memref_p (rtx);
+extern rtx rs6000_return_addr (int, rtx);
+extern void rs6000_output_symbol_ref (FILE*, rtx);
+extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int);
+
+extern rtx rs6000_machopic_legitimize_pic_address (rtx, enum machine_mode,
+ rtx);
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+extern unsigned int rs6000_special_round_type_align (tree, unsigned int,
+ unsigned int);
+/* APPLE LOCAL begin mainline 2006-10-31 PR 23067, radar 4869885 */
+extern unsigned int darwin_rs6000_special_round_type_align (tree, unsigned int,
+ unsigned int);
+/* APPLE LOCAL end mainline 2006-10-31 PR 23067, radar 4869885 */
+extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int, int);
+extern int function_arg_boundary (enum machine_mode, tree);
+extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
+extern tree altivec_resolve_overloaded_builtin (tree, tree);
+extern rtx rs6000_function_value (tree, tree);
+extern rtx rs6000_libcall_value (enum machine_mode);
+extern rtx rs6000_va_arg (tree, tree);
+extern int function_ok_for_sibcall (tree);
+extern void rs6000_elf_declare_function_name (FILE *, const char *, tree);
+extern bool rs6000_elf_in_small_data_p (tree);
+#ifdef ARGS_SIZE_RTX
+/* expr.h defines ARGS_SIZE_RTX and `enum direction' */
+extern enum direction function_arg_padding (enum machine_mode, tree);
+#endif /* ARGS_SIZE_RTX */
+
+#endif /* TREE_CODE */
+
+extern void optimization_options (int, int);
+extern void rs6000_override_options (const char *);
+extern int direct_return (void);
+extern int first_reg_to_save (void);
+extern int first_fp_reg_to_save (void);
+extern void output_ascii (FILE *, const char *, int);
+extern void rs6000_gen_section_name (char **, const char *, const char *);
+extern void output_function_profiler (FILE *, int);
+extern void output_profile_hook (int);
+extern int rs6000_trampoline_size (void);
+extern int get_TOC_alias_set (void);
+extern void rs6000_emit_prologue (void);
+extern void rs6000_emit_load_toc_table (int);
+extern void rs6000_aix_emit_builtin_unwind_init (void);
+extern unsigned int rs6000_dbx_register_number (unsigned int);
+extern void rs6000_emit_epilogue (int);
+extern void rs6000_emit_eh_reg_restore (rtx, rtx);
+extern const char * output_isel (rtx *);
+extern int rs6000_register_move_cost (enum machine_mode,
+ enum reg_class, enum reg_class);
+extern int rs6000_memory_move_cost (enum machine_mode, enum reg_class, int);
+extern bool rs6000_tls_referenced_p (rtx);
+extern int rs6000_hard_regno_nregs (int, enum machine_mode);
+extern void rs6000_conditional_register_usage (void);
+/* APPLE LOCAL AltiVec */
+extern tree rs6000_fold_builtin (tree, tree, bool);
+/* APPLE LOCAL CW asm blocks */
+extern const char *rs6000_iasm_register_name (const char *, char *);
+/* APPLE LOCAL 3399553 */
+extern void rs6000_expand_flt_rounds (rtx);
+
+/* Declare functions in rs6000-c.c */
+
+extern void rs6000_pragma_longcall (struct cpp_reader *);
+extern void rs6000_cpu_cpp_builtins (struct cpp_reader *);
+
+/* APPLE LOCAL begin AltiVec */
+extern struct cpp_hashnode *rs6000_macro_to_expand (struct cpp_reader *,
+ const struct cpp_token *);
+/* APPLE LOCAL end AltiVec */
+
+#if TARGET_MACHO
+char *output_call (rtx, rtx *, int, int);
+#endif
+
+extern bool rs6000_hard_regno_mode_ok_p[][FIRST_PSEUDO_REGISTER];
+#endif /* rs6000-protos.h */
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.c b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.c
new file mode 100644
index 000000000..a26af4d31
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.c
@@ -0,0 +1,22252 @@
+/* Subroutines used for code generation on IBM RS/6000.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "obstack.h"
+#include "tree.h"
+#include "expr.h"
+#include "optabs.h"
+#include "except.h"
+#include "function.h"
+#include "output.h"
+#include "basic-block.h"
+#include "integrate.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "hashtab.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "reload.h"
+#include "cfglayout.h"
+#include "sched-int.h"
+#include "tree-gimple.h"
+/* APPLE LOCAL 3893112 */
+#include "params.h"
+#include "intl.h"
+#include "params.h"
+#include "tm-constrs.h"
+#if TARGET_XCOFF
+#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
+#endif
+#if TARGET_MACHO
+#include "gstab.h" /* for N_SLINE */
+#endif
+
+/* APPLE LOCAL begin pascal strings */
+#include "../../libcpp/internal.h"
+extern struct cpp_reader* parse_in;
+/* APPLE LOCAL end pascal strings */
+
+/* APPLE LOCAL begin Macintosh alignment */
+#ifndef OPTION_ALIGN_MAC68K
+#define OPTION_ALIGN_MAC68K 0
+#endif
+/* APPLE LOCAL end Macintosh alignment */
+
+#ifndef TARGET_NO_PROTOTYPE
+#define TARGET_NO_PROTOTYPE 0
+#endif
+
+#define min(A,B) ((A) < (B) ? (A) : (B))
+#define max(A,B) ((A) > (B) ? (A) : (B))
+
+/* Structure used to define the rs6000 stack */
+typedef struct rs6000_stack {
+ int first_gp_reg_save; /* first callee saved GP register used */
+ int first_fp_reg_save; /* first callee saved FP register used */
+ int first_altivec_reg_save; /* first callee saved AltiVec register used */
+ int lr_save_p; /* true if the link reg needs to be saved */
+ int cr_save_p; /* true if the CR reg needs to be saved */
+ unsigned int vrsave_mask; /* mask of vec registers to save */
+ int push_p; /* true if we need to allocate stack space */
+ int calls_p; /* true if the function makes any calls */
+ int world_save_p; /* true if we're saving *everything*:
+ r13-r31, cr, f14-f31, vrsave, v20-v31 */
+ enum rs6000_abi abi; /* which ABI to use */
+ int gp_save_offset; /* offset to save GP regs from initial SP */
+ int fp_save_offset; /* offset to save FP regs from initial SP */
+ int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
+ int lr_save_offset; /* offset to save LR from initial SP */
+ int cr_save_offset; /* offset to save CR from initial SP */
+ int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
+ int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
+ int varargs_save_offset; /* offset to save the varargs registers */
+ int ehrd_offset; /* offset to EH return data */
+ int reg_size; /* register size (4 or 8) */
+ HOST_WIDE_INT vars_size; /* variable save area size */
+ int parm_size; /* outgoing parameter size */
+ int save_size; /* save area size */
+ int fixed_size; /* fixed size of stack frame */
+ int gp_size; /* size of saved GP registers */
+ int fp_size; /* size of saved FP registers */
+ int altivec_size; /* size of saved AltiVec registers */
+ int cr_size; /* size to hold CR if not in save_size */
+ int vrsave_size; /* size to hold VRSAVE if not in save_size */
+ int altivec_padding_size; /* size of altivec alignment padding if
+ not in save_size */
+ int spe_gp_size; /* size of 64-bit GPR save size for SPE */
+ int spe_padding_size; /* size of SPE alignment padding if
+ not in save_size */
+ HOST_WIDE_INT total_size; /* total bytes allocated for stack */
+ int spe_64bit_regs_used; /* nonzero if SPE 64-bit regs are used */
+} rs6000_stack_t;
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct machine_function GTY(())
+{
+ /* Flags if __builtin_return_address (n) with n >= 1 was used. */
+ int ra_needs_full_frame;
+ /* Some local-dynamic symbol. */
+ const char *some_ld_name;
+ /* Whether the instruction chain has been scanned already. */
+ int insn_chain_scanned_p;
+ /* Flags if __builtin_return_address (0) was used. */
+ int ra_need_lr;
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ /* Substitute PIC register in leaf functions */
+ unsigned int substitute_pic_base_reg;
+ /* APPLE LOCAL end volatile pic base reg in leaves */
+ /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
+ varargs save area. */
+ HOST_WIDE_INT varargs_save_offset;
+} machine_function;
+
+/* Target cpu type */
+
+enum processor_type rs6000_cpu;
+struct rs6000_cpu_select rs6000_select[3] =
+{
+ /* switch name, tune arch */
+ { (const char *)0, "--with-cpu=", 1, 1 },
+ { (const char *)0, "-mcpu=", 1, 1 },
+ { (const char *)0, "-mtune=", 1, 0 },
+};
+
+/* APPLE LOCAL begin 5774356 */
+static int debug_sp_offset = 0;
+static int debug_vrsave_offset = 0;
+/* APPLE LOCAL end 5774356 */
+/* Always emit branch hint bits. */
+static GTY(()) bool rs6000_always_hint;
+
+/* Schedule instructions for group formation. */
+static GTY(()) bool rs6000_sched_groups;
+
+/* Support for -msched-costly-dep option. */
+const char *rs6000_sched_costly_dep_str;
+enum rs6000_dependence_cost rs6000_sched_costly_dep;
+
+/* Support for -minsert-sched-nops option. */
+const char *rs6000_sched_insert_nops_str;
+enum rs6000_nop_insertion rs6000_sched_insert_nops;
+
+/* Support targetm.vectorize.builtin_mask_for_load. */
+static GTY(()) tree altivec_builtin_mask_for_load;
+
+/* Size of long double. */
+int rs6000_long_double_type_size;
+
+/* IEEE quad extended precision long double. */
+int rs6000_ieeequad;
+
+/* Whether -mabi=altivec has appeared. */
+int rs6000_altivec_abi;
+
+/* Nonzero if we want SPE ABI extensions. */
+int rs6000_spe_abi;
+
+/* Nonzero if floating point operations are done in the GPRs. */
+int rs6000_float_gprs = 0;
+
+/* Nonzero if we want Darwin's struct-by-value-in-regs ABI. */
+int rs6000_darwin64_abi;
+
+/* Set to nonzero once AIX common-mode calls have been defined. */
+static GTY(()) int common_mode_defined;
+
+/* Save information from a "cmpxx" operation until the branch or scc is
+ emitted. */
+rtx rs6000_compare_op0, rs6000_compare_op1;
+int rs6000_compare_fp_p;
+
+/* Label number of the label created for -mrelocatable; we call to it
+ so we can get the address of the GOT section */
+int rs6000_pic_labelno;
+
+#ifdef USING_ELFOS_H
+/* Which abi to adhere to */
+const char *rs6000_abi_name;
+
+/* Semantics of the small data area */
+enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;
+
+/* Which small data model to use */
+const char *rs6000_sdata_name = (char *)0;
+
+/* Counter for labels which are to be placed in .fixup. */
+int fixuplabelno = 0;
+#endif
+
+/* Bit size of immediate TLS offsets and string from which it is decoded. */
+int rs6000_tls_size = 32;
+const char *rs6000_tls_size_string;
+
+/* ABI enumeration available for subtarget to use. */
+enum rs6000_abi rs6000_current_abi;
+
+/* Whether to use variant of AIX ABI for PowerPC64 Linux. */
+int dot_symbols;
+
+/* Debug flags */
+const char *rs6000_debug_name;
+int rs6000_debug_stack; /* debug stack applications */
+int rs6000_debug_arg; /* debug argument handling */
+
+/* Value is TRUE if register/mode pair is acceptable. */
+bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Built in types. */
+
+tree rs6000_builtin_types[RS6000_BTI_MAX];
+/* APPLE LOCAL begin AltiVec */
+/* NB: We do not store the PIM operations/predicates in this array. */
+tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+/* APPLE LOCAL end AltiVec */
+
+const char *rs6000_traceback_name;
+static enum {
+ traceback_default = 0,
+ traceback_none,
+ traceback_part,
+ traceback_full
+} rs6000_traceback;
+
+/* Flag to say the TOC is initialized */
+int toc_initialized;
+char toc_label_name[10];
+
+static GTY(()) section *read_only_data_section;
+static GTY(()) section *private_data_section;
+static GTY(()) section *read_only_private_data_section;
+static GTY(()) section *sdata2_section;
+static GTY(()) section *toc_section;
+
+/* Control alignment for fields within structures. */
+/* Flags parsed from the -malign-XXXXX string. */
+int rs6000_alignment_flags;
+
+/* True for any options that were explicitly set. */
+struct {
+ bool aix_struct_ret; /* True if -maix-struct-ret was used. */
+ bool alignment; /* True if -malign- was used. */
+ bool abi; /* True if -mabi=spe/nospe was used. */
+ bool spe; /* True if -mspe= was used. */
+ bool float_gprs; /* True if -mfloat-gprs= was used. */
+ bool isel; /* True if -misel was used. */
+ bool long_double; /* True if -mlong-double- was used. */
+ bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
+} rs6000_explicit_options;
+
+struct builtin_description
+{
+ /* mask is not const because we're going to alter it below. This
+ nonsense will go away when we rewrite the -march infrastructure
+ to give us more target flag bits. */
+ unsigned int mask;
+ const enum insn_code icode;
+ const char *const name;
+ const enum rs6000_builtins code;
+};
+
+/* Target cpu costs. */
+
+struct processor_costs {
+ const int mulsi; /* cost of SImode multiplication. */
+ const int mulsi_const; /* cost of SImode multiplication by constant. */
+ const int mulsi_const9; /* cost of SImode mult by short constant. */
+ const int muldi; /* cost of DImode multiplication. */
+ const int divsi; /* cost of SImode division. */
+ const int divdi; /* cost of DImode division. */
+ const int fp; /* cost of simple SFmode and DFmode insns. */
+ const int dmul; /* cost of DFmode multiplication (and fmadd). */
+ const int sdiv; /* cost of SFmode division (fdivs). */
+ const int ddiv; /* cost of DFmode division (fdiv). */
+};
+
+const struct processor_costs *rs6000_cost;
+
+/* Processor costs (relative to an add) */
+
+/* Instruction size costs on 32-bit processors. */
+static const
+struct processor_costs size32_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+};
+
+/* Instruction size costs on 64-bit processors. */
+static const
+struct processor_costs size64_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+};
+
+/* Instruction costs on RIOS1 processors. */
+static const
+struct processor_costs rios1_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (19), /* ddiv */
+};
+
+/* Instruction costs on RIOS2 processors. */
+static const
+struct processor_costs rios2_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (13), /* divsi */
+ COSTS_N_INSNS (13), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
+/* Instruction costs on RS64A processors. */
+static const
+struct processor_costs rs64a_cost = {
+ COSTS_N_INSNS (20), /* mulsi */
+ COSTS_N_INSNS (12), /* mulsi_const */
+ COSTS_N_INSNS (8), /* mulsi_const9 */
+ COSTS_N_INSNS (34), /* muldi */
+ COSTS_N_INSNS (65), /* divsi */
+ COSTS_N_INSNS (67), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (31), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on MPCCORE processors. */
+static const
+struct processor_costs mpccore_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (6), /* divsi */
+ COSTS_N_INSNS (6), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (10), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
+/* Instruction costs on PPC403 processors. */
+static const
+struct processor_costs ppc403_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (33), /* divsi */
+ COSTS_N_INSNS (33), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+};
+
+/* Instruction costs on PPC405 processors. */
+static const
+struct processor_costs ppc405_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (35), /* divsi */
+ COSTS_N_INSNS (35), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+};
+
+/* Instruction costs on PPC440 processors. */
+static const
+struct processor_costs ppc440_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (3), /* muldi */
+ COSTS_N_INSNS (34), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+};
+
+/* Instruction costs on PPC601 processors. */
+static const
+struct processor_costs ppc601_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (5), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (36), /* divsi */
+ COSTS_N_INSNS (36), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on PPC603 processors. */
+static const
+struct processor_costs ppc603_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (37), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+};
+
+/* Instruction costs on PPC604 processors. */
+static const
+struct processor_costs ppc604_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC604e processors. */
+static const
+struct processor_costs ppc604e_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC620 processors. */
+static const
+struct processor_costs ppc620_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+};
+
+/* Instruction costs on PPC630 processors. */
+static const
+struct processor_costs ppc630_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (21), /* ddiv */
+};
+
+/* Instruction costs on PPC750 and PPC7400 processors. */
+static const
+struct processor_costs ppc750_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (17), /* divsi */
+ COSTS_N_INSNS (17), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+};
+
+/* Instruction costs on PPC7450 processors. */
+static const
+struct processor_costs ppc7450_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (23), /* divsi */
+ COSTS_N_INSNS (23), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (21), /* sdiv */
+ COSTS_N_INSNS (35), /* ddiv */
+};
+
+/* Instruction costs on PPC8540 processors. */
+static const
+struct processor_costs ppc8540_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (29), /* sdiv */
+ COSTS_N_INSNS (29), /* ddiv */
+};
+
+/* Instruction costs on POWER4 and POWER5 processors. */
+static const
+struct processor_costs power4_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+};
+
+
+static bool rs6000_function_ok_for_sibcall (tree, tree);
+static const char *rs6000_invalid_within_doloop (rtx);
+static rtx rs6000_generate_compare (enum rtx_code);
+static void rs6000_maybe_dead (rtx);
+static void rs6000_emit_stack_tie (void);
+static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
+static rtx spe_synthesize_frame_save (rtx);
+static bool spe_func_has_64bit_regs_p (void);
+static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
+ int, HOST_WIDE_INT);
+static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
+static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int);
+static unsigned rs6000_hash_constant (rtx);
+static unsigned toc_hash_function (const void *);
+static int toc_hash_eq (const void *, const void *);
+static int constant_pool_expr_1 (rtx, int *, int *);
+static bool constant_pool_expr_p (rtx);
+static bool legitimate_small_data_p (enum machine_mode, rtx);
+static bool legitimate_indexed_address_p (rtx, int);
+static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
+static struct machine_function * rs6000_init_machine_status (void);
+static bool rs6000_assemble_integer (rtx, unsigned int, int);
+static bool no_global_regs_above (int);
+#ifdef HAVE_GAS_HIDDEN
+static void rs6000_assemble_visibility (tree, int);
+#endif
+static int rs6000_ra_ever_killed (void);
+static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static bool rs6000_ms_bitfield_layout_p (tree);
+static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
+static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
+/* APPLE LOCAL mangle_type 7105099 */
+static const char *rs6000_mangle_type (tree);
+extern const struct attribute_spec rs6000_attribute_table[];
+static void rs6000_set_default_type_attributes (tree);
+static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
+static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
+ tree);
+static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+static bool rs6000_return_in_memory (tree, tree);
+static void rs6000_file_start (void);
+#if TARGET_ELF
+static int rs6000_elf_reloc_rw_mask (void);
+static void rs6000_elf_asm_out_constructor (rtx, int);
+static void rs6000_elf_asm_out_destructor (rtx, int);
+static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
+static void rs6000_elf_asm_init_sections (void);
+static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
+static void rs6000_elf_encode_section_info (tree, rtx, int)
+ ATTRIBUTE_UNUSED;
+#endif
+static bool rs6000_use_blocks_for_constant_p (enum machine_mode, rtx);
+#if TARGET_XCOFF
+static void rs6000_xcoff_asm_output_anchor (rtx);
+static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
+static void rs6000_xcoff_asm_init_sections (void);
+static int rs6000_xcoff_reloc_rw_mask (void);
+static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
+static section *rs6000_xcoff_select_section (tree, int,
+ unsigned HOST_WIDE_INT);
+static void rs6000_xcoff_unique_section (tree, int);
+static section *rs6000_xcoff_select_rtx_section
+ (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
+static const char * rs6000_xcoff_strip_name_encoding (const char *);
+static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
+static void rs6000_xcoff_file_start (void);
+static void rs6000_xcoff_file_end (void);
+#endif
+/* APPLE LOCAL begin pragma reverse_bitfield */
+#if TARGET_MACHO
+static bool rs6000_reverse_bitfields_p (tree);
+#endif
+/* APPLE LOCAL end pragma reverse_bitfield */
+static int rs6000_variable_issue (FILE *, int, rtx, int);
+static bool rs6000_rtx_costs (rtx, int, int, int *);
+static int rs6000_adjust_cost (rtx, rtx, rtx, int);
+static bool is_microcoded_insn (rtx);
+static int is_dispatch_slot_restricted (rtx);
+static bool is_cracked_insn (rtx);
+static bool is_branch_slot_insn (rtx);
+static int rs6000_adjust_priority (rtx, int);
+static int rs6000_issue_rate (void);
+static bool rs6000_is_costly_dependence (rtx, rtx, rtx, int, int);
+static rtx get_next_active_insn (rtx, rtx);
+static bool insn_terminates_group_p (rtx , enum group_termination);
+static bool is_costly_group (rtx *, rtx);
+static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
+static int redefine_groups (FILE *, int, rtx, rtx);
+static int pad_groups (FILE *, int, rtx, rtx);
+static void rs6000_sched_finish (FILE *, int);
+static int rs6000_use_sched_lookahead (void);
+static tree rs6000_builtin_mask_for_load (void);
+
+static void def_builtin (int, const char *, tree, int);
+/* APPLE LOCAL mainline 4.2 5569774 */
+static bool rs6000_vector_alignment_reachable (tree, bool);
+static void rs6000_init_builtins (void);
+static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+static void altivec_init_builtins (void);
+static void rs6000_common_init_builtins (void);
+static void rs6000_init_libfuncs (void);
+
+static void enable_mask_for_builtins (struct builtin_description *, int,
+ enum rs6000_builtins,
+ enum rs6000_builtins);
+static tree build_opaque_vector_type (tree, int);
+static void spe_init_builtins (void);
+static rtx spe_expand_builtin (tree, rtx, bool *);
+static rtx spe_expand_stv_builtin (enum insn_code, tree);
+static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
+static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
+static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
+static rs6000_stack_t *rs6000_stack_info (void);
+static void debug_stack_info (rs6000_stack_t *);
+
+static rtx altivec_expand_builtin (tree, rtx, bool *);
+static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
+static rtx altivec_expand_st_builtin (tree, rtx, bool *);
+static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
+static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
+static rtx altivec_expand_predicate_builtin (enum insn_code,
+ const char *, tree, rtx);
+static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
+static rtx altivec_expand_stv_builtin (enum insn_code, tree);
+/* APPLE LOCAL begin AltiVec */
+static tree altivec_cov_rt_12 (tree, tree);
+static tree altivec_cov_rt_2p (tree);
+static tree altivec_cov_rt_1d (tree);
+static tree altivec_cov_rt_1h (tree);
+static struct altivec_pim_info *altivec_ovl_resolve (struct altivec_pim_info *,
+ tree, tree);
+static tree altivec_convert_args (tree, tree);
+/* APPLE LOCAL end AltiVec */
+static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
+static rtx altivec_expand_vec_set_builtin (tree);
+static rtx altivec_expand_vec_ext_builtin (tree, rtx);
+static int get_element_number (tree, tree);
+static bool rs6000_handle_option (size_t, const char *, int);
+static void rs6000_parse_tls_size_option (void);
+static void rs6000_parse_yes_no_option (const char *, const char *, int *);
+static int first_altivec_reg_to_save (void);
+static unsigned int compute_vrsave_mask (void);
+static void compute_save_world_info (rs6000_stack_t *info_ptr);
+static void is_altivec_return_reg (rtx, void *);
+static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
+int easy_vector_constant (rtx, enum machine_mode);
+static bool rs6000_is_opaque_type (tree);
+static rtx rs6000_dwarf_register_span (rtx);
+static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
+static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
+static rtx rs6000_tls_get_addr (void);
+static rtx rs6000_got_sym (void);
+static int rs6000_tls_symbol_ref_1 (rtx *, void *);
+static const char *rs6000_get_some_local_dynamic_name (void);
+static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
+static rtx rs6000_complex_function_value (enum machine_mode);
+static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
+ enum machine_mode, tree);
+static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ HOST_WIDE_INT, int);
+static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT,
+ rtx[], int *);
+static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT,
+ rtx[], int *);
+static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, tree, int, bool);
+static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
+static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
+static void setup_incoming_varargs (CUMULATIVE_ARGS *,
+ enum machine_mode, tree,
+ int *, int);
+/* APPLE LOCAL begin Altivec */
+static bool skip_vec_args (tree, int, int*);
+/* APPLE LOCAL end Altivec */
+static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static const char *invalid_arg_for_unprototyped_fn (tree, tree, tree);
+#if TARGET_MACHO
+static void macho_branch_islands (void);
+/* APPLE LOCAL 4380289 */
+static tree add_compiler_branch_island (tree, int);
+static int no_previous_def (tree function_name);
+static tree get_prev_label (tree function_name);
+static void rs6000_darwin_file_start (void);
+#endif
+
+static tree rs6000_build_builtin_va_list (void);
+static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static bool rs6000_scalar_mode_supported_p (enum machine_mode);
+static bool rs6000_vector_mode_supported_p (enum machine_mode);
+static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
+ enum machine_mode);
+static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
+ enum machine_mode);
+static int get_vsel_insn (enum machine_mode);
+static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
+static tree rs6000_stack_protect_fail (void);
+
+const int INSN_NOT_AVAILABLE = -1;
+static enum machine_mode rs6000_eh_return_filter_mode (void);
+
+/* Hash table stuff for keeping track of TOC entries. */
+
+struct toc_hash_struct GTY(())
+{
+ /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
+ ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
+ rtx key;
+ enum machine_mode key_mode;
+ int labelno;
+};
+
+static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
+
+/* Default register names. */
+char rs6000_reg_names[][8] =
+{
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "mq", "lr", "ctr","ap",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "xer",
+ /* AltiVec registers. */
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "vrsave", "vscr",
+ /* SPE registers. */
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
+ /* APPLE LOCAL 3399553 */
+ , "fpscr"
+};
+
+#ifdef TARGET_REGNAMES
+static const char alt_reg_names[][8] =
+{
+ "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+ "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
+ "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
+ "mq", "lr", "ctr", "ap",
+ "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
+ "xer",
+ /* AltiVec registers. */
+ "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
+ "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
+ "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
+ "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
+ "vrsave", "vscr",
+ /* SPE registers. */
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
+ /* APPLE LOCAL 3399553 */
+ , "fpscr"
+};
+#endif
+
+#ifndef MASK_STRICT_ALIGN
+#define MASK_STRICT_ALIGN 0
+#endif
+#ifndef TARGET_PROFILE_KERNEL
+#define TARGET_PROFILE_KERNEL 0
+#endif
+
+/* In the VRSAVE bitmask, the bit for %v0 is the most significant bit.  */
+#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
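+
+/* For example (illustrative only, derived from the definition above):
+ ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0 bit) and
+ ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31), so a
+ VRSAVE mask of 0xc0000000 marks %v0 and %v1 as live.  */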
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
+#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
+#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
+
+/* Default unaligned ops are only provided for ELF. Find the ops needed
+ for non-ELF systems. */
+#ifndef OBJECT_FORMAT_ELF
+#if TARGET_XCOFF
+/* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
+ 64-bit targets. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
+#else
+/* For Darwin. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
+#endif
+#endif
+
+/* This hook deals with fixups for relocatable code and DI-mode objects
+ in 64-bit code. */
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER rs6000_assemble_integer
+
+#ifdef HAVE_GAS_HIDDEN
+#undef TARGET_ASM_ASSEMBLE_VISIBILITY
+#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
+#endif
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
+
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
+#undef TARGET_SCHED_ADJUST_PRIORITY
+#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
+#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
+#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
+#undef TARGET_SCHED_FINISH
+#define TARGET_SCHED_FINISH rs6000_sched_finish
+
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
+
+#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
+#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
+
+/* APPLE LOCAL begin mainline 4.2 5569774 */
+#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
+#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
+/* APPLE LOCAL end mainline 4.2 5569774 */
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS rs6000_init_builtins
+
+
+/* APPLE LOCAL begin AltiVec */
+/* If we are running in Apple AltiVec (as opposed to FSF AltiVec) mode,
+ we will need to handle the Motorola PIM instructions ourselves instead
+ of relying on <altivec.h>. The rs6000_fold_builtin() routine will
+ rewrite the PIM instructions into the __builtin... (AltiVec)
+ instructions. */
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
+/* APPLE LOCAL end AltiVec */
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
+
+/* APPLE LOCAL begin mangle_type 7105099 */
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE rs6000_mangle_type
+/* APPLE LOCAL end mangle_type 7105099 */
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
+
+#if TARGET_MACHO
+#undef TARGET_BINDS_LOCAL_P
+#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
+/* APPLE LOCAL begin pragma reverse_bitfields */
+#undef TARGET_REVERSE_BITFIELDS_P
+#define TARGET_REVERSE_BITFIELDS_P rs6000_reverse_bitfields_p
+/* APPLE LOCAL end pragma reverse_bitfields */
+#endif
+
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
+
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
+
+#undef TARGET_INVALID_WITHIN_DOLOOP
+#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS rs6000_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_0
+
+#undef TARGET_VECTOR_OPAQUE_P
+#define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type
+
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
+
+/* On rs6000, function arguments are promoted, as are function return
+ values. */
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
+
+/* APPLE LOCAL begin Altivec */
+#undef TARGET_SKIP_VEC_ARGS
+#define TARGET_SKIP_VEC_ARGS skip_vec_args
+/* APPLE LOCAL end Altivec */
+
+/* Always strict argument naming on rs6000. */
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
+#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_SPLIT_COMPLEX_ARG
+#define TARGET_SPLIT_COMPLEX_ARG hook_bool_tree_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
+
+#undef TARGET_EH_RETURN_FILTER_MODE
+#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+
+#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
+#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION rs6000_handle_option
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT)
+
+#undef TARGET_STACK_PROTECT_FAIL
+#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
+
+/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
+ The PowerPC architecture requires only weak consistency among
+ processors--that is, memory accesses between processors need not be
+ sequentially consistent and memory accesses among processors can occur
+ in any order. The ability to order memory accesses weakly provides
+ opportunities for more efficient use of the system bus. Unless a
+ dependency exists, the 604e allows read operations to precede store
+ operations. */
+#undef TARGET_RELAXED_ORDERING
+#define TARGET_RELAXED_ORDERING true
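+
+/* A sketch of the consequence (illustrative): with relaxed ordering, a
+ store followed by a load from a different location may be observed
+ out of order by another processor unless the two are separated by a
+ sync (or the lighter lwsync) barrier.  This hook is what tells the
+ C++ front end, for instance, not to omit barriers when guarding the
+ initialization of local statics.  */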
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
+#endif
+
+/* APPLE LOCAL begin radar 5155743, mainline candidate */
+#undef TARGET_HAVE_DYNAMIC_STACK_SPACE
+#define TARGET_HAVE_DYNAMIC_STACK_SPACE true
+/* APPLE LOCAL end radar 5155743, mainline candidate */
+/* Use a 32-bit anchor range. This leads to sequences like:
+
+ addis tmp,anchor,high
+ add dest,tmp,low
+
+ where tmp itself acts as an anchor, and can be shared between
+ accesses to the same 64k page. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
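+
+/* Worked example (illustrative): a datum at anchor + 0x12345678 is
+ reached by "addis tmp,anchor,0x1234" followed by an access at
+ tmp+0x5678; other data in the same 64k page reuse TMP.  When the low
+ 16 bits are >= 0x8000 the high part must be biased by one, since the
+ 16-bit displacement is sign-extended.  */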
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+static int
+rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* The GPRs can hold any mode, but values bigger than one register
+ cannot go past R31. */
+ if (INT_REGNO_P (regno))
+ return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
+
+ /* The float registers can only hold floating modes and DImode.
+ This also excludes decimal float modes. */
+ if (FP_REGNO_P (regno))
+ return
+ (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
+ && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD);
+
+ /* The CR register can only hold CC modes. */
+ if (CR_REGNO_P (regno))
+ return GET_MODE_CLASS (mode) == MODE_CC;
+
+ if (XER_REGNO_P (regno))
+ return mode == PSImode;
+
+ /* AltiVec modes go only in AltiVec registers. */
+ if (ALTIVEC_REGNO_P (regno))
+ return ALTIVEC_VECTOR_MODE (mode);
+
+ /* ...but GPRs can hold SIMD data on the SPE in one register. */
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return 1;
+
+ /* We cannot put TImode anywhere except in the general registers, and
+ it must fit within the register set. */
+
+ return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
+}
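+
+/* For instance (illustrative): on a 32-bit target DImode needs two
+ GPRs, so it is rejected in r31 (the pair would run past R31) but
+ accepted in r30; DFmode is accepted in every FP register; a CR
+ field accepts only MODE_CC modes.  */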
+
+/* Initialize rs6000_hard_regno_mode_ok_p table. */
+static void
+rs6000_init_hard_regno_mode_ok (void)
+{
+ int r, m;
+
+ for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ if (rs6000_hard_regno_mode_ok (r, m))
+ rs6000_hard_regno_mode_ok_p[m][r] = true;
+}
+
+/* APPLE LOCAL begin axe stubs 5571540 */
+#ifndef DARWIN_LINKER_GENERATES_ISLANDS
+#define DARWIN_LINKER_GENERATES_ISLANDS 0
+#endif
+
+/* KEXTs still need branch islands. */
+#define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
+ || flag_mkernel || flag_apple_kext \
+ || (!flag_pic && !MACHO_DYNAMIC_NO_PIC_P))
+/* APPLE LOCAL end axe stubs 5571540 */
+
+/* APPLE LOCAL begin mainline 2007-02-20 5005743 */
+#if TARGET_MACHO
+/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
+
+static void
+darwin_rs6000_override_options (void)
+{
+ /* The Darwin ABI always includes AltiVec; it can't be (validly)
+ turned off. */
+ rs6000_altivec_abi = 1;
+ TARGET_ALTIVEC_VRSAVE = 1;
+
+ /* APPLE LOCAL begin ARM 5683689 */
+ if (!darwin_macosx_version_min && !darwin_iphoneos_version_min)
+ darwin_macosx_version_min = "10.1";
+ /* APPLE LOCAL end ARM 5683689 */
+
+ /* APPLE LOCAL begin ARM 5683689 */
+ /* APPLE LOCAL begin constant cfstrings */
+ if (darwin_constant_cfstrings < 0)
+ darwin_constant_cfstrings =
+ darwin_iphoneos_version_min
+ || (strverscmp (darwin_macosx_version_min, "10.2") >= 0);
+ /* APPLE LOCAL end constant cfstrings */
+ /* APPLE LOCAL end ARM 5683689 */
+
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ {
+ if (flag_pic)
+ warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
+ flag_pic = 0;
+ }
+ else if (flag_pic == 1)
+ {
+ flag_pic = 2;
+ }
+ /* APPLE LOCAL begin longcall */
+ if (TARGET_64BIT && TARGET_MACHO)
+ rs6000_default_long_calls = 0;
+ /* APPLE LOCAL end longcall */
+ }
+ if (TARGET_64BIT && ! TARGET_POWERPC64)
+ {
+ target_flags |= MASK_POWERPC64;
+ warning (0, "-m64 requires PowerPC64 architecture, enabling");
+ }
+ if (flag_mkernel)
+ /* APPLE LOCAL begin 5731065 */
+ /* Moved setting of SOFT_FLOAT into rs6000_handle_option. */
+ rs6000_default_long_calls = 1;
+ /* APPLE LOCAL end 5731065 */
+
+ /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
+ Altivec. */
+ if (!flag_mkernel && !flag_apple_kext
+ && TARGET_64BIT
+ && ! (target_flags_explicit & MASK_ALTIVEC))
+ target_flags |= MASK_ALTIVEC;
+
+ /* Unless the user (not the configurer) has explicitly overridden
+ it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4,
+ except when targeting the kernel. */
+ if (!flag_mkernel
+ && !flag_apple_kext
+ /* APPLE LOCAL ARM 5683689 */
+ && darwin_macosx_version_min
+ && strverscmp (darwin_macosx_version_min, "10.5") >= 0
+ && ! (target_flags_explicit & MASK_ALTIVEC)
+ && ! rs6000_select[1].string)
+ {
+ target_flags |= MASK_ALTIVEC;
+ }
+
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ /* I'm not sure if the branch island code needs stubs or not, so
+ assume it does. */
+ if (DARWIN_GENERATE_ISLANDS)
+ darwin_stubs = true;
+ /* APPLE LOCAL end axe stubs 5571540 */
+ /* APPLE LOCAL begin stack-protector default 5095227 */
+ /* Don't enable -fstack-protector by default for kexts on darwin ppc
+ targeting 10.6 because there's a bug that it exposes in some kext,
+ <rdar://problem/6034665>. */
+ if (flag_stack_protect == -1
+ && (flag_mkernel || flag_apple_kext)
+ && strcmp (darwin_macosx_version_min, "10.6") == 0)
+ flag_stack_protect = 0;
+ /* APPLE LOCAL end stack-protector default 5095227 */
+}
+#endif
+
+/* APPLE LOCAL end mainline 2007-02-20 5005743 */
+/* If not otherwise specified by a target, make 'long double' equivalent to
+ 'double'. */
+
+#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
+#endif
+
+/* Override command line options. Mostly we process the processor
+ type and sometimes adjust other TARGET_ options. */
+
+void
+rs6000_override_options (const char *default_cpu)
+{
+ size_t i, j;
+ struct rs6000_cpu_select *ptr;
+ int set_masks;
+ /* APPLE LOCAL -fast or -fastf or -fastcp */
+ enum processor_type mcpu_cpu = PROCESSOR_POWER4;
+
+ /* Simplifications for entries below. */
+
+ enum {
+ POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
+ POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
+ };
+
+ /* This table occasionally claims that a processor does not support
+ a particular feature even though it does, but the feature is slower
+ than the alternative. Thus, it shouldn't be relied on as a
+ complete description of the processor's support.
+
+ Please keep this list in order, and don't forget to update the
+ documentation in invoke.texi when adding a new processor or
+ flag. */
+ static struct ptt
+ {
+ const char *const name; /* Canonical processor name. */
+ const enum processor_type processor; /* Processor type enum value. */
+ const int target_enable; /* Target flags to enable. */
+ } const processor_target_table[]
+ = {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"403", PROCESSOR_PPC403,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
+ {"405", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"405fp", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"440", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"440fp", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
+ {"601", PROCESSOR_PPC601,
+ MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
+ {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"620", PROCESSOR_PPC620,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"630", PROCESSOR_PPC630,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
+ {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
+ {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"8540", PROCESSOR_PPC8540,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_STRICT_ALIGN},
+ /* 8548 has a dummy entry for now. */
+ {"8548", PROCESSOR_PPC8540,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_STRICT_ALIGN},
+ {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"970", PROCESSOR_POWER4,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
+ {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
+ {"G5", PROCESSOR_POWER4,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"power2", PROCESSOR_POWER,
+ MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
+ {"power3", PROCESSOR_PPC630,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"power4", PROCESSOR_POWER4,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"power5", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB},
+ {"power5+", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
+ {"power6", PROCESSOR_POWER5,
+ POWERPC_7400_MASK | MASK_POWERPC64 | MASK_MFCRF | MASK_POPCNTB
+ | MASK_FPRND},
+ {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
+ {"powerpc64", PROCESSOR_POWERPC64,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rios2", PROCESSOR_RIOS2,
+ MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
+ {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rs64", PROCESSOR_RS64A,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
+ };
+
+ const size_t ptt_size = ARRAY_SIZE (processor_target_table);
+
+ /* APPLE LOCAL begin -mmultiple/-mstring fixme */
+ /* Save current -mmultiple/-mno-multiple status. */
+ int multiple = (target_flags & MASK_MULTIPLE);
+ /* Save current -mstring/-mno-string status. */
+ int string = (target_flags & MASK_STRING);
+ /* APPLE LOCAL end -mmultiple/-mstring fixme */
+
+ /* Some OSs don't support saving the high part of 64-bit registers on
+ context switch. Other OSs don't support saving Altivec registers.
+ On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
+ settings; if the user wants either, it must be specified explicitly,
+ and we won't interfere with that specification. */
+
+ enum {
+ POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
+ POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
+ | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
+ | MASK_DLMZB)
+ };
+
+ rs6000_init_hard_regno_mode_ok ();
+
+ set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
+#ifdef OS_MISSING_POWERPC64
+ if (OS_MISSING_POWERPC64)
+ set_masks &= ~MASK_POWERPC64;
+#endif
+#ifdef OS_MISSING_ALTIVEC
+ if (OS_MISSING_ALTIVEC)
+ set_masks &= ~MASK_ALTIVEC;
+#endif
+
+ /* Don't let the processor default override flags given explicitly. */
+ set_masks &= ~target_flags_explicit;
+
+ /* Identify the processor type. */
+ rs6000_select[0].string = default_cpu;
+ rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
+
+ /* APPLE LOCAL begin -fast or -fastf or -fastcp */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ if (rs6000_select[1].string == (char *)0 && rs6000_select[2].string == (char *)0)
+ {
+ /* -mcpu and -mtune unspecified. Assume both are G5. */
+ rs6000_select[1].string = "G5";
+ rs6000_select[2].string = "G5";
+ }
+ }
+ /* APPLE LOCAL end -fast or -fastf or -fastcp */
+
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ {
+ ptr = &rs6000_select[i];
+ if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+ {
+ for (j = 0; j < ptt_size; j++)
+ if (! strcmp (ptr->string, processor_target_table[j].name))
+ {
+ if (ptr->set_tune_p)
+ rs6000_cpu = processor_target_table[j].processor;
+
+ if (ptr->set_arch_p)
+ {
+ target_flags &= ~set_masks;
+ target_flags |= (processor_target_table[j].target_enable
+ & set_masks);
+ /* APPLE LOCAL begin -fast or -fastf or -fastcp */
+ mcpu_cpu = processor_target_table[j].processor;
+ /* APPLE LOCAL end -fast or -fastf or -fastcp */
+ }
+ break;
+ }
+
+ if (j == ptt_size)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* APPLE LOCAL begin AltiVec */
+ /* If '-maltivec' has been specified or if anything else turns on
+ AltiVec, enable AltiVec optimizations, even if previously turned
+ off via '-faltivec'. */
+ if (TARGET_ALTIVEC)
+ flag_disable_opts_for_faltivec = 0;
+
+ /* Handle -m(no-)pim-altivec. */
+ if (rs6000_altivec_pim)
+ {
+ /* If '-faltivec' or '-mpim-altivec' has been specified and we
+ have not already selected AltiVec codegen, disable certain
+ unsafe AltiVec optimizations so that the resulting binary can
+ run on a G3. These may be re-enabled by subsequently
+ specifying '-maltivec' or '-mcpu=xxx', where xxx supports
+ AltiVec instructions. */
+ if (! TARGET_ALTIVEC)
+ {
+ flag_disable_opts_for_faltivec = 1;
+ /* APPLE LOCAL radar 4161346 */
+ target_flags |= MASK_ALTIVEC;
+ }
+ /* APPLE LOCAL radar 5822514 */
+ target_flags |= MASK_PIM_ALTIVEC;
+ }
+ /* APPLE LOCAL end AltiVec */
+
+ /* APPLE LOCAL begin -fast or -fastf or -fastcp */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ flag_gcse_sm = 1;
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ flag_unroll_loops = 1;
+ flag_tree_loop_linear = 1;
+ flag_strict_aliasing = 1;
+ flag_schedule_interblock = 1;
+ flag_gcse_las = 1;
+ align_jumps_max_skip = 15;
+ align_loops_max_skip = 15;
+ align_functions = 16;
+ align_loops = 16;
+ align_jumps = 16;
+ set_fast_math_flags (1);
+ flag_reorder_blocks = 1;
+ if (flag_branch_probabilities && !flag_exceptions)
+ flag_reorder_blocks_and_partition = 1;
+ if (!flag_pic)
+ target_flags |= MASK_MACHO_DYNAMIC_NO_PIC;
+
+ if (mcpu_cpu == PROCESSOR_POWER4)
+ {
+ target_flags |= MASK_POWERPC;
+ target_flags_explicit |= MASK_POWERPC;
+ }
+ if (flag_fast || flag_fastcp)
+ /* Natural alignment doesn't work with NAG Fortran output. The gcc 3.5
+ C++ libraries have been adjusted so that it now works with them. */
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ if (flag_fastf)
+ /* This applies Fortran argument semantics; for NAG Fortran output only. */
+ flag_argument_noalias = 2;
+ /* IMI flags */
+ disable_typechecking_for_spec_flag = 1;
+ flag_unit_at_a_time = 1;
+ }
+
+ /* rs6000_init_hard_regno_mode_ok must come AFTER setting of -fast flags. */
+ rs6000_init_hard_regno_mode_ok ();
+
+ /* APPLE LOCAL end -fast or -fastf or -fastcp */
+
+ if (TARGET_E500)
+ rs6000_isel = 1;
+
+ /* APPLE LOCAL begin Disable string insns with -Os on Darwin (radar 3509006) */
+ /* If we are optimizing big endian systems for space, use the load/store
+ multiple instructions. */
+ if (BYTES_BIG_ENDIAN && optimize_size)
+ target_flags |= ~target_flags_explicit & MASK_MULTIPLE;
+
+ /* If we are optimizing big endian systems for space, use the
+ string instructions. But do not do this for Darwin, as the
+ kernel can't properly support some hardware that doesn't have
+ these instructions. It's not clear that the compiler is the
+ right place to fix this, but that's how it is for now. See
+ *extensive* discussion in Radar 3509006. */
+ if (BYTES_BIG_ENDIAN && optimize_size && DEFAULT_ABI != ABI_DARWIN)
+ target_flags |= MASK_STRING;
+ /* APPLE LOCAL end Disable string insns with -Os on Darwin (radar 3509006) */
+
+ /* APPLE LOCAL begin -mmultiple/-mstring fixme */
+ /* If -mmultiple or -mno-multiple was explicitly used, don't
+ override with the processor default */
+ if ((target_flags_explicit & MASK_MULTIPLE) != 0)
+ target_flags = (target_flags & ~MASK_MULTIPLE) | multiple;
+
+ /* If -mstring or -mno-string was explicitly used, don't override
+ with the processor default. */
+ if ((target_flags_explicit & MASK_STRING) != 0)
+ target_flags = (target_flags & ~MASK_STRING) | string;
+ /* APPLE LOCAL end -mmultiple/-mstring fixme */
+
+ /* Don't allow -mmultiple or -mstring on little endian systems
+ unless the cpu is a 750, because the hardware doesn't support the
+ instructions used in little endian mode, and causes an alignment
+ trap. The 750 does not cause an alignment trap (except when the
+ target is unaligned). */
+
+ if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
+ {
+ if (TARGET_MULTIPLE)
+ {
+ target_flags &= ~MASK_MULTIPLE;
+ if ((target_flags_explicit & MASK_MULTIPLE) != 0)
+ warning (0, "-mmultiple is not supported on little endian systems");
+ }
+
+ if (TARGET_STRING)
+ {
+ target_flags &= ~MASK_STRING;
+ if ((target_flags_explicit & MASK_STRING) != 0)
+ warning (0, "-mstring is not supported on little endian systems");
+ }
+ }
+
+ /* Set debug flags. */
+ if (rs6000_debug_name)
+ {
+ if (! strcmp (rs6000_debug_name, "all"))
+ rs6000_debug_stack = rs6000_debug_arg = 1;
+ else if (! strcmp (rs6000_debug_name, "stack"))
+ rs6000_debug_stack = 1;
+ else if (! strcmp (rs6000_debug_name, "arg"))
+ rs6000_debug_arg = 1;
+ else
+ error ("unknown -mdebug-%s switch", rs6000_debug_name);
+ }
+
+ if (rs6000_traceback_name)
+ {
+ if (! strncmp (rs6000_traceback_name, "full", 4))
+ rs6000_traceback = traceback_full;
+ else if (! strncmp (rs6000_traceback_name, "part", 4))
+ rs6000_traceback = traceback_part;
+ else if (! strncmp (rs6000_traceback_name, "no", 2))
+ rs6000_traceback = traceback_none;
+ else
+ error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
+ rs6000_traceback_name);
+ }
+
+ if (!rs6000_explicit_options.long_double)
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+
+#ifndef POWERPC_LINUX
+ if (!rs6000_explicit_options.ieee)
+ rs6000_ieeequad = 1;
+#endif
+
+ /* Set the AltiVec ABI as default for powerpc64 Linux. */
+ if (TARGET_ELF && TARGET_64BIT)
+ {
+ rs6000_altivec_abi = 1;
+ TARGET_ALTIVEC_VRSAVE = 1;
+ }
+
+ /* Set the Darwin64 ABI as default for 64-bit Darwin. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ {
+ rs6000_darwin64_abi = 1;
+#if TARGET_MACHO
+ darwin_one_byte_bool = 1;
+ /* APPLE LOCAL pragma reverse_bitfields */
+ darwin_reverse_bitfields = 0;
+#endif
+ /* Default to natural alignment, for better performance. */
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ }
+
+ /* Place FP constants in the constant pool instead of the TOC
+ if section anchors are enabled. */
+ if (flag_section_anchors)
+ TARGET_NO_FP_IN_TOC = 1;
+
+ /* Handle -mtls-size option. */
+ rs6000_parse_tls_size_option ();
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
+ SUBSUBTARGET_OVERRIDE_OPTIONS;
+#endif
+#ifdef SUB3TARGET_OVERRIDE_OPTIONS
+ SUB3TARGET_OVERRIDE_OPTIONS;
+#endif
+
+ if (TARGET_E500)
+ {
+ if (TARGET_ALTIVEC)
+ error ("AltiVec and E500 instructions cannot coexist");
+
+ /* The e500 does not have string instructions, and we set
+ MASK_STRING above when optimizing for size. */
+ if ((target_flags & MASK_STRING) != 0)
+ target_flags = target_flags & ~MASK_STRING;
+ }
+ else if (rs6000_select[1].string != NULL)
+ {
+ /* For the powerpc-eabispe configuration, we set all these by
+ default, so let's unset them if we manually set another
+ CPU that is not the E500. */
+ if (!rs6000_explicit_options.abi)
+ rs6000_spe_abi = 0;
+ if (!rs6000_explicit_options.spe)
+ rs6000_spe = 0;
+ if (!rs6000_explicit_options.float_gprs)
+ rs6000_float_gprs = 0;
+ if (!rs6000_explicit_options.isel)
+ rs6000_isel = 0;
+ if (!rs6000_explicit_options.long_double)
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ }
+
+ rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
+ && rs6000_cpu != PROCESSOR_POWER5);
+ rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
+ || rs6000_cpu == PROCESSOR_POWER5);
+
+ rs6000_sched_restricted_insns_priority
+ = (rs6000_sched_groups ? 1 : 0);
+
+ /* APPLE LOCAL begin only consider true dependency for grouping */
+ /* Handle -msched-costly-dep option. */
+ rs6000_sched_costly_dep
+ = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
+ /* APPLE LOCAL end only consider true dependency for grouping */
+
+ if (rs6000_sched_costly_dep_str)
+ {
+ if (! strcmp (rs6000_sched_costly_dep_str, "no"))
+ rs6000_sched_costly_dep = no_dep_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
+ rs6000_sched_costly_dep = all_deps_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
+ rs6000_sched_costly_dep = true_store_to_load_dep_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
+ rs6000_sched_costly_dep = store_to_load_dep_costly;
+ else
+ rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
+ }
+
+ /* Handle -minsert-sched-nops option. */
+ rs6000_sched_insert_nops
+ = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
+
+ if (rs6000_sched_insert_nops_str)
+ {
+ if (! strcmp (rs6000_sched_insert_nops_str, "no"))
+ rs6000_sched_insert_nops = sched_finish_none;
+ else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
+ rs6000_sched_insert_nops = sched_finish_pad_groups;
+ else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ else
+ rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
+ }
+
+#ifdef TARGET_REGNAMES
+ /* If the user desires alternate register names, copy in the
+ alternate names now. */
+ if (TARGET_REGNAMES)
+ memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
+#endif
+
+ /* Set aix_struct_return last, after the ABI is determined.
+ If -maix-struct-return or -msvr4-struct-return was explicitly
+ used, don't override with the ABI default. */
+ if (!rs6000_explicit_options.aix_struct_ret)
+ aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
+
+ if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
+ REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
+
+ if (TARGET_TOC)
+ ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
+
+ /* We can only guarantee the availability of DI pseudo-ops when
+ assembling for 64-bit targets. */
+ if (!TARGET_64BIT)
+ {
+ targetm.asm_out.aligned_op.di = NULL;
+ targetm.asm_out.unaligned_op.di = NULL;
+ }
+
+ /* Set branch target alignment, if not optimizing for size. */
+ if (!optimize_size)
+ {
+ if (rs6000_sched_groups)
+ {
+ if (align_functions <= 0)
+ align_functions = 16;
+ if (align_jumps <= 0)
+ align_jumps = 16;
+ if (align_loops <= 0)
+ align_loops = 16;
+ }
+ if (align_jumps_max_skip <= 0)
+ align_jumps_max_skip = 15;
+ if (align_loops_max_skip <= 0)
+ align_loops_max_skip = 15;
+ }
+
+ /* Arrange to save and restore machine status around nested functions. */
+ init_machine_status = rs6000_init_machine_status;
+
+ /* We should always be splitting complex arguments, but we can't break
+ Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
+ if (DEFAULT_ABI != ABI_AIX)
+ targetm.calls.split_complex_arg = NULL;
+
+ /* APPLE LOCAL begin AltiVec */
+ /* Enable '(vector signed int)(a, b, c, d)' vector literal notation. */
+ if (rs6000_altivec_pim)
+ targetm.cast_expr_as_vector_init = true;
+ /* APPLE LOCAL end AltiVec */
+
+ /* Initialize rs6000_cost with the appropriate target costs. */
+ if (optimize_size)
+ rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
+ else
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_RIOS1:
+ rs6000_cost = &rios1_cost;
+ break;
+
+ case PROCESSOR_RIOS2:
+ rs6000_cost = &rios2_cost;
+ break;
+
+ case PROCESSOR_RS64A:
+ rs6000_cost = &rs64a_cost;
+ break;
+
+ case PROCESSOR_MPCCORE:
+ rs6000_cost = &mpccore_cost;
+ break;
+
+ case PROCESSOR_PPC403:
+ rs6000_cost = &ppc403_cost;
+ break;
+
+ case PROCESSOR_PPC405:
+ rs6000_cost = &ppc405_cost;
+ break;
+
+ case PROCESSOR_PPC440:
+ rs6000_cost = &ppc440_cost;
+ break;
+
+ case PROCESSOR_PPC601:
+ rs6000_cost = &ppc601_cost;
+ break;
+
+ case PROCESSOR_PPC603:
+ rs6000_cost = &ppc603_cost;
+ break;
+
+ case PROCESSOR_PPC604:
+ rs6000_cost = &ppc604_cost;
+ break;
+
+ case PROCESSOR_PPC604e:
+ rs6000_cost = &ppc604e_cost;
+ break;
+
+ case PROCESSOR_PPC620:
+ rs6000_cost = &ppc620_cost;
+ break;
+
+ case PROCESSOR_PPC630:
+ rs6000_cost = &ppc630_cost;
+ break;
+
+ case PROCESSOR_PPC750:
+ case PROCESSOR_PPC7400:
+ rs6000_cost = &ppc750_cost;
+ break;
+
+ case PROCESSOR_PPC7450:
+ rs6000_cost = &ppc7450_cost;
+ break;
+
+ case PROCESSOR_PPC8540:
+ rs6000_cost = &ppc8540_cost;
+ break;
+
+ case PROCESSOR_POWER4:
+ case PROCESSOR_POWER5:
+ rs6000_cost = &power4_cost;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement targetm.vectorize.builtin_mask_for_load. */
+static tree
+rs6000_builtin_mask_for_load (void)
+{
+ if (TARGET_ALTIVEC)
+ return altivec_builtin_mask_for_load;
+ else
+ return 0;
+}
+
+/* APPLE LOCAL begin mainline 4.2 5569774 */
+/* Return true iff a data reference of TYPE can reach vector alignment (16)
+ after applying N iterations. This routine does not determine how many
+ iterations are required to reach the desired alignment. */
+
+static bool
+rs6000_vector_alignment_reachable (tree type ATTRIBUTE_UNUSED, bool is_packed)
+{
+ if (is_packed)
+ return false;
+
+ if (TARGET_32BIT)
+ {
+ if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
+ return true;
+
+ if (rs6000_alignment_flags == MASK_ALIGN_POWER)
+ return true;
+
+ return false;
+ }
+ else
+ {
+ if (TARGET_MACHO)
+ /* APPLE LOCAL 5643197 */
+ return (rs6000_alignment_flags == MASK_ALIGN_NATURAL);
+
+ /* Assume that all other types are naturally aligned. CHECKME! */
+ return true;
+ }
+}
+/* APPLE LOCAL end mainline 4.2 5569774 */
+
+/* Handle generic options of the form -mfoo=yes/no.
+ NAME is the option name.
+ VALUE is the option value.
+ FLAG points to the flag in which to store 1 or 0, depending on
+ whether the option value is 'yes' or 'no' respectively. */
+static void
+rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
+{
+ if (value == 0)
+ return;
+ else if (!strcmp (value, "yes"))
+ *flag = 1;
+ else if (!strcmp (value, "no"))
+ *flag = 0;
+ else
+ error ("unknown -m%s= option specified: '%s'", name, value);
+}
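+
+/* Usage sketch (illustrative):
+
+ int flag = -1;
+ rs6000_parse_yes_no_option ("vrsave", "yes", &flag);   -- flag is now 1
+ rs6000_parse_yes_no_option ("vrsave", "bogus", &flag); -- error, flag kept
+
+ This is how the OPT_mvrsave_, OPT_misel_ and OPT_mspe_ cases in
+ rs6000_handle_option use it.  */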
+
+/* Validate and record the size specified with the -mtls-size option. */
+
+static void
+rs6000_parse_tls_size_option (void)
+{
+ if (rs6000_tls_size_string == 0)
+ return;
+ else if (strcmp (rs6000_tls_size_string, "16") == 0)
+ rs6000_tls_size = 16;
+ else if (strcmp (rs6000_tls_size_string, "32") == 0)
+ rs6000_tls_size = 32;
+ else if (strcmp (rs6000_tls_size_string, "64") == 0)
+ rs6000_tls_size = 64;
+ else
+ error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
+}
+
+/* APPLE LOCAL begin outwit script - cvs diff is inconsistent about
+ which of the }'s in the next 2 functions represents a local change */
+void
+optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
+{
+ /* APPLE LOCAL begin tweak default optimizations */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Turn these on only if specifically requested, not with -O* */
+ /* Strict aliasing breaks too much existing code */
+ flag_strict_aliasing = 0;
+ /* Block reordering causes code bloat, and very little speedup */
+ flag_reorder_blocks = 0;
+ /* Multi-basic-block scheduling loses badly when the compiler
+ misguesses which blocks are going to be executed, more than
+ it gains when it guesses correctly. Its guesses for cases
+ where interblock scheduling occurs (if-then-else's) are
+ little better than random, so disable this unless requested. */
+ flag_schedule_interblock = 0;
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ /* APPLE LOCAL begin 3893112 */
+ /* This value may be temporary; dje will have an opinion at some point.
+ 36 is what x86 uses and ppc should be at least as big. */
+ set_param_value ("sra-max-structure-size", 36);
+ /* Another parameter has been added in mainline, which by default
+ should be parallel to the one above. */
+ set_param_value ("sra-max-structure-count",
+ SRA_MAX_STRUCTURE_SIZE / UNITS_PER_WORD);
+ /* APPLE LOCAL end 3893112 */
+ }
+ /* APPLE LOCAL end tweak default optimizations */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+
+ /* Double growth factor to counter reduced min jump length. */
+ set_param_value ("max-grow-copy-bb-insns", 16);
+
+ /* Enable section anchors by default.
+ Skip section anchors for Objective C and Objective C++
+ until the front ends are fixed. */
+ if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
+ flag_section_anchors = 1;
+}
+
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+/* Version of the above for use from #pragma optimization_level.
+ Do not reset things unless they're per-function. */
+#if TARGET_MACHO
+void
+/* APPLE LOCAL begin 4760857 optimization pragmas */
+reset_optimization_options (int level ATTRIBUTE_UNUSED, int size)
+/* APPLE LOCAL end 4760857 optimization pragmas */
+{
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Block reordering causes code bloat, and very little speedup */
+ flag_reorder_blocks = 0;
+ /* Multi-basic-block scheduling loses badly when the compiler
+ misguesses which blocks are going to be executed, more than
+ it gains when it guesses correctly. Its guesses for cases
+ where interblock scheduling occurs (if-then-else's) are
+ little better than random, so disable this unless requested. */
+ flag_schedule_interblock = 0;
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ }
+ /* APPLE LOCAL begin 4760857 optimization pragmas */
+ /* Set branch target alignment, if not optimizing for size. */
+ if (!size)
+ {
+ if (rs6000_sched_groups)
+ {
+ if (align_jumps <= 0)
+ align_jumps = 16;
+ if (align_loops <= 0)
+ align_loops = 16;
+ }
+ if (align_jumps_max_skip <= 0)
+ align_jumps_max_skip = 15;
+ if (align_loops_max_skip <= 0)
+ align_loops_max_skip = 15;
+ }
+ /* APPLE LOCAL end 4760857 optimization pragmas */
+}
+#endif
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+/* APPLE LOCAL end outwit script */
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+rs6000_handle_option (size_t code, const char *arg, int value)
+{
+ switch (code)
+ {
+ case OPT_mno_power:
+ target_flags &= ~(MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ break;
+ case OPT_mno_powerpc:
+ target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ break;
+ case OPT_mfull_toc:
+ target_flags &= ~MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#ifdef TARGET_USES_SYSV4_OPT
+ /* Note: V.4 no longer uses a normal TOC, so make -mfull-toc be
+ just the same as -mminimal-toc. */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#endif
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mtoc:
+ /* Make -mtoc behave like -mminimal-toc. */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ break;
+#endif
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix64:
+#else
+ case OPT_m64:
+#endif
+ target_flags |= MASK_POWERPC64 | MASK_POWERPC;
+ target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
+ target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
+ break;
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix32:
+#else
+ case OPT_m32:
+#endif
+ target_flags &= ~MASK_POWERPC64;
+ target_flags_explicit |= MASK_POWERPC64;
+ break;
+
+ case OPT_minsert_sched_nops_:
+ rs6000_sched_insert_nops_str = arg;
+ break;
+
+ case OPT_mminimal_toc:
+ if (value == 1)
+ {
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ }
+ break;
+
+ case OPT_mpower:
+ if (value == 1)
+ {
+ target_flags |= (MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpower2:
+ if (value == 1)
+ {
+ target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpowerpc_gpopt:
+ case OPT_mpowerpc_gfxopt:
+ if (value == 1)
+ {
+ target_flags |= MASK_POWERPC;
+ target_flags_explicit |= MASK_POWERPC;
+ }
+ break;
+
+ case OPT_maix_struct_return:
+ case OPT_msvr4_struct_return:
+ rs6000_explicit_options.aix_struct_ret = true;
+ break;
+
+ case OPT_mvrsave_:
+ rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
+ break;
+
+ case OPT_misel_:
+ rs6000_explicit_options.isel = true;
+ rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
+ break;
+
+ case OPT_mspe_:
+ rs6000_explicit_options.spe = true;
+ rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
+ /* No SPE means 64-bit long doubles, even on an E500. */
+ if (!rs6000_spe)
+ rs6000_long_double_type_size = 64;
+ break;
+
+ case OPT_mdebug_:
+ rs6000_debug_name = arg;
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mcall_:
+ rs6000_abi_name = arg;
+ break;
+
+ case OPT_msdata_:
+ rs6000_sdata_name = arg;
+ break;
+
+ case OPT_mtls_size_:
+ rs6000_tls_size_string = arg;
+ break;
+
+ case OPT_mrelocatable:
+ if (value == 1)
+ {
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ break;
+
+ case OPT_mrelocatable_lib:
+ if (value == 1)
+ {
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ else
+ {
+ target_flags &= ~MASK_RELOCATABLE;
+ target_flags_explicit |= MASK_RELOCATABLE;
+ }
+ break;
+#endif
+
+ case OPT_mabi_:
+ if (!strcmp (arg, "altivec"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_altivec_abi = 1;
+ rs6000_spe_abi = 0;
+ }
+ else if (! strcmp (arg, "no-altivec"))
+ {
+ /* ??? Don't set rs6000_explicit_options.abi here, to allow
+ the default for rs6000_spe_abi to be chosen later. */
+ rs6000_altivec_abi = 0;
+ }
+ else if (! strcmp (arg, "spe"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_spe_abi = 1;
+ rs6000_altivec_abi = 0;
+ if (!TARGET_SPE_ABI)
+ error ("not configured for ABI: '%s'", arg);
+ }
+ else if (! strcmp (arg, "no-spe"))
+ {
+ rs6000_explicit_options.abi = true;
+ rs6000_spe_abi = 0;
+ }
+
+ /* These are here for testing during development only; please do
+ not document them in the manual. */
+ else if (! strcmp (arg, "d64"))
+ {
+ rs6000_darwin64_abi = 1;
+ warning (0, "Using darwin64 ABI");
+ }
+ else if (! strcmp (arg, "d32"))
+ {
+ rs6000_darwin64_abi = 0;
+ warning (0, "Using old darwin ABI");
+ }
+
+ else if (! strcmp (arg, "ibmlongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 0;
+ warning (0, "Using IBM extended precision long double");
+ }
+ else if (! strcmp (arg, "ieeelongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 1;
+ warning (0, "Using IEEE extended precision long double");
+ }
+
+ else
+ {
+ error ("unknown ABI specified: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mcpu_:
+ rs6000_select[1].string = arg;
+ break;
+
+ case OPT_mtune_:
+ rs6000_select[2].string = arg;
+ break;
+
+ case OPT_mtraceback_:
+ rs6000_traceback_name = arg;
+ break;
+
+ case OPT_mfloat_gprs_:
+ rs6000_explicit_options.float_gprs = true;
+ if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
+ rs6000_float_gprs = 1;
+ else if (! strcmp (arg, "double"))
+ rs6000_float_gprs = 2;
+ else if (! strcmp (arg, "no"))
+ rs6000_float_gprs = 0;
+ else
+ {
+ error ("invalid option for -mfloat-gprs: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mlong_double_:
+ rs6000_explicit_options.long_double = true;
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ if (value != 64 && value != 128)
+ {
+ error ("Unknown switch -mlong-double-%s", arg);
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ return false;
+ }
+ else
+ rs6000_long_double_type_size = value;
+ break;
+
+ case OPT_msched_costly_dep_:
+ rs6000_sched_costly_dep_str = arg;
+ break;
+
+ case OPT_malign_:
+ rs6000_explicit_options.alignment = true;
+ if (! strcmp (arg, "power"))
+ {
+ /* On 64-bit Darwin, power alignment is ABI-incompatible with
+ some C library functions, so warn about it. The flag may be
+ useful for performance studies from time to time though, so
+ don't disable it entirely. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ warning (0, "-malign-power is not supported for 64-bit Darwin;"
+ " it is incompatible with the installed C and C++ libraries");
+ rs6000_alignment_flags = MASK_ALIGN_POWER;
+ }
+ else if (! strcmp (arg, "natural"))
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ /* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+ else if (! strcmp (arg, "mac68k"))
+ {
+ /* The old mac68k alignment has zero value for 64-bit work, so
+ forbid its use. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ error ("-malign-mac68k is not allowed for 64-bit Darwin");
+ /* APPLE LOCAL begin radar 5134231 */
+ rs6000_alignment_flags = OPTION_MASK_ALIGN_MAC68K;
+ /* APPLE LOCAL end radar 5134231 */
+ }
+ /* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
+ else
+ {
+ error ("unknown -malign-XXXXX option specified: '%s'", arg);
+ return false;
+ }
+ break;
+ /* APPLE LOCAL begin 5731065 */
+ case OPT_mkernel:
+ /* Set this early so that a kext that wants to use the hard
+ floating point registers can use -mkernel -mhard-float. */
+ target_flags |= MASK_SOFT_FLOAT;
+ break;
+ /* APPLE LOCAL end 5731065 */
+ }
+ return true;
+}
+
+/* Do anything needed at the start of the asm file. */
+
+static void
+rs6000_file_start (void)
+{
+ size_t i;
+ char buffer[80];
+ const char *start = buffer;
+ struct rs6000_cpu_select *ptr;
+ const char *default_cpu = TARGET_CPU_DEFAULT;
+ FILE *file = asm_out_file;
+
+ default_file_start ();
+
+#ifdef TARGET_BI_ARCH
+ if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
+ default_cpu = 0;
+#endif
+
+ if (flag_verbose_asm)
+ {
+ sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
+ rs6000_select[0].string = default_cpu;
+
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ {
+ ptr = &rs6000_select[i];
+ if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+ {
+ fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
+ start = "";
+ }
+ }
+
+ if (PPC405_ERRATUM77)
+ {
+ fprintf (file, "%s PPC405CR_ERRATUM77", start);
+ start = "";
+ }
+
+#ifdef USING_ELFOS_H
+ switch (rs6000_sdata)
+ {
+ case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
+ case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
+ case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
+ case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
+ }
+
+ if (rs6000_sdata && g_switch_value)
+ {
+ fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
+ g_switch_value);
+ start = "";
+ }
+#endif
+
+ if (*start == '\0')
+ putc ('\n', file);
+ }
+
+ if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
+ {
+ switch_to_section (toc_section);
+ switch_to_section (text_section);
+ }
+
+ /* APPLE LOCAL begin hot/cold */
+ /* MERGE FIXME push to FSF or remove as unneeded? */
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Emit declarations for all code sections at the beginning of the file; this
+ keeps them from being separated by data sections, which can lead to
+ out-of-range branches. Also align the unlikely text section properly; the
+ first real occurrence of this may be a label within a function, which does
+ not otherwise get aligned. */
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P || flag_reorder_blocks_and_partition)
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__text,regular,pure_instructions\n");
+ if (flag_reorder_blocks_and_partition)
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__unlikely,regular,pure_instructions\n");
+ fprintf (asm_out_file, "\t.align 2\n");
+ }
+ if (MACHO_DYNAMIC_NO_PIC_P )
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__symbol_stub1,");
+ fprintf (asm_out_file, "symbol_stubs,pure_instructions,16\n");
+ }
+ else
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__picsymbolstub1,");
+ fprintf (asm_out_file, "symbol_stubs,pure_instructions,32\n");
+ }
+ }
+ }
+#endif
+ /* APPLE LOCAL end hot/cold */
+}
+
+
+/* Return nonzero if this function is known to have a null epilogue. */
+
+int
+direct_return (void)
+{
+ if (reload_completed)
+ {
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (info->first_gp_reg_save == 32
+ && info->first_fp_reg_save == 64
+ && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
+ && ! info->lr_save_p
+ && ! info->cr_save_p
+ && info->vrsave_mask == 0
+ && ! info->push_p)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return the number of instructions it takes to form a constant in an
+ integer register. */
+
+int
+num_insns_constant_wide (HOST_WIDE_INT value)
+{
+ /* signed constant loadable with {cal|addi} */
+ if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
+ return 1;
+
+ /* constant loadable with {cau|addis} */
+ else if ((value & 0xffff) == 0
+ && (value >> 31 == -1 || value >> 31 == 0))
+ return 1;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+ else if (TARGET_POWERPC64)
+ {
+ HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ HOST_WIDE_INT high = value >> 31;
+
+ if (high == 0 || high == -1)
+ return 2;
+
+ high >>= 1;
+
+ if (low == 0)
+ return num_insns_constant_wide (high) + 1;
+ else
+ return (num_insns_constant_wide (high)
+ + num_insns_constant_wide (low) + 1);
+ }
+#endif
+
+ else
+ return 2;
+}
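+
+/* Worked examples (illustrative): 0x7fff loads in one addi; 0x12340000
+ loads in one addis (low 16 bits zero, and the value fits in 32 signed
+ bits); 0x12345678 falls through and needs two instructions
+ (addis + ori).  */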
+
+int
+num_insns_constant (rtx op, enum machine_mode mode)
+{
+ HOST_WIDE_INT low, high;
+
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+#if HOST_BITS_PER_WIDE_INT == 64
+ if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
+ && mask64_operand (op, mode))
+ return 2;
+ else
+#endif
+ return num_insns_constant_wide (INTVAL (op));
+
+ case CONST_DOUBLE:
+ if (mode == SFmode)
+ {
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ return num_insns_constant_wide ((HOST_WIDE_INT) l);
+ }
+
+ if (mode == VOIDmode || mode == DImode)
+ {
+ high = CONST_DOUBLE_HIGH (op);
+ low = CONST_DOUBLE_LOW (op);
+ }
+ else
+ {
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+ high = l[WORDS_BIG_ENDIAN == 0];
+ low = l[WORDS_BIG_ENDIAN != 0];
+ }
+
+ if (TARGET_32BIT)
+ return (num_insns_constant_wide (low)
+ + num_insns_constant_wide (high));
+ else
+ {
+ if ((high == 0 && low >= 0)
+ || (high == -1 && low < 0))
+ return num_insns_constant_wide (low);
+
+ else if (mask64_operand (op, mode))
+ return 2;
+
+ else if (low == 0)
+ return num_insns_constant_wide (high) + 1;
+
+ else
+ return (num_insns_constant_wide (high)
+ + num_insns_constant_wide (low) + 1);
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
+ If the mode of OP is MODE_VECTOR_INT, this simply returns the
+ corresponding element of the vector, but for V4SFmode and V2SFmode,
+ the corresponding "float" is interpreted as an SImode integer. */
+
+static HOST_WIDE_INT
+const_vector_elt_as_int (rtx op, unsigned int elt)
+{
+ rtx tmp = CONST_VECTOR_ELT (op, elt);
+ if (GET_MODE (op) == V4SFmode
+ || GET_MODE (op) == V2SFmode)
+ tmp = gen_lowpart (SImode, tmp);
+ return INTVAL (tmp);
+}
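+
+/* For example (illustrative): in a V4SFmode vector the element 1.0f
+ comes back as its IEEE single-precision bit pattern 0x3f800000, which
+ is what the vspltis checks below compare against.  */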
+
+/* Return true if OP can be synthesized with a particular vspltisb, vspltish
+ or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
+ depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
+ all items are set to the same value and contain COPIES replicas of the
+ vsplt's operand; if STEP > 1, one in every STEP elements is set to
+ the vsplt's operand and the others are set to the value of the
+ operand's msb. */
+
+static bool
+vspltis_constant (rtx op, unsigned step, unsigned copies)
+{
+ enum machine_mode mode = GET_MODE (op);
+ enum machine_mode inner = GET_MODE_INNER (mode);
+
+ unsigned i;
+ unsigned nunits = GET_MODE_NUNITS (mode);
+ unsigned bitsize = GET_MODE_BITSIZE (inner);
+ unsigned mask = GET_MODE_MASK (inner);
+
+ HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
+ HOST_WIDE_INT splat_val = val;
+ HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
+
+ /* Construct the value to be splatted, if possible. If not, return 0. */
+ for (i = 2; i <= copies; i *= 2)
+ {
+ HOST_WIDE_INT small_val;
+ bitsize /= 2;
+ small_val = splat_val >> bitsize;
+ mask >>= bitsize;
+ if (splat_val != ((small_val << bitsize) | (small_val & mask)))
+ return false;
+ splat_val = small_val;
+ }
+
+ /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
+ if (EASY_VECTOR_15 (splat_val))
+ ;
+
+ /* Also check if we can splat, and then add the result to itself. Do so if
+ the value is positive, or if the splat instruction is using OP's mode;
+ for splat_val < 0, the splat and the add should use the same mode. */
+ else if (EASY_VECTOR_15_ADD_SELF (splat_val)
+ && (splat_val >= 0 || (step == 1 && copies == 1)))
+ ;
+
+ else
+ return false;
+
+ /* Check if VAL is present in every STEP-th element, and the
+ other elements are filled with its most significant bit. */
+ for (i = 0; i < nunits - 1; ++i)
+ {
+ HOST_WIDE_INT desired_val;
+ if (((i + 1) & (step - 1)) == 0)
+ desired_val = val;
+ else
+ desired_val = msb_val;
+
+ if (desired_val != const_vector_elt_as_int (op, i))
+ return false;
+ }
+
+ return true;
+}
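+
+/* Worked example (illustrative): the V8HImode constant { 5, 5, ..., 5 }
+ fails the STEP == 2 (vspltisw) trial but succeeds with STEP == 1,
+ COPIES == 1, i.e. a direct "vspltish 5"; a vector of eight 0x0505
+ halfwords instead succeeds with COPIES == 2, i.e. a "vspltisb 5"
+ viewed as halfwords.  */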
+
+
+/* Return true if OP is of the given MODE and can be synthesized
+ with a vspltisb, vspltish or vspltisw. */
+
+bool
+easy_altivec_constant (rtx op, enum machine_mode mode)
+{
+ unsigned step, copies;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+ else if (mode != GET_MODE (op))
+ return false;
+
+ /* Start with a vspltisw. */
+ step = GET_MODE_NUNITS (mode) / 4;
+ copies = 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ return false;
+}
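+
+/* E.g. (illustrative): for V16QImode the three trials above use
+ STEP/COPIES of 4/1 (vspltisw), 2/1 (vspltish) and 1/1 (vspltisb);
+ for V4SImode they are 1/1, 1/2 and 1/4.  */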
+
+/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
+ result is OP. Abort if it is not possible. */
+
+rtx
+gen_easy_altivec_constant (rtx op)
+{
+ enum machine_mode mode = GET_MODE (op);
+ int nunits = GET_MODE_NUNITS (mode);
+ rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+ unsigned step = nunits / 4;
+ unsigned copies = 1;
+
+ /* Start with a vspltisw. */
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+
+ gcc_unreachable ();
+}
+
+const char *
+output_vec_const_move (rtx *operands)
+{
+ int cst, cst2;
+ enum machine_mode mode;
+ rtx dest, vec;
+
+ dest = operands[0];
+ vec = operands[1];
+ mode = GET_MODE (dest);
+
+ if (TARGET_ALTIVEC)
+ {
+ rtx splat_vec;
+ if (zero_constant (vec, mode))
+ return "vxor %0,%0,%0";
+
+ splat_vec = gen_easy_altivec_constant (vec);
+ gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
+ operands[1] = XEXP (splat_vec, 0);
+ if (!EASY_VECTOR_15 (INTVAL (operands[1])))
+ return "#";
+
+ switch (GET_MODE (splat_vec))
+ {
+ case V4SImode:
+ return "vspltisw %0,%1";
+
+ case V8HImode:
+ return "vspltish %0,%1";
+
+ case V16QImode:
+ return "vspltisb %0,%1";
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ gcc_assert (TARGET_SPE);
+
+ /* Vector constant 0 is handled as a splitter for V2SI, and in the
+ patterns for V1DI, V4HI, and V2SF.
+
+ FIXME: We should probably return # and add post reload
+ splitters for these, but this way is so easy ;-). */
+ cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
+ operands[1] = CONST_VECTOR_ELT (vec, 0);
+ operands[2] = CONST_VECTOR_ELT (vec, 1);
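+ /* E.g. { 5, 5 } emits "li %0,5; evmergelo %0,%0,%0", while { 5, 7 }
+ appends "li %0,7" to rewrite the low word. */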
+ if (cst == cst2)
+ return "li %0,%1\n\tevmergelo %0,%0,%0";
+ else
+ return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
+}
+
+/* Initialize vector TARGET to VALS. */
+
+void
+rs6000_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true, all_const_zero = true;
+ rtx x, mem;
+ int i;
+
+ for (i = 0; i < n_elts; ++i)
+ {
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ ++n_var, one_var = i;
+ else if (x != CONST0_RTX (inner_mode))
+ all_const_zero = false;
+
+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
+ }
+
+ if (n_var == 0)
+ {
+ if (mode != V4SFmode && all_const_zero)
+ {
+ /* Zero register. */
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_XOR (mode, target, target)));
+ return;
+ }
+ else if (mode != V4SFmode && easy_vector_constant (vals, mode))
+ {
+ /* Splat immediate. */
+ emit_insn (gen_rtx_SET (VOIDmode, target, vals));
+ return;
+ }
+ else if (all_same)
+ ; /* Splat vector element. */
+ else
+ {
+ /* Load from constant pool. */
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
+ }
+
+ /* Store value to stack temp. Load vector element. Splat. */
+ if (all_same)
+ {
+ /* APPLE LOCAL begin 4708231 */
+ rtx pmem;
+
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ mem = adjust_address_nv (mem, inner_mode, 0);
+
+ pmem = copy_addr_to_reg (XEXP (mem, 0));
+ pmem = gen_rtx_MEM (mode, pmem);
+
+ emit_move_insn (mem, XVECEXP (vals, 0, 0));
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ target, pmem),
+ x)));
+ /* APPLE LOCAL end 4708231 */
+
+ x = gen_rtx_VEC_SELECT (inner_mode, target,
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (1, const0_rtx)));
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_VEC_DUPLICATE (mode, x)));
+ return;
+ }
+
+ /* One field is non-constant. Load constant then overwrite
+ varying field. */
+ if (n_var == 1)
+ {
+ rtx copy = copy_rtx (vals);
+
+ /* Load constant part of vector, substitute neighboring value for
+ varying element. */
+ XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
+ rs6000_expand_vector_init (target, copy);
+
+ /* Insert variable. */
+ rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
+ return;
+ }
+
+ /* Construct the vector in memory one field at a time
+ and load the whole vector. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner_mode,
+ i * GET_MODE_SIZE (inner_mode)),
+ XVECEXP (vals, 0, i));
+ emit_move_insn (target, mem);
+}
+
+/* Set field ELT of TARGET to VAL. */
+
+void
+rs6000_expand_vector_set (rtx target, rtx val, int elt)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx reg = gen_reg_rtx (mode);
+ /* APPLE LOCAL 4708231 */
+ rtx mask, mem, x, pmem;
+ int width = GET_MODE_SIZE (inner_mode);
+ int i;
+
+ /* Load single variable value. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
+
+ /* APPLE LOCAL begin 4708231 */
+ /* Copy address into a register. */
+ pmem = copy_addr_to_reg (XEXP (mem, 0));
+ pmem = gen_rtx_MEM (mode, pmem);
+
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ reg, pmem),
+ x)));
+ /* APPLE LOCAL end 4708231 */
+
+ /* Linear sequence. */
+ mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
+ for (i = 0; i < 16; ++i)
+ XVECEXP (mask, 0, i) = GEN_INT (i);
+
+ /* Set permute mask to insert element into target. */
+ for (i = 0; i < width; ++i)
+ XVECEXP (mask, 0, elt*width + i)
+ = GEN_INT (i + 0x10);
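+
+ /* E.g. for ELT == 1 in V4SImode the mask becomes
+ { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 };
+ selectors 16..31 pick bytes from REG. */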
+ x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
+ x = gen_rtx_UNSPEC (mode,
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERM);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+}
+
+/* Extract field ELT from VEC into TARGET. */
+
+void
+rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
+{
+ enum machine_mode mode = GET_MODE (vec);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ /* APPLE LOCAL 4708231 */
+ rtx mem, x, pmem;
+
+ /* Allocate mode-sized buffer. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+
+ /* Add offset to field within buffer matching vector element. */
+ mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
+
+ /* APPLE LOCAL begin 4708231 */
+ /* Copy address into a register. */
+ pmem = copy_addr_to_reg (XEXP (mem, 0));
+ pmem = gen_rtx_MEM (mode, pmem);
+
+ /* Store single field into mode-sized buffer. */
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_STVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ pmem, vec),
+ x)));
+ /* APPLE LOCAL end 4708231 */
+ emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
+}
+
+/* Generates shifts and masks for a pair of rldicl or rldicr insns to
+ implement ANDing by the mask IN. */
+void
+build_mask64_2_operands (rtx in, rtx *out)
+{
+#if HOST_BITS_PER_WIDE_INT >= 64
+ unsigned HOST_WIDE_INT c, lsb, m1, m2;
+ int shift;
+
+ gcc_assert (GET_CODE (in) == CONST_INT);
+
+ c = INTVAL (in);
+ if (c & 1)
+ {
+ /* Assume c initially something like 0x00fff000000fffff. The idea
+ is to rotate the word so that the middle ^^^^^^ group of zeros
+ is at the MS end and can be cleared with an rldicl mask. We then
+ rotate back and clear off the MS ^^ group of zeros with a
+ second rldicl. */
+ c = ~c; /* c == 0xff000ffffff00000 */
+ lsb = c & -c; /* lsb == 0x0000000000100000 */
+ m1 = -lsb; /* m1 == 0xfffffffffff00000 */
+ c = ~c; /* c == 0x00fff000000fffff */
+ c &= -lsb; /* c == 0x00fff00000000000 */
+ lsb = c & -c; /* lsb == 0x0000100000000000 */
+ c = ~c; /* c == 0xff000fffffffffff */
+ c &= -lsb; /* c == 0xff00000000000000 */
+ shift = 0;
+ while ((lsb >>= 1) != 0)
+ shift++; /* shift == 44 on exit from loop */
+ m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
+ m1 = ~m1; /* m1 == 0x000000ffffffffff */
+ m2 = ~c; /* m2 == 0x00ffffffffffffff */
+ }
+ else
+ {
+ /* Assume c initially something like 0xff000f0000000000. The idea
+ is to rotate the word so that the ^^^ middle group of zeros
+ is at the LS end and can be cleared with an rldicr mask. We then
+ rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
+ a second rldicr. */
+ lsb = c & -c; /* lsb == 0x0000010000000000 */
+ m2 = -lsb; /* m2 == 0xffffff0000000000 */
+ c = ~c; /* c == 0x00fff0ffffffffff */
+ c &= -lsb; /* c == 0x00fff00000000000 */
+ lsb = c & -c; /* lsb == 0x0000100000000000 */
+ c = ~c; /* c == 0xff000fffffffffff */
+ c &= -lsb; /* c == 0xff00000000000000 */
+ shift = 0;
+ while ((lsb >>= 1) != 0)
+ shift++; /* shift == 44 on exit from loop */
+ m1 = ~c; /* m1 == 0x00ffffffffffffff */
+ m1 >>= shift; /* m1 == 0x0000000000000fff */
+ m1 = ~m1; /* m1 == 0xfffffffffffff000 */
+ }
+
+ /* Note that when we only have two 0->1 and 1->0 transitions, one of the
+ masks will be all 1's. We are guaranteed more than one transition. */
+ out[0] = GEN_INT (64 - shift);
+ out[1] = GEN_INT (m1);
+ out[2] = GEN_INT (shift);
+ out[3] = GEN_INT (m2);
+#else
+ (void)in;
+ (void)out;
+ gcc_unreachable ();
+#endif
+}
+
+/* Return TRUE if OP is an invalid SUBREG operation on the e500. */
+
+bool
+invalid_e500_subreg (rtx op, enum machine_mode mode)
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ /* Reject (subreg:SI (reg:DF)). */
+ if (GET_CODE (op) == SUBREG
+ && mode == SImode
+ && REG_P (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == DFmode)
+ return true;
+
+ /* Reject (subreg:DF (reg:DI)). */
+ if (GET_CODE (op) == SUBREG
+ && mode == DFmode
+ && REG_P (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == DImode)
+ return true;
+ }
+
+ if (TARGET_SPE
+ && GET_CODE (op) == SUBREG
+ && mode == SImode
+ && REG_P (SUBREG_REG (op))
+ && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
+ return true;
+
+ return false;
+}
+
+/* Darwin and AIX increase natural record alignment to doubleword if the first
+ field is an FP double, while the FP fields remain word aligned. */
+
+unsigned int
+rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
+{
+ unsigned int align = MAX (computed, specified);
+ tree field = TYPE_FIELDS (type);
+
+ /* Skip all non-field decls. */
+ while (field != NULL && TREE_CODE (field) != FIELD_DECL)
+ field = TREE_CHAIN (field);
+
+ if (field != NULL && field != type)
+ {
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ if (type != error_mark_node && TYPE_MODE (type) == DFmode)
+ align = MAX (align, 64);
+ }
+
+ return align;
+}
+
+/* APPLE LOCAL begin mainline 2006-10-31 PR 23067, radar 4869885 */
+/* Darwin increases record alignment to the natural alignment of
+ the first field. */
+
+unsigned int
+darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
+{
+ unsigned int align = MAX (computed, specified);
+
+ if (TYPE_PACKED (type))
+ return align;
+
+ /* Find the first field, looking down into aggregates. */
+ /* APPLE LOCAL begin radar 4869885 */
+ {
+ tree field = TYPE_FIELDS (type);
+ /* Skip all non-field decls. */
+ while (field != NULL && TREE_CODE (field) != FIELD_DECL)
+ field = TREE_CHAIN (field);
+ if (field)
+ {
+ if (TREE_CODE (type) == UNION_TYPE)
+ {
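+ /* Use the most-aligned member of the union: walk all members
+ and remember the one with the greatest alignment. */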
+ tree union_field = field;
+ tree union_type = TREE_TYPE (union_field);
+ tree next_union_type;
+ do
+ {
+ union_field = TREE_CHAIN (union_field);
+ if (!union_field)
+ break;
+ /* For array members, use the innermost element type. */
+ if (TREE_CODE (TREE_TYPE (union_field)) == ARRAY_TYPE)
+ next_union_type = get_inner_array_type (union_field);
+ else
+ next_union_type = TREE_TYPE (union_field);
+ if (TYPE_ALIGN (next_union_type) > TYPE_ALIGN (union_type))
+ union_type = next_union_type;
+ } while (1);
+ type = union_type;
+ }
+ else
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+ }
+ }
+ /* APPLE LOCAL end radar 4869885 */
+
+ /* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+ if (OPTION_ALIGN_MAC68K)
+ align = MAX (align, 16);
+ /* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
+
+ /* APPLE LOCAL begin radar 4869885 */
+ else if (type != error_mark_node && ! TYPE_PACKED (type) &&
+ maximum_field_alignment == 0 && (TARGET_ALIGN_NATURAL == 0))
+ /* APPLE LOCAL end radar 4869885 */
+ align = MAX (align, TYPE_ALIGN (type));
+
+ return align;
+}
+/* APPLE LOCAL end mainline 2006-10-31 PR 23067, radar 4869885 */
+/* Return 1 for an operand in small memory on V.4/eabi. */
+
+int
+small_data_operand (rtx op ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+#if TARGET_ELF
+ rtx sym_ref;
+
+ if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
+ return 0;
+
+ if (DEFAULT_ABI != ABI_V4)
+ return 0;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ sym_ref = op;
+
+ else if (GET_CODE (op) != CONST
+ || GET_CODE (XEXP (op, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
+ return 0;
+
+ else
+ {
+ rtx sum = XEXP (op, 0);
+ HOST_WIDE_INT summand;
+
+ /* We have to be careful here, because it is the referenced address
+ that must be within 32k of _SDA_BASE_, not just the symbol. */
+ summand = INTVAL (XEXP (sum, 1));
+ if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
+ return 0;
+
+ sym_ref = XEXP (sum, 0);
+ }
+
+ return SYMBOL_REF_SMALL_P (sym_ref);
+#else
+ return 0;
+#endif
+}
+
+/* Return true if either operand is a general purpose register. */
+
+bool
+gpr_or_gpr_p (rtx op0, rtx op1)
+{
+ return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
+ || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
+}
+
+
+/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
+
+static int
+constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ if (RS6000_SYMBOL_REF_TLS_P (op))
+ return 0;
+ else if (CONSTANT_POOL_ADDRESS_P (op))
+ {
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (op), Pmode))
+ {
+ *have_sym = 1;
+ return 1;
+ }
+ else
+ return 0;
+ }
+ else if (! strcmp (XSTR (op, 0), toc_label_name))
+ {
+ *have_toc = 1;
+ return 1;
+ }
+ else
+ return 0;
+ case PLUS:
+ case MINUS:
+ return (constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc)
+ && constant_pool_expr_1 (XEXP (op, 1), have_sym, have_toc));
+ case CONST:
+ return constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc);
+ case CONST_INT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static bool
+constant_pool_expr_p (rtx op)
+{
+ int have_sym = 0;
+ int have_toc = 0;
+ return constant_pool_expr_1 (op, &have_sym, &have_toc) && have_sym;
+}
+
+bool
+toc_relative_expr_p (rtx op)
+{
+ int have_sym = 0;
+ int have_toc = 0;
+ return constant_pool_expr_1 (op, &have_sym, &have_toc) && have_toc;
+}
+
+bool
+legitimate_constant_pool_address_p (rtx x)
+{
+ return (TARGET_TOC
+ && GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
+ && constant_pool_expr_p (XEXP (x, 1)));
+}
+
+static bool
+legitimate_small_data_p (enum machine_mode mode, rtx x)
+{
+ return (DEFAULT_ABI == ABI_V4
+ && !flag_pic && !TARGET_TOC
+ && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
+ && small_data_operand (x, mode));
+}
+
+/* SPE offset addressing is limited to 5 bits' worth of doublewords, i.e. a
+ multiple of 8 between 0 and 248. */
+#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
+
+bool
+rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ unsigned HOST_WIDE_INT offset, extra;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
+ if (legitimate_constant_pool_address_p (x))
+ return true;
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return false;
+
+ offset = INTVAL (XEXP (x, 1));
+ extra = 0;
+ switch (mode)
+ {
+ case V16QImode:
+ case V8HImode:
+ case V4SFmode:
+ case V4SImode:
+ /* APPLE LOCAL begin radar 4994150 */
+ /* AltiVec vector modes. Only reg+reg addressing is valid, and a
+ constant offset of zero should not occur due to canonicalization,
+ so reject any offset here. */
+ return false;
+ /* APPLE LOCAL end radar 4994150 */
+
+ case V4HImode:
+ case V2SImode:
+ case V1DImode:
+ case V2SFmode:
+ /* SPE vector modes. */
+ return SPE_CONST_OFFSET_OK (offset);
+
+ case DFmode:
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
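+ /* Fall through. */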
+
+ case DImode:
+ /* On e500v2, we may have:
+
+ (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
+
+ Which gets addressed with evldd instructions. */
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
+
+ if (mode == DFmode || !TARGET_POWERPC64)
+ extra = 4;
+ else if (offset & 3)
+ return false;
+ break;
+
+ case TFmode:
+ case TImode:
+ if (mode == TFmode || !TARGET_POWERPC64)
+ extra = 12;
+ else if (offset & 3)
+ return false;
+ else
+ extra = 8;
+ break;
+
+ default:
+ break;
+ }
+
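+ /* Both the first word (OFFSET) and the last word (OFFSET + EXTRA) of
+ the access must fit in a signed 16-bit D-field; biasing by 0x8000
+ turns the signed range check into an unsigned one. */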
+ offset += 0x8000;
+ return (offset < 0x10000) && (offset + extra < 0x10000);
+}
+
+static bool
+legitimate_indexed_address_p (rtx x, int strict)
+{
+ rtx op0, op1;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ /* Recognize the rtl generated by reload which we know will later be
+ replaced with proper base and index regs. */
+ if (!strict
+ && reload_in_progress
+ && (REG_P (op0) || GET_CODE (op0) == PLUS)
+ && REG_P (op1))
+ return true;
+
+ return (REG_P (op0) && REG_P (op1)
+ && ((INT_REG_OK_FOR_BASE_P (op0, strict)
+ && INT_REG_OK_FOR_INDEX_P (op1, strict))
+ || (INT_REG_OK_FOR_BASE_P (op1, strict)
+ && INT_REG_OK_FOR_INDEX_P (op0, strict))));
+}
+
+inline bool
+legitimate_indirect_address_p (rtx x, int strict)
+{
+ return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
+}
+
+bool
+macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
+{
+ if (!TARGET_MACHO || !flag_pic
+ || mode != SImode || GET_CODE (x) != MEM)
+ return false;
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) != LO_SUM)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
+ return false;
+ x = XEXP (x, 1);
+
+ return CONSTANT_P (x);
+}
+
+static bool
+legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ if (GET_CODE (x) != LO_SUM)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == DImode))
+ return false;
+ x = XEXP (x, 1);
+
+ if (TARGET_ELF || TARGET_MACHO)
+ {
+ if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
+ return false;
+ if (TARGET_TOC)
+ return false;
+ if (GET_MODE_NUNITS (mode) != 1)
+ return false;
+ if (GET_MODE_BITSIZE (mode) > 64
+ || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
+ && !(TARGET_HARD_FLOAT && TARGET_FPRS && mode == DFmode)))
+ return false;
+
+ return CONSTANT_P (x);
+ }
+
+ return false;
+}
+
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This is used from only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was
+ called. In some cases it is useful to look at this to decide what
+ needs to be done.
+
+ MODE is passed so that this function can use GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this function to do nothing. It exists to
+ recognize opportunities to optimize the output.
+
+ On RS/6000, first check for the sum of a register with a constant
+ integer that is out of range. If so, generate code to add the
+ constant with the low-order 16 bits masked to the register and force
+ this result into another register (this can be done with `cau').
+ Then generate an address of REG+(CONST&0xffff), allowing for the
+ possibility of bit 16 being a one.
+
+ Then check for the sum of a register and something not constant; try to
+ load the other operand into a register and return the sum. */
+
+rtx
+rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
+ if (model != 0)
+ return rs6000_legitimize_tls_address (x, model);
+ }
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000)
+ {
+ HOST_WIDE_INT high_int, low_int;
+ rtx sum;
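+ /* E.g. base + 0x12345 is rewritten as (base + 0x10000) + 0x2345,
+ with the high part forced into a register. */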
+ low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
+ high_int = INTVAL (XEXP (x, 1)) - low_int;
+ sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ GEN_INT (high_int)), 0);
+ return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
+ }
+ else if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) != CONST_INT
+ && GET_MODE_NUNITS (mode) == 1
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS)
+ || TARGET_POWERPC64
+ || (((mode != DImode && mode != DFmode) || TARGET_E500_DOUBLE)
+ && mode != TFmode))
+ && (TARGET_POWERPC64 || mode != DImode)
+ && mode != TImode)
+ {
+ return gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
+ }
+ else if (ALTIVEC_VECTOR_MODE (mode))
+ {
+ rtx reg;
+
+ /* Make sure both operands are registers. */
+ if (GET_CODE (x) == PLUS)
+ return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
+ force_reg (Pmode, XEXP (x, 1)));
+
+ reg = force_reg (Pmode, x);
+ return reg;
+ }
+ else if (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DImode)))
+ {
+ if (mode == DImode)
+ return NULL_RTX;
+ /* We accept [reg + reg] and [reg + OFFSET]. */
+
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx op1 = XEXP (x, 0);
+ rtx op2 = XEXP (x, 1);
+
+ op1 = force_reg (Pmode, op1);
+
+ if (GET_CODE (op2) != REG
+ && (GET_CODE (op2) != CONST_INT
+ || !SPE_CONST_OFFSET_OK (INTVAL (op2))))
+ op2 = force_reg (Pmode, op2);
+
+ return gen_rtx_PLUS (Pmode, op1, op2);
+ }
+
+ return force_reg (Pmode, x);
+ }
+ else if (TARGET_ELF
+ && TARGET_32BIT
+ && TARGET_NO_TOC
+ && ! flag_pic
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x)
+ && GET_MODE_NUNITS (mode) == 1
+ && (GET_MODE_BITSIZE (mode) <= 32
+ || ((TARGET_HARD_FLOAT && TARGET_FPRS) && mode == DFmode)))
+ {
+ rtx reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_elf_high (reg, x));
+ return gen_rtx_LO_SUM (Pmode, reg, x);
+ }
+ else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
+ && ! flag_pic
+#if TARGET_MACHO
+ && ! MACHO_DYNAMIC_NO_PIC_P
+#endif
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x)
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS) || mode != DFmode)
+ && mode != DImode
+ && mode != TImode)
+ {
+ rtx reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_macho_high (reg, x));
+ return gen_rtx_LO_SUM (Pmode, reg, x);
+ }
+ else if (TARGET_TOC
+ && constant_pool_expr_p (x)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
+ {
+ return create_TOC_reference (x);
+ }
+ else
+ return NULL_RTX;
+}
+
+/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
+ We need to emit DTP-relative relocations. */
+
+static void
+rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ switch (size)
+ {
+ case 4:
+ fputs ("\t.long\t", file);
+ break;
+ case 8:
+ fputs (DOUBLE_INT_ASM_OP, file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ output_addr_const (file, x);
+ fputs ("@dtprel+0x8000", file);
+}
+
+/* Construct the SYMBOL_REF for the tls_get_addr function. */
+
+static GTY(()) rtx rs6000_tls_symbol;
+static rtx
+rs6000_tls_get_addr (void)
+{
+ if (!rs6000_tls_symbol)
+ rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
+
+ return rs6000_tls_symbol;
+}
+
+/* Construct the SYMBOL_REF for TLS GOT references. */
+
+static GTY(()) rtx rs6000_got_symbol;
+static rtx
+rs6000_got_sym (void)
+{
+ if (!rs6000_got_symbol)
+ {
+ rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
+ SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
+ }
+
+ return rs6000_got_symbol;
+}
+
+/* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
+ this (thread-local) address. */
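+
+/* The cases below handle, in order: local-exec with 16-bit and with 32-bit
+ offsets, then (through the GOT) global-dynamic, local-dynamic, and
+ initial-exec, the last of which also covers 64-bit-offset local-exec. */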
+
+static rtx
+rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
+{
+ rtx dest, insn;
+
+ dest = gen_reg_rtx (Pmode);
+ if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
+ {
+ rtx tlsreg;
+
+ if (TARGET_64BIT)
+ {
+ tlsreg = gen_rtx_REG (Pmode, 13);
+ insn = gen_tls_tprel_64 (dest, tlsreg, addr);
+ }
+ else
+ {
+ tlsreg = gen_rtx_REG (Pmode, 2);
+ insn = gen_tls_tprel_32 (dest, tlsreg, addr);
+ }
+ emit_insn (insn);
+ }
+ else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
+ {
+ rtx tlsreg, tmp;
+
+ tmp = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ {
+ tlsreg = gen_rtx_REG (Pmode, 13);
+ insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
+ }
+ else
+ {
+ tlsreg = gen_rtx_REG (Pmode, 2);
+ insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
+ }
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
+ else
+ insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
+ emit_insn (insn);
+ }
+ else
+ {
+ rtx r3, got, tga, tmp1, tmp2, eqv;
+
+ /* We currently use relocations like @got@tlsgd for tls, which
+ means the linker will handle allocation of tls entries, placing
+ them in the .got section. So use a pointer to the .got section,
+ not one to secondary TOC sections used by 64-bit -mminimal-toc,
+ or to secondary GOT sections used by 32-bit -fPIC. */
+ if (TARGET_64BIT)
+ got = gen_rtx_REG (Pmode, 2);
+ else
+ {
+ if (flag_pic == 1)
+ got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+ else
+ {
+ rtx gsym = rs6000_got_sym ();
+ got = gen_reg_rtx (Pmode);
+ if (flag_pic == 0)
+ rs6000_emit_move (got, gsym, Pmode);
+ else
+ {
+ rtx tempLR, tmp3, mem;
+ rtx first, last;
+
+ tempLR = gen_reg_rtx (Pmode);
+ tmp1 = gen_reg_rtx (Pmode);
+ tmp2 = gen_reg_rtx (Pmode);
+ tmp3 = gen_reg_rtx (Pmode);
+ mem = gen_const_mem (Pmode, tmp1);
+
+ first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, gsym));
+ emit_move_insn (tmp1, tempLR);
+ emit_move_insn (tmp2, mem);
+ emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
+ last = emit_move_insn (got, tmp3);
+ REG_NOTES (last) = gen_rtx_EXPR_LIST (REG_EQUAL, gsym,
+ REG_NOTES (last));
+ REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
+ REG_NOTES (first));
+ REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
+ REG_NOTES (last));
+ }
+ }
+ }
+
+ if (model == TLS_MODEL_GLOBAL_DYNAMIC)
+ {
+ r3 = gen_rtx_REG (Pmode, 3);
+ if (TARGET_64BIT)
+ insn = gen_tls_gd_64 (r3, got, addr);
+ else
+ insn = gen_tls_gd_32 (r3, got, addr);
+ start_sequence ();
+ emit_insn (insn);
+ tga = gen_rtx_MEM (Pmode, rs6000_tls_get_addr ());
+ insn = gen_call_value (r3, tga, const0_rtx, const0_rtx);
+ insn = emit_call_insn (insn);
+ CONST_OR_PURE_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
+ insn = get_insns ();
+ end_sequence ();
+ emit_libcall_block (insn, dest, r3, addr);
+ }
+ else if (model == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ r3 = gen_rtx_REG (Pmode, 3);
+ if (TARGET_64BIT)
+ insn = gen_tls_ld_64 (r3, got);
+ else
+ insn = gen_tls_ld_32 (r3, got);
+ start_sequence ();
+ emit_insn (insn);
+ tga = gen_rtx_MEM (Pmode, rs6000_tls_get_addr ());
+ insn = gen_call_value (r3, tga, const0_rtx, const0_rtx);
+ insn = emit_call_insn (insn);
+ CONST_OR_PURE_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
+ insn = get_insns ();
+ end_sequence ();
+ tmp1 = gen_reg_rtx (Pmode);
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_TLSLD);
+ emit_libcall_block (insn, tmp1, r3, eqv);
+ if (rs6000_tls_size == 16)
+ {
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_64 (dest, tmp1, addr);
+ else
+ insn = gen_tls_dtprel_32 (dest, tmp1, addr);
+ }
+ else if (rs6000_tls_size == 32)
+ {
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
+ else
+ insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
+ else
+ insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
+ }
+ else
+ {
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
+ else
+ insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
+ emit_insn (insn);
+ insn = gen_rtx_SET (Pmode, dest,
+ gen_rtx_PLUS (Pmode, tmp2, tmp1));
+ }
+ emit_insn (insn);
+ }
+ else
+ {
+ /* IE, or 64 bit offset LE. */
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_got_tprel_64 (tmp2, got, addr);
+ else
+ insn = gen_tls_got_tprel_32 (tmp2, got, addr);
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_tls_64 (dest, tmp2, addr);
+ else
+ insn = gen_tls_tls_32 (dest, tmp2, addr);
+ emit_insn (insn);
+ }
+ }
+
+ return dest;
+}
+
+/* Return 1 if X contains a thread-local symbol. */
+
+bool
+rs6000_tls_referenced_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
+}
+
+/* Return 1 if *X is a thread-local symbol. This is the same as
+ rs6000_tls_symbol_ref except for the type of the unused argument. */
+
+static int
+rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ return RS6000_SYMBOL_REF_TLS_P (*x);
+}
+
+/* The convention appears to be to define this wherever it is used.
+ Since legitimize_reload_address is now defined here, REG_MODE_OK_FOR_BASE_P
+ is used here as well. */
+#ifndef REG_MODE_OK_FOR_BASE_P
+#define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
+#endif
+
+/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
+ replace the input X, or the original X if no replacement is called for.
+ The output parameter *WIN is 1 if the calling macro should goto WIN,
+ 0 if it should not.
+
+ For RS/6000, we wish to handle large displacements off a base
+ register by splitting the addend across an addi/addis pair and the mem insn.
+ This cuts the number of extra insns needed from 3 to 1.
+
+ On Darwin, we use this to generate code for floating point constants.
+ A movsf_low is generated so we wind up with 2 instructions rather than 3.
+ The Darwin code is inside #if TARGET_MACHO because only then is
+ machopic_function_base_name() defined. */
+rtx
+rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED, int *win)
+{
+ /* We must recognize output that we have already generated ourselves. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && GET_CODE (x) == LO_SUM
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
+ && GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 0)) == CONST
+ && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
+ && GET_CODE (XEXP (XEXP (x, 1), 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == SYMBOL_REF)
+ {
+ /* Result of previous invocation of this function on Darwin
+ floating point constant. */
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+#endif
+
+ /* Force ld/std non-word aligned offset into base register by wrapping
+ in offset 0. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < 32
+ && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) & 3) != 0
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
+ && TARGET_POWERPC64)
+ {
+ x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+ *win = 1;
+ return x;
+ }
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && !SPE_VECTOR_MODE (mode)
+ && !(TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DImode))
+ && !ALTIVEC_VECTOR_MODE (mode))
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT high
+ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+
+ /* Check for 32-bit overflow. */
+ if (high + low != val)
+ {
+ *win = 0;
+ return x;
+ }
+
+ /* Reload the high part into a base reg; leave the low part
+ in the mem directly. */
+
+ x = gen_rtx_PLUS (GET_MODE (x),
+ gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (high)),
+ GEN_INT (low));
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+ if (GET_CODE (x) == SYMBOL_REF
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+#if TARGET_MACHO
+ && DEFAULT_ABI == ABI_DARWIN
+ && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+#else
+ && DEFAULT_ABI == ABI_V4
+ && !flag_pic
+#endif
+ /* Don't do this for TFmode, since the result isn't offsettable.
+ The same goes for DImode without 64-bit gprs and DFmode
+ without fprs. */
+ && mode != TFmode
+ && (mode != DImode || TARGET_POWERPC64)
+ && (mode != DFmode || TARGET_POWERPC64
+ || (TARGET_FPRS && TARGET_HARD_FLOAT)))
+ {
+#if TARGET_MACHO
+ if (flag_pic)
+ {
+ rtx offset = gen_rtx_CONST (Pmode,
+ gen_rtx_MINUS (Pmode, x,
+ machopic_function_base_sym ()));
+ x = gen_rtx_LO_SUM (GET_MODE (x),
+ gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
+ gen_rtx_HIGH (Pmode, offset)), offset);
+ }
+ else
+#endif
+ x = gen_rtx_LO_SUM (GET_MODE (x),
+ gen_rtx_HIGH (Pmode, x), x);
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+ /* Reload an offset address wrapped by an AND that represents the
+ masking of the lower bits. Strip the outer AND and let reload
+ convert the offset address into an indirect address. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ {
+ x = XEXP (x, 0);
+ *win = 1;
+ return x;
+ }
+
+ if (TARGET_TOC
+ && constant_pool_expr_p (x)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+ {
+ x = create_TOC_reference (x);
+ *win = 1;
+ return x;
+ }
+ *win = 0;
+ return x;
+}
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the RS/6000, there are four valid addresses: a SYMBOL_REF that
+ refers to a constant pool entry of an address (or the sum of it
+ plus a constant), a short (16-bit signed) constant plus a register,
+ the sum of two registers, or a register indirect, possibly with an
+ auto-increment. For DFmode and DImode with a constant plus register,
+ we must ensure that both words are addressable or PowerPC64 with offset
+ word aligned.
+
+ For modes spanning multiple registers (DFmode in 32-bit GPRs,
+ 32-bit DImode, TImode, TFmode), indexed addressing cannot be used because
+ adjacent memory cells are accessed by adding word-sized offsets
+ during assembly output. */
+int
+rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
+{
+ /* If this is an unaligned stvx/lvx type address, discard the outer AND. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ x = XEXP (x, 0);
+
+ if (RS6000_SYMBOL_REF_TLS_P (x))
+ return 0;
+ if (legitimate_indirect_address_p (x, reg_ok_strict))
+ return 1;
+ if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+ && mode != TFmode
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == DImode))
+ && TARGET_UPDATE
+ && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
+ return 1;
+ if (legitimate_small_data_p (mode, x))
+ return 1;
+ if (legitimate_constant_pool_address_p (x))
+ return 1;
+ /* If not REG_OK_STRICT (i.e. before reload), let any stack offset pass. */
+ if (! reg_ok_strict
+ && GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && (XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == arg_pointer_rtx)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return 1;
+ if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
+ return 1;
+ if (mode != TImode
+ && mode != TFmode
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS)
+ || TARGET_POWERPC64
+ || ((mode != DFmode || TARGET_E500_DOUBLE) && mode != TFmode))
+ && (TARGET_POWERPC64 || mode != DImode)
+ && legitimate_indexed_address_p (x, reg_ok_strict))
+ return 1;
+ if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
+ return 1;
+ return 0;
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+
+ On the RS/6000 this is true for all integral offsets (since AltiVec
+ modes don't allow them) and for pre-increment and pre-decrement addresses.
+
+ ??? Except that due to conceptual problems in offsettable_address_p
+ we can't really report the problems of integral offsets. So leave
+ this assuming that the adjustable offset must be valid for the
+ sub-words of a TFmode operand, which is what we had before. */
+
+bool
+rs6000_mode_dependent_address (rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case PLUS:
+ if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
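+ /* VAL + 12 is the offset of the last word of a worst-case (TFmode)
+ access; it must still fit in a signed 16-bit displacement. */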
+ unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
+ return val + 12 + 0x8000 >= 0x10000;
+ }
+ break;
+
+ case LO_SUM:
+ return true;
+
+ case PRE_INC:
+ case PRE_DEC:
+ return TARGET_UPDATE;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* More elaborate version of recog's offsettable_memref_p predicate
+ that works around the ??? note of rs6000_mode_dependent_address.
+ In particular it accepts
+
+ (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
+
+ in 32-bit mode, that the recog predicate rejects. */
+
+bool
+rs6000_offsettable_memref_p (rtx op)
+{
+ if (!MEM_P (op))
+ return false;
+
+ /* First mimic offsettable_memref_p. */
+ if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
+ return true;
+
+ /* offsettable_address_p invokes rs6000_mode_dependent_address, but
+ the latter predicate knows nothing about the mode of the memory
+ reference and, therefore, assumes that it is the largest supported
+ mode (TFmode). As a consequence, legitimate offsettable memory
+ references are rejected. rs6000_legitimate_offset_address_p contains
+ the correct logic for the PLUS case of rs6000_mode_dependent_address. */
+ return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ For the SPE, GPRs are 64 bits but only 32 bits are visible in
+ scalar instructions. The upper 32 bits are only available to the
+ SIMD instructions.
+
+ POWER and PowerPC GPRs hold 32 bits' worth;
+ PowerPC64 GPRs and FPRs hold 64 bits' worth. */
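+
+/* E.g. DFmode occupies two 32-bit GPRs but a single FPR, and any
+ 16-byte vector mode occupies a single AltiVec register. */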
+
+int
+rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+ if (FP_REGNO_P (regno))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
+
+ if (ALTIVEC_REGNO_P (regno))
+ return ((GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1)
+ / UNITS_PER_ALTIVEC_WORD);
+
+ /* The value returned for SCmode in the E500 double case is 2 for
+ ABI compatibility; storing an SCmode value in a single register
+ would require function_arg and rs6000_spe_function_arg to handle
+ SCmode so as to pass the value correctly in a pair of
+ registers. */
+ if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode)
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
+
+/* Change register usage conditional on target flags. */
+void
+rs6000_conditional_register_usage (void)
+{
+ int i;
+
+ /* Set MQ register fixed (already call_used) if not POWER
+ architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
+ be allocated. */
+ if (! TARGET_POWER)
+ fixed_regs[64] = 1;
+
+ /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
+ if (TARGET_64BIT)
+ fixed_regs[13] = call_used_regs[13]
+ = call_really_used_regs[13] = 1;
+
+ /* Conditionally disable FPRs. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ for (i = 32; i < 64; i++)
+ fixed_regs[i] = call_used_regs[i]
+ = call_really_used_regs[i] = 1;
+
+ /* The TOC register is not killed across calls in a way that is
+ visible to the compiler. */
+ if (DEFAULT_ABI == ABI_AIX)
+ call_really_used_regs[2] = 0;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 2)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 1)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_DARWIN
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_TOC && TARGET_MINIMAL_TOC)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_ALTIVEC)
+ global_regs[VSCR_REGNO] = 1;
+
+ if (TARGET_SPE)
+ {
+ global_regs[SPEFSCR_REGNO] = 1;
+ fixed_regs[FIXED_SCRATCH]
+ = call_used_regs[FIXED_SCRATCH]
+ = call_really_used_regs[FIXED_SCRATCH] = 1;
+ }
+
+ if (! TARGET_ALTIVEC)
+ {
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
+ call_really_used_regs[VRSAVE_REGNO] = 1;
+ }
+
+ if (TARGET_ALTIVEC_ABI)
+ for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
+ call_used_regs[i] = call_really_used_regs[i] = 1;
+}
+
+/* Try to output insns to set TARGET equal to the constant C if it can
+ be done in less than N insns. Do all computations in MODE.
+ Returns the place where the output has been placed if it can be
+ done and the insns have been emitted. If it would take more than N
+ insns, zero is returned and no insns are emitted. */
+
+rtx
+rs6000_emit_set_const (rtx dest, enum machine_mode mode,
+ rtx source, int n ATTRIBUTE_UNUSED)
+{
+ rtx result, insn, set;
+ HOST_WIDE_INT c0, c1;
+
+ switch (mode)
+ {
+ case QImode:
+ case HImode:
+ if (dest == NULL)
+ dest = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, source));
+ return dest;
+
+ case SImode:
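+ /* Build the constant as a lis/ori-style pair: set the high 16 bits
+ first, then IOR in the low 16 bits. */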
+ result = no_new_pseudos ? dest : gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, result,
+ GEN_INT (INTVAL (source)
+ & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IOR (SImode, result,
+ GEN_INT (INTVAL (source) & 0xffff))));
+ result = dest;
+ break;
+
+ case DImode:
+ switch (GET_CODE (source))
+ {
+ case CONST_INT:
+ c0 = INTVAL (source);
+ c1 = -(c0 < 0);
+ break;
+
+ case CONST_DOUBLE:
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = -(c0 < 0);
+#else
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = CONST_DOUBLE_HIGH (source);
+#endif
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ result = rs6000_emit_set_long_const (dest, c0, c1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ insn = get_last_insn ();
+ set = single_set (insn);
+ if (! CONSTANT_P (SET_SRC (set)))
+ set_unique_reg_note (insn, REG_EQUAL, source);
+
+ return result;
+}
+
+/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
+ fall back to a straightforward decomposition. We do this to avoid
+ exponential run times encountered when looking for longer sequences
+ with rs6000_emit_set_const. */
+static rtx
+rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
+{
+ if (!TARGET_POWERPC64)
+ {
+ rtx operand1, operand2;
+
+ operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operand2 = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
+ DImode);
+ emit_move_insn (operand1, GEN_INT (c1));
+ emit_move_insn (operand2, GEN_INT (c2));
+ }
+ else
+ {
+ HOST_WIDE_INT ud1, ud2, ud3, ud4;
+
+ ud1 = c1 & 0xffff;
+ ud2 = (c1 & 0xffff0000) >> 16;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c2 = c1 >> 32;
+#endif
+ ud3 = c2 & 0xffff;
+ ud4 = (c2 & 0xffff0000) >> 16;
+
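+ /* The worst case below is a five-insn lis/ori/sldi/oris/ori sequence:
+ load UD4, OR in UD3, shift left 32 bits, then OR in UD2 and UD1. */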
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
+ {
+ if (ud1 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
+ else
+ emit_move_insn (dest, GEN_INT (ud1));
+ }
+
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
+ {
+ if (ud2 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud2 << 16));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ || (ud4 == 0 && ! (ud3 & 0x8000)))
+ {
+ if (ud3 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud3 << 16));
+
+ if (ud2 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud2)));
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (16)));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ else
+ {
+ if (ud4 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud4 << 16));
+
+ if (ud3 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud3)));
+
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
+ if (ud2 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
+ GEN_INT (ud2 << 16)));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ }
+ return dest;
+}
+
+/* Helper for rs6000_emit_move. Get rid of [r+r] memory refs in cases
+ where they won't work (TImode, TFmode). */
+
+static void
+rs6000_eliminate_indexed_memrefs (rtx operands[2])
+{
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (XEXP (operands[0], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
+ && ! reload_in_progress)
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_addr_to_reg (XEXP (operands[0], 0)));
+
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
+ && ! reload_in_progress)
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_addr_to_reg (XEXP (operands[1], 0)));
+}
+
+/* Emit a move from SOURCE to DEST in mode MODE. */
+void
+rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
+{
+ rtx operands[2];
+ operands[0] = dest;
+ operands[1] = source;
+
+ /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && ! FLOAT_MODE_P (mode)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* FIXME. This should never happen. */
+ /* Since it seems that it does, do the safe thing and convert
+ to a CONST_INT. */
+ operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
+ }
+ gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
+ || FLOAT_MODE_P (mode)
+ || ((CONST_DOUBLE_HIGH (operands[1]) != 0
+ || CONST_DOUBLE_LOW (operands[1]) < 0)
+ && (CONST_DOUBLE_HIGH (operands[1]) != -1
+ || CONST_DOUBLE_LOW (operands[1]) >= 0)));
+
+ /* Check if GCC is setting up a block move that will end up using FP
+ registers as temporaries. We must make sure this is acceptable. */
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM
+ && mode == DImode
+ && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
+ || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
+ && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
+ ? 32 : MEM_ALIGN (operands[0])))
+ || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
+ ? 32
+ : MEM_ALIGN (operands[1]))))
+ && ! MEM_VOLATILE_P (operands [0])
+ && ! MEM_VOLATILE_P (operands [1]))
+ {
+ emit_move_insn (adjust_address (operands[0], SImode, 0),
+ adjust_address (operands[1], SImode, 0));
+ emit_move_insn (adjust_address (operands[0], SImode, 4),
+ adjust_address (operands[1], SImode, 4));
+ return;
+ }
+
+ if (!no_new_pseudos && GET_CODE (operands[0]) == MEM
+ && !gpc_reg_operand (operands[1], mode))
+ operands[1] = force_reg (mode, operands[1]);
+
+ if (mode == SFmode && ! TARGET_POWERPC
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && GET_CODE (operands[0]) == MEM)
+ {
+ int regnum;
+
+ if (reload_in_progress || reload_completed)
+ regnum = true_regnum (operands[1]);
+ else if (GET_CODE (operands[1]) == REG)
+ regnum = REGNO (operands[1]);
+ else
+ regnum = -1;
+
+ /* If operands[1] is a register, on POWER it may have
+ double-precision data in it, so truncate it to single
+ precision. */
+ if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx newreg;
+ newreg = (no_new_pseudos ? operands[1] : gen_reg_rtx (mode));
+ emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
+ operands[1] = newreg;
+ }
+ }
+
+ /* Recognize the case where operand[1] is a reference to thread-local
+ data and load its address to a register. */
+ if (rs6000_tls_referenced_p (operands[1]))
+ {
+ enum tls_model model;
+ rtx tmp = operands[1];
+ rtx addend = NULL;
+
+ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (tmp, 0), 1);
+ tmp = XEXP (XEXP (tmp, 0), 0);
+ }
+
+ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ model = SYMBOL_REF_TLS_MODEL (tmp);
+ gcc_assert (model != 0);
+
+ tmp = rs6000_legitimize_tls_address (tmp, model);
+ if (addend)
+ {
+ tmp = gen_rtx_PLUS (mode, tmp, addend);
+ tmp = force_operand (tmp, operands[0]);
+ }
+ operands[1] = tmp;
+ }
+
+ /* Handle the case where reload calls us with an invalid address. */
+ if (reload_in_progress && mode == Pmode
+ && (! general_operand (operands[1], mode)
+ || ! nonimmediate_operand (operands[0], mode)))
+ goto emit_set;
+
+ /* 128-bit constant floating-point values on Darwin should really be
+ loaded as two parts. */
+ if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
+ && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
+ know how to get a DFmode SUBREG of a TFmode. */
+ rs6000_emit_move (simplify_gen_subreg (DImode, operands[0], mode, 0),
+ simplify_gen_subreg (DImode, operands[1], mode, 0),
+ DImode);
+ rs6000_emit_move (simplify_gen_subreg (DImode, operands[0], mode,
+ GET_MODE_SIZE (DImode)),
+ simplify_gen_subreg (DImode, operands[1], mode,
+ GET_MODE_SIZE (DImode)),
+ DImode);
+ return;
+ }
+
+ /* FIXME: In the long term, this switch statement should go away
+ and be replaced by a sequence of tests based on things like
+ mode == Pmode. */
+ switch (mode)
+ {
+ case HImode:
+ case QImode:
+ if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != CONST_INT)
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case TFmode:
+ rs6000_eliminate_indexed_memrefs (operands);
+ /* fall through */
+
+ case DFmode:
+ case SFmode:
+ if (CONSTANT_P (operands[1])
+ && ! easy_fp_constant (operands[1], mode))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case V16QImode:
+ case V8HImode:
+ case V4SFmode:
+ case V4SImode:
+ case V4HImode:
+ case V2SFmode:
+ case V2SImode:
+ case V1DImode:
+ if (CONSTANT_P (operands[1])
+ && !easy_vector_constant (operands[1], mode))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case SImode:
+ case DImode:
+ /* Use the default pattern for the address of ELF small data. */
+ if (TARGET_ELF
+ && mode == Pmode
+ && DEFAULT_ABI == ABI_V4
+ && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)
+ && small_data_operand (operands[1], mode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+
+ if (DEFAULT_ABI == ABI_V4
+ && mode == Pmode && mode == SImode
+ && flag_pic == 1 && got_operand (operands[1], mode))
+ {
+ emit_insn (gen_movsi_got (operands[0], operands[1]));
+ return;
+ }
+
+ if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
+ && TARGET_NO_TOC
+ && ! flag_pic
+ && mode == Pmode
+ && CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != CONST_INT)
+ {
+ rtx target = (no_new_pseudos ? operands[0] : gen_reg_rtx (mode));
+
+ /* If this is a function address on -mcall-aixdesc,
+ convert it to the address of the descriptor. */
+ if (DEFAULT_ABI == ABI_AIX
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && XSTR (operands[1], 0)[0] == '.')
+ {
+ const char *name = XSTR (operands[1], 0);
+ rtx new_ref;
+ while (*name == '.')
+ name++;
+ new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
+ CONSTANT_POOL_ADDRESS_P (new_ref)
+ = CONSTANT_POOL_ADDRESS_P (operands[1]);
+ SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
+ SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
+ SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
+ operands[1] = new_ref;
+ }
+
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+#if TARGET_MACHO
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ {
+ /* Take care of any required data indirection. */
+ operands[1] = rs6000_machopic_legitimize_pic_address (
+ operands[1], mode, operands[0]);
+ if (operands[0] != operands[1])
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0], operands[1]));
+ return;
+ }
+#endif
+ emit_insn (gen_macho_high (target, operands[1]));
+ emit_insn (gen_macho_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ emit_insn (gen_elf_high (target, operands[1]));
+ emit_insn (gen_elf_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ /* If this is a SYMBOL_REF that refers to a constant pool entry,
+ and we have put it in the TOC, we just need to make a TOC-relative
+ reference to it. */
+ if (TARGET_TOC
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && constant_pool_expr_p (operands[1])
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
+ get_pool_mode (operands[1])))
+ {
+ operands[1] = create_TOC_reference (operands[1]);
+ }
+ else if (mode == Pmode
+ && CONSTANT_P (operands[1])
+ && ((GET_CODE (operands[1]) != CONST_INT
+ && ! easy_fp_constant (operands[1], mode))
+ || (GET_CODE (operands[1]) == CONST_INT
+ && num_insns_constant (operands[1], mode) > 2)
+ || (GET_CODE (operands[0]) == REG
+ && FP_REGNO_P (REGNO (operands[0]))))
+ && GET_CODE (operands[1]) != HIGH
+ && ! legitimate_constant_pool_address_p (operands[1])
+ && ! toc_relative_expr_p (operands[1]))
+ {
+ /* Emit a USE operation so that the constant isn't deleted if
+ expensive optimizations are turned on because nobody
+ references it. This should only be done for operands that
+ contain SYMBOL_REFs with CONSTANT_POOL_ADDRESS_P set.
+ This should not be done for operands that contain LABEL_REFs.
+ For now, we just handle the obvious case. */
+ if (GET_CODE (operands[1]) != LABEL_REF)
+ emit_insn (gen_rtx_USE (VOIDmode, operands[1]));
+
+#if TARGET_MACHO
+ /* Darwin uses a special PIC legitimizer. */
+ if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
+ {
+ /* APPLE LOCAL begin radar 4232296 */
+ /* If a symbol node has been generated but its flags have not been set,
+ as can happen during cost computation of generated code, do not
+ attempt to update the static tables, which rely on the referenced
+ symbol's flags having been set; otherwise a bogus PIC stub
+ would be generated. */
+ if (!(GET_CODE (operands[1]) == SYMBOL_REF && SYMBOL_REF_FLAGS (operands[1]) == 0))
+ operands[1] =
+ rs6000_machopic_legitimize_pic_address (operands[1], mode,
+ operands[0]);
+ /* APPLE LOCAL end radar 4232296 */
+ if (operands[0] != operands[1])
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+#endif
+
+ /* If we are to limit the number of things we put in the TOC and
+ this is a symbol plus a constant we can add in one insn,
+ just put the symbol in the TOC and add the constant. Don't do
+ this if reload is in progress. */
+ if (GET_CODE (operands[1]) == CONST
+ && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
+ && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
+ && ! side_effects_p (operands[0]))
+ {
+ rtx sym =
+ force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
+ rtx other = XEXP (XEXP (operands[1], 0), 1);
+
+ sym = force_reg (mode, sym);
+ if (mode == SImode)
+ emit_insn (gen_addsi3 (operands[0], sym, other));
+ else
+ emit_insn (gen_adddi3 (operands[0], sym, other));
+ return;
+ }
+
+ operands[1] = force_const_mem (mode, operands[1]);
+
+ if (TARGET_TOC
+ && constant_pool_expr_p (XEXP (operands[1], 0))
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
+ get_pool_constant (XEXP (operands[1], 0)),
+ get_pool_mode (XEXP (operands[1], 0))))
+ {
+ operands[1]
+ = gen_const_mem (mode,
+ create_TOC_reference (XEXP (operands[1], 0)));
+ set_mem_alias_set (operands[1], get_TOC_alias_set ());
+ }
+ }
+ break;
+
+ case TImode:
+ rs6000_eliminate_indexed_memrefs (operands);
+
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ operands[0], operands[1]),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_SCRATCH (SImode)))));
+ return;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Above, we may have called force_const_mem which may have returned
+ an invalid address. If we can, fix this up; otherwise, reload will
+ have to deal with it. */
+ if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
+ operands[1] = validize_mem (operands[1]);
+
+ emit_set:
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+}
+
+/* Nonzero if we can use a floating-point register to pass this arg. */
+#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
+ (SCALAR_FLOAT_MODE_P (MODE) \
+ && !DECIMAL_FLOAT_MODE_P (MODE) \
+ && (CUM)->fregno <= FP_ARG_MAX_REG \
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+
+/* Nonzero if we can use an AltiVec register to pass this arg. */
+#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
+ (ALTIVEC_VECTOR_MODE (MODE) \
+ && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
+ && TARGET_ALTIVEC_ABI \
+ && (NAMED))
+
+/* Return a nonzero value to say to return the function value in
+ memory, just as large structures are always returned. TYPE will be
+ the data type of the value, and FNTYPE will be the type of the
+ function doing the returning, or @code{NULL} for libcalls.
+
+ The AIX ABI for the RS/6000 specifies that all structures are
+ returned in memory. The Darwin ABI does the same. The SVR4 ABI
+ specifies that structures <= 8 bytes are returned in r3/r4, but a
+ draft put them in memory, and GCC used to implement the draft
+ instead of the final standard. Therefore, aix_struct_return
+ controls this instead of DEFAULT_ABI; V.4 targets needing backward
+ compatibility can change DRAFT_V4_STRUCT_RET to override the
+ default, and -m switches get the final word. See
+ rs6000_override_options for more details.
+
+ The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
+ long double support is enabled. These values are returned in memory.
+
+ int_size_in_bytes returns -1 for variable size objects, which go in
+ memory always. The cast to unsigned makes -1 > 8. */
+
+static bool
+rs6000_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
+{
+ /* In the darwin64 abi, try to use registers for larger structs
+ if possible. */
+ if (rs6000_darwin64_abi
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed
+ as an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
+ if (valret)
+ return false;
+ /* Otherwise fall through to more conventional ABI rules. */
+ }
+
+ if (AGGREGATE_TYPE_P (type)
+ && (aix_struct_return
+ || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
+ return true;
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
+ return false;
+
+ /* Return synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_return_big_vectors = false;
+ if (!warned_for_return_big_vectors)
+ {
+ warning (0, "GCC vector returned by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_return_big_vectors = true;
+ }
+ return true;
+ }
+
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
+ return true;
+
+ return false;
+}
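+
+/* Illustrative sketch, not part of GCC: the aggregate branch above
+ reduces to a size test once aix_struct_return is known. The
+ sketch_* names below are hypothetical, for exposition only. The
+ unsigned cast makes the -1 that int_size_in_bytes returns for
+ variable-sized types compare greater than 8, so such types always
+ go in memory. */
+#if 0 /* standalone example, compile separately */
+#include <stdbool.h>
+
+static bool
+sketch_aggregate_returns_in_memory (bool aix_struct_return, long size)
+{
+ return aix_struct_return || (unsigned long) size > 8;
+}
+
+/* sketch_aggregate_returns_in_memory (false, 8) == false (r3/r4)
+ sketch_aggregate_returns_in_memory (false, 12) == true
+ sketch_aggregate_returns_in_memory (false, -1) == true (variable size)
+ sketch_aggregate_returns_in_memory (true, 4) == true (AIX/Darwin) */
+#endif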
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ For incoming args we set the number of arguments in the prototype large
+ so we never return a PARALLEL. */
+
+void
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+ rtx libname ATTRIBUTE_UNUSED, int incoming,
+ int libcall, int n_named_args)
+{
+ static CUMULATIVE_ARGS zero_cumulative;
+
+ *cum = zero_cumulative;
+ cum->words = 0;
+ cum->fregno = FP_ARG_MIN_REG;
+ cum->vregno = ALTIVEC_ARG_MIN_REG;
+ cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
+ cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
+ ? CALL_LIBCALL : CALL_NORMAL);
+ cum->sysv_gregno = GP_ARG_MIN_REG;
+ cum->stdarg = fntype
+ && (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ cum->nargs_prototype = 0;
+ if (incoming || cum->prototype)
+ cum->nargs_prototype = n_named_args;
+
+ /* Check for a longcall attribute. */
+ if ((!fntype && rs6000_default_long_calls)
+ || (fntype
+ && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
+ && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
+ cum->call_cookie |= CALL_LONG;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "\ninit_cumulative_args:");
+ if (fntype)
+ {
+ tree ret_type = TREE_TYPE (fntype);
+ fprintf (stderr, " ret code = %s,",
+ tree_code_name[ (int)TREE_CODE (ret_type) ]);
+ }
+
+ if (cum->call_cookie & CALL_LONG)
+ fprintf (stderr, " longcall,");
+
+ fprintf (stderr, " proto = %d, nargs = %d\n",
+ cum->prototype, cum->nargs_prototype);
+ }
+
+ if (fntype
+ && !TARGET_ALTIVEC
+ && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
+ {
+ error ("cannot return value in vector register because"
+ " altivec instructions are disabled, use -maltivec"
+ " to enable them");
+ }
+}
+
+/* Return true if TYPE must be passed on the stack and not in registers. */
+
+static bool
+rs6000_must_pass_in_stack (enum machine_mode mode, tree type)
+{
+ if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
+ return must_pass_in_stack_var_size (mode, type);
+ else
+ return must_pass_in_stack_var_size_or_pad (mode, type);
+}
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding.
+
+ For the AIX ABI, structs are always stored left-shifted in their
+ argument slot. */
+
+enum direction
+function_arg_padding (enum machine_mode mode, tree type)
+{
+#ifndef AGGREGATE_PADDING_FIXED
+#define AGGREGATE_PADDING_FIXED 0
+#endif
+#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
+#define AGGREGATES_PAD_UPWARD_ALWAYS 0
+#endif
+
+ if (!AGGREGATE_PADDING_FIXED)
+ {
+ /* GCC used to pass structures of the same size as integer types as
+ if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
+ i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
+ passed padded downward, except that -mstrict-align further
+ muddied the water in that multi-component structures of 2 and 4
+ bytes in size were passed padded upward.
+
+ The following arranges for best compatibility with previous
+ versions of gcc, but removes the -mstrict-align dependency. */
+ if (BYTES_BIG_ENDIAN)
+ {
+ HOST_WIDE_INT size = 0;
+
+ if (mode == BLKmode)
+ {
+ if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ size = int_size_in_bytes (type);
+ }
+ else
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 1 || size == 2 || size == 4)
+ return downward;
+ }
+ return upward;
+ }
+
+ if (AGGREGATES_PAD_UPWARD_ALWAYS)
+ {
+ if (type != 0 && AGGREGATE_TYPE_P (type))
+ return upward;
+ }
+
+ /* Fall back to the default. */
+ return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
+}
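+
+/* Illustrative sketch, not part of GCC: the big-endian compatibility
+ rule above, reduced to a size test (sketch_pads_downward is a
+ hypothetical name). Sizes 1, 2 and 4 mimic the old
+ pass-as-integer behaviour and pad downward; everything else pads
+ upward. */
+#if 0 /* standalone example, compile separately */
+static int
+sketch_pads_downward (long size)
+{
+ return size == 1 || size == 2 || size == 4;
+}
+/* A 3-byte packed struct pads upward; a 2-byte one pads downward. */
+#endif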
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments.
+
+ V.4 wants long longs and doubles to be double word aligned. Just
+ testing the mode size is a boneheaded way to do this as it means
+ that other types such as complex int are also double word aligned.
+ However, we're stuck with this because changing the ABI might break
+ existing library interfaces.
+
+ Doubleword align SPE vectors.
+ Quadword align Altivec vectors.
+ Quadword align large synthetic vector types. */
+
+int
+function_arg_boundary (enum machine_mode mode, tree type)
+{
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_MODE_SIZE (mode) == 8
+ || (TARGET_HARD_FLOAT
+ && TARGET_FPRS
+ && mode == TFmode)))
+ return 64;
+ else if (SPE_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 8
+ && int_size_in_bytes (type) < 16))
+ return 64;
+ else if (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 16))
+ return 128;
+ else if (rs6000_darwin64_abi && mode == BLKmode
+ && type && TYPE_ALIGN (type) > 64)
+ return 128;
+ else
+ return PARM_BOUNDARY;
+}
+
+/* For a function parm of MODE and TYPE, return the starting word in
+ the parameter area. NWORDS of the parameter area are already used. */
+
+static unsigned int
+rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
+{
+ unsigned int align;
+ unsigned int parm_offset;
+
+ align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
+ parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
+ return nwords + (-(parm_offset + nwords) & align);
+}
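+
+/* Illustrative sketch, not part of GCC: the rounding trick above in
+ isolation. align is boundary_words - 1, a mask since boundaries
+ are powers of two, and (-(parm_offset + nwords) & align) is the
+ padding needed to bring the absolute word offset up to the next
+ boundary. All sketch_* names are hypothetical. */
+#if 0 /* standalone example, compile separately */
+#include <assert.h>
+
+static unsigned int
+sketch_parm_start (unsigned int parm_offset, unsigned int nwords,
+ unsigned int boundary_words)
+{
+ unsigned int align = boundary_words - 1;
+ return nwords + (-(parm_offset + nwords) & align);
+}
+
+int
+main (void)
+{
+ /* V.4 (parm_offset 2): one word already used, next arg doubleword
+ aligned; one padding word is skipped, so it starts at word 2. */
+ assert (sketch_parm_start (2, 1, 2) == 2);
+ /* Already on a boundary: no padding. */
+ assert (sketch_parm_start (2, 0, 2) == 0);
+ return 0;
+}
+#endif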
+
+/* Compute the size (in words) of a function argument. */
+
+static unsigned long
+rs6000_arg_size (enum machine_mode mode, tree type)
+{
+ unsigned long size;
+
+ if (mode != BLKmode)
+ size = GET_MODE_SIZE (mode);
+ else
+ size = int_size_in_bytes (type);
+
+ if (TARGET_32BIT)
+ return (size + 3) >> 2;
+ else
+ return (size + 7) >> 3;
+}
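+
+/* Illustrative sketch, not part of GCC: the same round-up-to-words
+ arithmetic with plain integers (hypothetical sketch_* name). A
+ 10-byte argument needs three 4-byte words on 32-bit targets but
+ only two 8-byte doublewords on 64-bit ones. */
+#if 0 /* standalone example, compile separately */
+static unsigned long
+sketch_arg_size_words (unsigned long size_bytes, int target_32bit)
+{
+ return target_32bit ? (size_bytes + 3) >> 2 : (size_bytes + 7) >> 3;
+}
+/* sketch_arg_size_words (10, 1) == 3; sketch_arg_size_words (10, 0) == 2. */
+#endif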
+
+/* Use this to flush pending int fields. */
+
+static void
+rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ HOST_WIDE_INT bitpos, int final)
+{
+ unsigned int startbit, endbit;
+ int intregs, intoffset;
+ enum machine_mode mode;
+
+ /* APPLE LOCAL begin fix 64-bit varargs 4028089 */
+ /* Handle the situations where a float is taking up the first half
+ of the GPR, and the other half is empty (typically due to
+ alignment restrictions). We can detect this by a 8-byte-aligned
+ int field, or by seeing that this is the final flush for this
+ argument. Count the word and continue on. */
+ if (cum->floats_in_gpr == 1
+ && (cum->intoffset % 64 == 0
+ || (cum->intoffset == -1 && final)))
+ {
+ cum->words++;
+ cum->floats_in_gpr = 0;
+ }
+ /* APPLE LOCAL end fix 64-bit varargs 4028089 */
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ cum->floats_in_gpr = 0;
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Back intoffset up to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ }
+ }
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ cum->words += intregs;
+ /* APPLE LOCAL begin ppc64 abi */
+ /* words should be unsigned. */
+ if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
+ {
+ int pad = (endbit/BITS_PER_WORD) - cum->words;
+ cum->words += pad;
+ }
+ /* APPLE LOCAL end ppc64 abi */
+}
+
+/* The darwin64 ABI calls for us to recurse down through structs,
+ looking for elements passed in registers. Unfortunately, we have
+ to track int register count here also because of misalignments
+ in powerpc alignment mode. */
+
+static void
+rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
+ tree type,
+ HOST_WIDE_INT startbitpos)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
+ else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ /* APPLE LOCAL begin fix 64-bit varargs 4028089 */
+ /* Single-precision floats present a special problem for
+ us, because they are smaller than an 8-byte GPR, and so
+ the structure-packing rules combined with the standard
+ varargs behavior mean that we want to pack float/float
+ and float/int combinations into a single register's
+ space. This is complicated by the arg advance flushing,
+ which works on arbitrarily large groups of int-type
+ fields. */
+ if (mode == SFmode)
+ {
+ if (cum->floats_in_gpr == 1)
+ {
+ /* Two floats in a word; count the word and reset
+ the float count. */
+ cum->words++;
+ cum->floats_in_gpr = 0;
+ }
+ else if (bitpos % 64 == 0)
+ {
+ /* A float at the beginning of an 8-byte word;
+ count it and put off adjusting cum->words until
+ we see if an arg advance flush is going to do it
+ for us. */
+ cum->floats_in_gpr++;
+ }
+ else
+ {
+ /* The float is at the end of a word, preceded
+ by integer fields, so the arg advance flush
+ just above has already set cum->words and
+ everything is taken care of. */
+ }
+ }
+ else
+ /* APPLE LOCAL end fix 64-bit varargs 4028089 */
+ cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
+ }
+ else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
+ {
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
+ cum->vregno++;
+ cum->words += 2;
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
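+
+/* Illustrative sketch, not part of GCC: the floats_in_gpr bookkeeping
+ above for struct { float a, b; } under darwin64. Both fields are
+ 4 bytes; the first starts a GPR and only bumps floats_in_gpr, the
+ second completes the doubleword and bumps words. sketch_* names
+ are hypothetical. */
+#if 0 /* standalone example, compile separately */
+#include <assert.h>
+
+struct sketch_cum { int words; int floats_in_gpr; };
+
+static void
+sketch_advance_float (struct sketch_cum *c, long bitpos)
+{
+ if (c->floats_in_gpr == 1)
+ {
+ c->words++; /* second float: the word is now full */
+ c->floats_in_gpr = 0;
+ }
+ else if (bitpos % 64 == 0)
+ c->floats_in_gpr++; /* first float: defer counting the word */
+ /* else an int-field flush has already counted this word. */
+}
+
+int
+main (void)
+{
+ struct sketch_cum c = { 0, 0 };
+ sketch_advance_float (&c, 0); /* field a at bit 0 */
+ sketch_advance_float (&c, 32); /* field b at bit 32 */
+ assert (c.words == 1 && c.floats_in_gpr == 0);
+ return 0;
+}
+#endif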
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.)
+
+ Note that for args passed by reference, function_arg will be called
+ with MODE and TYPE set to that of the pointer to the arg, not the arg
+ itself. */
+
+void
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named, int depth)
+{
+ int size;
+
+ /* Only tick off an argument if we're not recursing. */
+ if (depth == 0)
+ cum->nargs_prototype--;
+
+ if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
+ {
+ bool stack = false;
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
+ {
+ cum->vregno++;
+ if (!TARGET_ALTIVEC)
+ error ("cannot pass argument in vector register because"
+ " altivec instructions are disabled, use -maltivec"
+ " to enable them");
+
+ /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
+ even if it is going to be passed in a vector register.
+ Darwin does the same for variable-argument functions. */
+ if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
+ || (cum->stdarg && DEFAULT_ABI != ABI_V4))
+ stack = true;
+ }
+ else
+ stack = true;
+
+ if (stack)
+ {
+ int align;
+
+ /* Vector parameters must be 16-byte aligned. This places
+ them at 2 mod 4 in terms of words in 32-bit mode, since
+ the parameter save area starts at offset 24 from the
+ stack. In 64-bit mode, they just have to start on an
+ even word, since the parameter save area is 16-byte
+ aligned. Space for GPRs is reserved even if the argument
+ will be passed in memory. */
+ if (TARGET_32BIT)
+ align = (2 - cum->words) & 3;
+ else
+ align = cum->words & 1;
+ cum->words += align + rs6000_arg_size (mode, type);
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, align=%d, ",
+ cum->words, align);
+ fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
+ cum->nargs_prototype, cum->prototype,
+ GET_MODE_NAME (mode));
+ }
+ }
+ }
+ else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
+ && !cum->stdarg
+ && cum->sysv_gregno <= GP_ARG_MAX_REG)
+ cum->sysv_gregno++;
+
+ else if (rs6000_darwin64_abi
+ && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && (size = int_size_in_bytes (type)) > 0)
+ {
+ /* Variable sized types have size == -1 and are
+ treated as if consisting entirely of ints.
+ Pad to 16 byte boundary if needed. */
+ if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+ /* For varargs, we can just go up by the size of the struct. */
+ if (!named)
+ cum->words += (size + 7) / 8;
+ else
+ {
+ /* It is tempting to say int register count just goes up by
+ sizeof(type)/8, but this is wrong in a case such as
+ { int; double; int; } [powerpc alignment]. We have to
+ grovel through the fields for these too. */
+ cum->intoffset = 0;
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ cum->floats_in_gpr = 0;
+ rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
+ rs6000_darwin64_record_arg_advance_flush (cum,
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ size * BITS_PER_UNIT, 1);
+ }
+ }
+ else if (DEFAULT_ABI == ABI_V4)
+ {
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
+ {
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ else
+ {
+ cum->fregno = FP_ARG_V4_MAX_REG + 1;
+ if (mode == DFmode || mode == TFmode)
+ cum->words += cum->words & 1;
+ cum->words += rs6000_arg_size (mode, type);
+ }
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int gregno = cum->sysv_gregno;
+
+ /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
+ (r7,r8) or (r9,r10). As does any other 2 word item such
+ as complex int due to a historical mistake. */
+ if (n_words == 2)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ {
+ /* Long long and SPE vectors are aligned on the stack.
+ So are other 2 word items such as complex int due to
+ a historical mistake. */
+ if (n_words == 2)
+ cum->words += cum->words & 1;
+ cum->words += n_words;
+ }
+
+ /* Note: we continue to accumulate gregno even once we've started
+ spilling to the stack; this is how expand_builtin_saveregs
+ detects that spilling has begun. */
+ cum->sysv_gregno = gregno + n_words;
+ }
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
+ cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
+ fprintf (stderr, "mode = %4s, named = %d\n",
+ GET_MODE_NAME (mode), named);
+ }
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int start_words = cum->words;
+ int align_words = rs6000_parm_start (mode, type, start_words);
+
+ cum->words = align_words + n_words;
+
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && !DECIMAL_FLOAT_MODE_P (mode)
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
+ cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
+ fprintf (stderr, "named = %d, align = %d, depth = %d\n",
+ named, align_words - start_words, depth);
+ }
+ }
+}
+
+static rtx
+spe_build_register_parallel (enum machine_mode mode, int gregno)
+{
+ rtx r1, r3;
+
+ switch (mode)
+ {
+ case DFmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
+
+ case DCmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ r3 = gen_rtx_REG (DImode, gregno + 2);
+ r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Determine where to put a SIMD argument on the SPE. */
+static rtx
+rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type)
+{
+ int gregno = cum->sysv_gregno;
+
+ /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
+ are passed and returned in a pair of GPRs for ABI compatibility. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == DCmode))
+ {
+ int n_words = rs6000_arg_size (mode, type);
+
+ /* Doubles go in an odd/even register pair (r5/r6, etc). */
+ if (mode == DFmode)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ return NULL_RTX;
+
+ return spe_build_register_parallel (mode, gregno);
+ }
+ if (cum->stdarg)
+ {
+ int n_words = rs6000_arg_size (mode, type);
+
+ /* SPE vectors are put in odd registers. */
+ if (n_words == 2 && (gregno & 1) == 0)
+ gregno += 1;
+
+ if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
+ {
+ rtx r1, r2;
+ enum machine_mode m = SImode;
+
+ r1 = gen_rtx_REG (m, gregno);
+ r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
+ r2 = gen_rtx_REG (m, gregno + 1);
+ r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
+ }
+ else
+ return NULL_RTX;
+ }
+ else
+ {
+ if (gregno <= GP_ARG_MAX_REG)
+ return gen_rtx_REG (mode, gregno);
+ else
+ return NULL_RTX;
+ }
+}
+
+/* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
+ structure between cum->intoffset and bitpos to integer registers. */
+
+static void
+rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos, rtx rvec[], int *k)
+{
+ enum machine_mode mode;
+ unsigned int regno;
+ unsigned int startbit, endbit;
+ int this_regno, intregs, intoffset;
+ rtx reg;
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ /* If this is the trailing part of a word, try to only load that
+ much into the register. Otherwise load the whole register. Note
+ that in the latter case we may pick up unwanted bits. It's not a
+ problem at the moment, but we may wish to revisit this. */
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Back intoffset up to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ mode = word_mode;
+ }
+ }
+ else
+ mode = word_mode;
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ this_regno = cum->words + intoffset / BITS_PER_WORD;
+
+ if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
+ cum->use_stack = 1;
+
+ intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
+ if (intregs <= 0)
+ return;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = GP_ARG_MIN_REG + this_regno;
+ reg = gen_rtx_REG (mode, regno);
+ rvec[(*k)++] =
+ gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_regno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ mode = word_mode;
+ intregs -= 1;
+ }
+ while (intregs > 0);
+}
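+
+/* Illustrative sketch, not part of GCC: the startbit/endbit arithmetic
+ above, assuming 64-bit words as on darwin64. The pending span of
+ int fields is rounded out to word boundaries and then measured in
+ registers. sketch_int_regs is a hypothetical name. */
+#if 0 /* standalone example, compile separately */
+#define SK_BITS_PER_WORD 64
+
+static int
+sketch_int_regs (unsigned int intoffset, unsigned int bitpos)
+{
+ unsigned int startbit = intoffset & -SK_BITS_PER_WORD;
+ unsigned int endbit
+ = (bitpos + SK_BITS_PER_WORD - 1) & -SK_BITS_PER_WORD;
+ return (endbit - startbit) / SK_BITS_PER_WORD;
+}
+/* sketch_int_regs (0, 96) == 2: 96 bits of int fields span two GPRs. */
+#endif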
+
+/* Recursive workhorse for the following. */
+
+static void
+rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, tree type,
+ HOST_WIDE_INT startbitpos, rtx rvec[],
+ int *k)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
+ else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+#if 0
+ switch (mode)
+ {
+ case SCmode: mode = SFmode; break;
+ case DCmode: mode = DFmode; break;
+ case TCmode: mode = TFmode; break;
+ default: break;
+ }
+#endif
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->fregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ if (mode == TFmode)
+ cum->fregno++;
+ }
+ else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
+ {
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
+
+/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
+ the register(s) to be used for each field and subfield of a struct
+ being passed by value, along with the offset of where the
+ register's value may be found in the block. FP fields go in FP
+ register, vector fields go in vector registers, and everything
+ else goes in int registers, packed as in memory.
+
+ This code is also used for function return values. RETVAL indicates
+ whether this is the case.
+
+ Much of this is taken from the SPARC V9 port, which has a similar
+ calling convention. */
+
+static rtx
+rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, tree type,
+ int named, bool retval)
+{
+ rtx rvec[FIRST_PSEUDO_REGISTER];
+ int k = 1, kbase = 1;
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ /* This is a copy; modifications are not visible to our caller. */
+ CUMULATIVE_ARGS copy_cum = *orig_cum;
+ CUMULATIVE_ARGS *cum = &copy_cum;
+
+ /* Pad to 16 byte boundary if needed. */
+ if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+
+ cum->intoffset = 0;
+ cum->use_stack = 0;
+ /* APPLE LOCAL fix 64-bit varargs 4028089 */
+ cum->floats_in_gpr = 0;
+ cum->named = named;
+
+ /* Put entries into rvec[] for individual FP and vector fields, and
+ for the chunks of memory that go in int regs. Note we start at
+ element 1; 0 is reserved for an indication of using memory, and
+ may or may not be filled in below. */
+ rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
+ rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
+
+ /* If any part of the struct went on the stack put all of it there.
+ This hack is because the generic code for
+ FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
+ parts of the struct are not at the beginning. */
+ if (cum->use_stack)
+ {
+ if (retval)
+ return NULL_RTX; /* doesn't go in registers at all */
+ kbase = 0;
+ rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+ if (k > 1 || cum->use_stack)
+ return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
+ else
+ return NULL_RTX;
+}
+
+/* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
+
+static rtx
+rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
+{
+ int n_units;
+ int i, k;
+ rtx rvec[GP_ARG_NUM_REG + 1];
+
+ if (align_words >= GP_ARG_NUM_REG)
+ return NULL_RTX;
+
+ n_units = rs6000_arg_size (mode, type);
+
+ /* Optimize the simple case where the arg fits in one gpr, except in
+ the case of BLKmode due to assign_parms assuming that registers are
+ BITS_PER_WORD wide. */
+ if (n_units == 0
+ || (n_units == 1 && mode != BLKmode))
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+
+ k = 0;
+ if (align_words + n_units > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it goes in memory too,
+ using a magic NULL_RTX component.
+ This is not strictly correct. Only some of the arg belongs in
+ memory, not all of it. However, the normal scheme using
+ function_arg_partial_nregs can result in unusual subregs, eg.
+ (subreg:SI (reg:DF) 4), which are not handled well. The code to
+ store the whole arg to memory is often more efficient than code
+ to store pieces, and we know that space is available in the right
+ place for the whole arg. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+
+ i = 0;
+ do
+ {
+ rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
+ rtx off = GEN_INT (i++ * 4);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
+}
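+
+/* Illustrative sketch, not part of GCC: the (register, byte offset)
+ pairs the loop above yields for a 12-byte BLKmode argument that
+ starts at parameter word 6, with eight argument GPRs r3..r10.
+ Only two of its three words fit in GPRs, so a leading in-memory
+ entry is emitted as well. All names here are hypothetical. */
+#if 0 /* standalone example, compile separately */
+#include <stdio.h>
+
+int
+main (void)
+{
+ int align_words = 6, n_units = 3;
+ const int gp_arg_num_reg = 8, gp_arg_min_reg = 3;
+ int i = 0;
+
+ if (align_words + n_units > gp_arg_num_reg)
+ puts ("(memory, offset 0)");
+ do
+ printf ("(r%d, offset %d)\n", gp_arg_min_reg + align_words, i++ * 4);
+ while (++align_words < gp_arg_num_reg && --n_units != 0);
+ return 0;
+}
+/* Prints: (memory, offset 0), (r9, offset 0), (r10, offset 4). */
+#endif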
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called. It is
+ not modified in this routine.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. Under AIX, the first 13 FP args are in registers.
+ Under V.4, the first 8 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when CALL_LIBCALL is set) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support PARALLEL anyway.
+
+ Note that for args passed by reference, function_arg will be called
+ with MODE and TYPE set to that of the pointer to the arg, not the arg
+ itself. */
+
+rtx
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named)
+{
+ enum rs6000_abi abi = DEFAULT_ABI;
+
+ /* Return a marker to indicate whether CR1 needs to set or clear the
+ bit that V.4 uses to say fp args were passed in registers.
+ Assume that we don't need the marker for software floating point,
+ or compiler generated library calls. */
+ if (mode == VOIDmode)
+ {
+ if (abi == ABI_V4
+ && (cum->call_cookie & CALL_LIBCALL) == 0
+ && (cum->stdarg
+ || (cum->nargs_prototype < 0
+ && (cum->prototype || TARGET_NO_PROTOTYPE))))
+ {
+ /* For the SPE, we need to crxor CR6 always. */
+ if (TARGET_SPE_ABI)
+ return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
+ else if (TARGET_HARD_FLOAT && TARGET_FPRS)
+ return GEN_INT (cum->call_cookie
+ | ((cum->fregno == FP_ARG_MIN_REG)
+ ? CALL_V4_SET_FP_ARGS
+ : CALL_V4_CLEAR_FP_ARGS));
+ }
+
+ return GEN_INT (cum->call_cookie);
+ }
+
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE)
+ {
+ rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
+ if (rslt != NULL_RTX)
+ return rslt;
+ /* Else fall through to usual handling. */
+ }
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
+ if (TARGET_64BIT && ! cum->prototype)
+ {
+ /* Vector parameters get passed in vector register
+ and also in GPRs or memory, in absence of prototype. */
+ int align_words;
+ rtx slot;
+ align_words = (cum->words + 1) & ~1;
+
+ if (align_words >= GP_ARG_NUM_REG)
+ {
+ slot = NULL_RTX;
+ }
+ else
+ {
+ slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ }
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ slot, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno),
+ const0_rtx)));
+ }
+ else
+ return gen_rtx_REG (mode, cum->vregno);
+ else if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
+ {
+ if (named || abi == ABI_V4)
+ return NULL_RTX;
+ else
+ {
+ /* Vector parameters to varargs functions under AIX or Darwin
+ get passed in memory and possibly also in GPRs. */
+ int align, align_words, n_words;
+ enum machine_mode part_mode;
+
+ /* Vector parameters must be 16-byte aligned. This places them at
+ 2 mod 4 in terms of words in 32-bit mode, since the parameter
+ save area starts at offset 24 from the stack. In 64-bit mode,
+ they just have to start on an even word, since the parameter
+ save area is 16-byte aligned. */
+ if (TARGET_32BIT)
+ align = (2 - cum->words) & 3;
+ else
+ align = cum->words & 1;
+ align_words = cum->words + align;
+
+ /* Out of registers? Memory, then. */
+ if (align_words >= GP_ARG_NUM_REG)
+ return NULL_RTX;
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
+
+ /* The vector value goes in GPRs. Only the part of the
+ value in GPRs is reported here. */
+ part_mode = mode;
+ n_words = rs6000_arg_size (mode, type);
+ if (align_words + n_words > GP_ARG_NUM_REG)
+ /* Fortunately, there are only two possibilities, the value
+ is either wholly in GPRs or half in GPRs and half not. */
+ part_mode = DImode;
+
+ return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
+ }
+ }
+ else if (TARGET_SPE_ABI && TARGET_SPE
+ && (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DCmode))))
+ return rs6000_spe_function_arg (cum, mode, type);
+
+ else if (abi == ABI_V4)
+ {
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && (mode == SFmode || mode == DFmode
+ || (mode == TFmode && !TARGET_IEEEQUAD)))
+ {
+ if (cum->fregno + (mode == TFmode ? 1 : 0) <= FP_ARG_V4_MAX_REG)
+ return gen_rtx_REG (mode, cum->fregno);
+ else
+ return NULL_RTX;
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int gregno = cum->sysv_gregno;
+
+ /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
+ (r7,r8) or (r9,r10). As does any other 2 word item such
+ as complex int due to a historical mistake. */
+ if (n_words == 2)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ return NULL_RTX;
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type,
+ gregno - GP_ARG_MIN_REG);
+ return gen_rtx_REG (mode, gregno);
+ }
+ }
+ else
+ {
+ int align_words = rs6000_parm_start (mode, type, cum->words);
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type))
+ {
+ rtx rvec[GP_ARG_NUM_REG + 1];
+ rtx r;
+ int k;
+ bool needs_psave;
+ enum machine_mode fmode = mode;
+ unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
+
+ if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
+ {
+ /* Currently, we only ever need one reg here because complex
+ doubles are split. */
+ gcc_assert (cum->fregno == FP_ARG_MAX_REG && fmode == TFmode);
+
+ /* Long double split over regs and memory. */
+ fmode = DFmode;
+ }
+
+ /* Do we also need to pass this arg in the parameter save
+ area? */
+ needs_psave = (type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG)));
+
+ if (!needs_psave && mode == fmode)
+ return gen_rtx_REG (fmode, cum->fregno);
+
+ k = 0;
+ if (needs_psave)
+ {
+ /* Describe the part that goes in gprs or the stack.
+ This piece must come first, before the fprs. */
+ if (align_words < GP_ARG_NUM_REG)
+ {
+ unsigned long n_words = rs6000_arg_size (mode, type);
+
+ if (align_words + n_words > GP_ARG_NUM_REG
+ || (TARGET_32BIT && TARGET_POWERPC64))
+ {
+ /* If this is partially on the stack, then we only
+ include the portion actually in registers here. */
+ enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
+ rtx off;
+ int i = 0;
+ if (align_words + n_words > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it
+ goes in memory too, using a magic NULL_RTX
+ component. Also see comment in
+ rs6000_mixed_function_arg for why the normal
+ function_arg_partial_nregs scheme doesn't work
+ in this case. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
+ const0_rtx);
+ do
+ {
+ r = gen_rtx_REG (rmode,
+ GP_ARG_MIN_REG + align_words);
+ off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
+ }
+ else
+ {
+ /* The whole arg fits in gprs. */
+ r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+ }
+ }
+ else
+ /* It's entirely in memory. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+
+ /* Describe where this piece goes in the fprs. */
+ r = gen_rtx_REG (fmode, cum->fregno);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
+ }
+ else if (align_words < GP_ARG_NUM_REG)
+ {
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
+
+ if (mode == BLKmode)
+ mode = Pmode;
+
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ }
+ else
+ return NULL_RTX;
+ }
+}
+
+/* For an arg passed partly in registers and partly in memory, this is
+ the number of bytes passed in registers. For args passed entirely in
+ registers or entirely in memory, zero. When an arg is described by a
+ PARALLEL, perhaps using more than one register type, this function
+ returns the number of bytes used by the first element of the PARALLEL. */
+
+static int
+rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named)
+{
+ int ret = 0;
+ int align_words;
+
+ if (DEFAULT_ABI == ABI_V4)
+ return 0;
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
+ && cum->nargs_prototype >= 0)
+ return 0;
+
+ /* In this complicated case we just disable the partial_nregs code. */
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ return 0;
+
+ align_words = rs6000_parm_start (mode, type, cum->words);
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type))
+ {
+ /* If we are passing this arg in the fixed parameter save area
+ (gprs or memory) as well as fprs, then this function should
+ return the number of partial bytes passed in the parameter
+ save area rather than partial bytes passed in fprs. */
+ if (type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG)))
+ return 0;
+ else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
+ > FP_ARG_MAX_REG + 1)
+ ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
+ else if (cum->nargs_prototype >= 0)
+ return 0;
+ }
+
+ if (align_words < GP_ARG_NUM_REG
+ && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
+ ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
+
+ if (ret != 0 && TARGET_DEBUG_ARG)
+ fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
+
+ return ret;
+}
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type.
+
+ Under V.4, aggregates and long double are passed by reference.
+
+ As an extension to all 32-bit ABIs, AltiVec vectors are passed by
+ reference unless the AltiVec vector extension ABI is in force.
+
+ As an extension to all ABIs, variable sized types are passed by
+ reference. */
+
+static bool
+rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
+ return 1;
+ }
+
+ if (!type)
+ return 0;
+
+ if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
+ return 1;
+ }
+
+ if (int_size_in_bytes (type) < 0)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
+ return 1;
+ }
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
+ return 1;
+ }
+
+ /* Pass synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_pass_big_vectors = false;
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
+ if (!warned_for_pass_big_vectors)
+ {
+ warning (0, "GCC vector passed by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_pass_big_vectors = true;
+ }
+ return 1;
+ }
+
+ return 0;
+}
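+
+/* Illustrative sketch, not part of GCC: the decisions above collapsed
+ into flags (hypothetical sketch_* names; the V.4 long double case
+ is omitted). */
+#if 0 /* standalone example, compile separately */
+#include <stdbool.h>
+
+static bool
+sketch_pass_by_reference (bool v4_abi, bool is_aggregate, long size,
+ bool is_gcc_vector, long vector_limit)
+{
+ if (v4_abi && is_aggregate)
+ return true; /* V.4 passes aggregates by reference */
+ if (size < 0)
+ return true; /* variable-sized types */
+ if (is_gcc_vector && size > vector_limit)
+ return true; /* oversized synthetic vectors */
+ return false;
+}
+/* vector_limit is 16 with the AltiVec ABI, 8 without. */
+#endif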
+
+static void
+rs6000_move_block_from_reg (int regno, rtx x, int nregs)
+{
+ int i;
+ enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
+
+ if (nregs == 0)
+ return;
+
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
+ if (reload_completed)
+ {
+ if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
+ tem = NULL_RTX;
+ else
+ tem = simplify_gen_subreg (reg_mode, x, BLKmode,
+ i * GET_MODE_SIZE (reg_mode));
+ }
+ else
+ tem = replace_equiv_address (tem, XEXP (tem, 0));
+
+ gcc_assert (tem);
+
+ emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
+ }
+}
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+static void
+setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS next_cum;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ rtx save_area = NULL_RTX, mem;
+ int first_reg_offset, set;
+
+ /* Skip the last named argument. */
+ next_cum = *cum;
+ function_arg_advance (&next_cum, mode, type, 1, 0);
+
+ if (DEFAULT_ABI == ABI_V4)
+ {
+ first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
+
+ if (! no_rtl)
+ {
+ int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
+ HOST_WIDE_INT offset = 0;
+
+ /* Try to optimize the size of the varargs save area.
+ The ABI requires that ap.reg_save_area is doubleword
+ aligned, but we don't need to allocate space for all
+ the bytes, only those to which we actually will save
+ anything. */
+ if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
+ gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
+ {
+ if (gpr_reg_num)
+ fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD;
+ if (cfun->va_list_fpr_size
+ < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
+ else
+ fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ * UNITS_PER_FP_WORD;
+ }
+ if (gpr_reg_num)
+ {
+ offset = -((first_reg_offset * reg_size) & ~7);
+ if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
+ {
+ gpr_reg_num = cfun->va_list_gpr_size;
+ if (reg_size == 4 && (first_reg_offset & 1))
+ gpr_reg_num++;
+ }
+ gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
+ }
+ else if (fpr_size)
+ offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD
+ - (int) (GP_ARG_NUM_REG * reg_size);
+
+ if (gpr_size + fpr_size)
+ {
+ rtx reg_save_area
+ = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
+ gcc_assert (GET_CODE (reg_save_area) == MEM);
+ reg_save_area = XEXP (reg_save_area, 0);
+ if (GET_CODE (reg_save_area) == PLUS)
+ {
+ gcc_assert (XEXP (reg_save_area, 0)
+ == virtual_stack_vars_rtx);
+ gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
+ offset += INTVAL (XEXP (reg_save_area, 1));
+ }
+ else
+ gcc_assert (reg_save_area == virtual_stack_vars_rtx);
+ }
+
+ cfun->machine->varargs_save_offset = offset;
+ save_area = plus_constant (virtual_stack_vars_rtx, offset);
+ }
+ }
+ else
+ {
+ first_reg_offset = next_cum.words;
+ save_area = virtual_incoming_args_rtx;
+
+ if (targetm.calls.must_pass_in_stack (mode, type))
+ first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
+ }
+
+ set = get_varargs_alias_set ();
+ if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
+ && cfun->va_list_gpr_size)
+ {
+ int nregs = GP_ARG_NUM_REG - first_reg_offset;
+
+ if (va_list_gpr_counter_field)
+ {
+ /* V4 va_list_gpr_size counts number of registers needed. */
+ if (nregs > cfun->va_list_gpr_size)
+ nregs = cfun->va_list_gpr_size;
+ }
+ else
+ {
+ /* char * va_list instead counts number of bytes needed. */
+ if (nregs > cfun->va_list_gpr_size / reg_size)
+ nregs = cfun->va_list_gpr_size / reg_size;
+ }
+
+ mem = gen_rtx_MEM (BLKmode,
+ plus_constant (save_area,
+ first_reg_offset * reg_size));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, BITS_PER_WORD);
+
+ rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
+ nregs);
+ }
+
+ /* Save FP registers if needed. */
+ if (DEFAULT_ABI == ABI_V4
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && ! no_rtl
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
+ {
+ int fregno = next_cum.fregno, nregs;
+ rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
+ rtx lab = gen_label_rtx ();
+ int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD);
+
+ emit_jump_insn
+ (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_NE (VOIDmode, cr1,
+ const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode, lab),
+ pc_rtx)));
+
+ for (nregs = 0;
+ fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
+ fregno++, off += UNITS_PER_FP_WORD, nregs++)
+ {
+ mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, GET_MODE_ALIGNMENT (DFmode));
+ emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
+ }
+
+ emit_label (lab);
+ }
+}
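+
+/* Illustrative sketch, not part of GCC: the save-area sizing above.
+ GPR bytes are rounded up to a doubleword so reg_save_area keeps
+ the alignment the V.4 ABI requires. sketch_gpr_save_bytes is a
+ hypothetical name. */
+#if 0 /* standalone example, compile separately */
+static int
+sketch_gpr_save_bytes (int gpr_reg_num, int reg_size)
+{
+ return (gpr_reg_num * reg_size + 7) & ~7;
+}
+/* sketch_gpr_save_bytes (5, 4) == 24: 20 bytes rounded up to 24. */
+#endif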
+
+/* APPLE LOCAL begin Altivec */
+/* This routine determines whether an extra pass over the argument list
+ is needed for vector arguments. It returns true if the current
+ argument should be skipped: vectors are skipped on the first pass
+ and non-vectors on the second. */
+
+static bool
+skip_vec_args (tree arg_type, int pass, int *last_pass)
+{
+ if (DEFAULT_ABI != ABI_DARWIN)
+ return false;
+
+ if (TREE_CODE (arg_type) == VECTOR_TYPE)
+ {
+ *last_pass = 2;
+ if (pass == 1)
+ return true;
+ }
+ else if (pass == 2)
+ return true;
+ return false;
+}
+/* APPLE LOCAL end Altivec */
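+
+/* Illustrative sketch, not part of GCC: driving the two-pass scan
+ above. Pass 1 visits the non-vector arguments; if any vector was
+ seen, last_pass becomes 2 and the vectors are visited on pass 2.
+ The tree-based type test is replaced by a plain flag, and all
+ sketch_* names are hypothetical. */
+#if 0 /* standalone example, compile separately */
+#include <stdbool.h>
+#include <stdio.h>
+
+static bool
+sketch_skip (bool is_vector, int pass, int *last_pass)
+{
+ if (is_vector)
+ {
+ *last_pass = 2;
+ if (pass == 1)
+ return true; /* defer vectors to pass 2 */
+ }
+ else if (pass == 2)
+ return true; /* non-vectors were handled in pass 1 */
+ return false;
+}
+
+int
+main (void)
+{
+ bool is_vector[] = { false, true, false }; /* int, vector, int */
+ int last_pass = 1;
+ for (int pass = 1; pass <= last_pass; pass++)
+ for (int i = 0; i < 3; i++)
+ if (!sketch_skip (is_vector[i], pass, &last_pass))
+ printf ("pass %d: arg %d\n", pass, i);
+ return 0;
+}
+/* Visits args 0 and 2 on pass 1, then arg 1 on pass 2. */
+#endif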
+
+
+/* Create the va_list data type. */
+
+static tree
+rs6000_build_builtin_va_list (void)
+{
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
+
+ /* For AIX, prefer 'char *' because that's what the system
+ header files like. */
+ if (DEFAULT_ABI != ABI_V4)
+ return build_pointer_type (char_type_node);
+
+ record = (*lang_hooks.types.make_type) (RECORD_TYPE);
+ type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ unsigned_char_type_node);
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ unsigned_char_type_node);
+ /* Give the two bytes of padding a name, so that -Wpadded won't warn on
+ every user file. */
+ f_res = build_decl (FIELD_DECL, get_identifier ("reserved"),
+ short_unsigned_type_node);
+ f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
+ ptr_type_node);
+ f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
+ ptr_type_node);
+
+ va_list_gpr_counter_field = f_gpr;
+ va_list_fpr_counter_field = f_fpr;
+
+ DECL_FIELD_CONTEXT (f_gpr) = record;
+ DECL_FIELD_CONTEXT (f_fpr) = record;
+ DECL_FIELD_CONTEXT (f_res) = record;
+ DECL_FIELD_CONTEXT (f_ovf) = record;
+ DECL_FIELD_CONTEXT (f_sav) = record;
+
+ TREE_CHAIN (record) = type_decl;
+ TYPE_NAME (record) = type_decl;
+ TYPE_FIELDS (record) = f_gpr;
+ TREE_CHAIN (f_gpr) = f_fpr;
+ TREE_CHAIN (f_fpr) = f_res;
+ TREE_CHAIN (f_res) = f_ovf;
+ TREE_CHAIN (f_ovf) = f_sav;
+
+ layout_type (record);
+
+ /* The correct type is an array type of one element. */
+ return build_array_type (record, build_index_type (size_zero_node));
+}
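+
+/* Illustrative sketch, not part of GCC: the SVR4 record built above
+ corresponds to this C declaration from the 32-bit PowerPC SVR4
+ ABI (sketch_va_list is a hypothetical name; the real type is
+ __builtin_va_list). */
+#if 0 /* standalone example, compile separately */
+typedef struct sketch_va_list_tag
+{
+ unsigned char gpr; /* next general register to use, 0..8 */
+ unsigned char fpr; /* next floating register to use, 0..8 */
+ unsigned short reserved; /* the named padding, for -Wpadded */
+ void *overflow_arg_area; /* args that overflowed onto the stack */
+ void *reg_save_area; /* registers dumped by the prologue */
+} sketch_va_list[1]; /* an array type of one element */
+#endif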
+
+/* Implement va_start. */
+
+void
+rs6000_va_start (tree valist, rtx nextarg)
+{
+ HOST_WIDE_INT words, n_gpr, n_fpr;
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, t;
+
+ /* Only SVR4 needs something special. */
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ std_expand_builtin_va_start (valist, nextarg);
+ return;
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_res = TREE_CHAIN (f_fpr);
+ f_ovf = TREE_CHAIN (f_res);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+
+ /* Count number of gp and fp argument registers used. */
+ words = current_function_args_info.words;
+ n_gpr = MIN (current_function_args_info.sysv_gregno - GP_ARG_MIN_REG,
+ GP_ARG_NUM_REG);
+ n_fpr = MIN (current_function_args_info.fregno - FP_ARG_MIN_REG,
+ FP_ARG_NUM_REG);
+
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
+ HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
+ words, n_gpr, n_fpr);
+
+ if (cfun->va_list_gpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ if (cfun->va_list_fpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ /* Find the overflow area. */
+ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
+ if (words != 0)
+ t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
+ build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* If there were no va_arg invocations, don't set up the register
+ save area. */
+ if (!cfun->va_list_gpr_size
+ && !cfun->va_list_fpr_size
+ && n_gpr < GP_ARG_NUM_REG
+ && n_fpr < FP_ARG_V4_MAX_REG)
+ return;
+
+ /* Find the register save area. */
+ t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
+ if (cfun->machine->varargs_save_offset)
+ t = build2 (PLUS_EXPR, TREE_TYPE (sav), t,
+ build_int_cst (NULL_TREE, cfun->machine->varargs_save_offset));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
+
+/* Implement va_arg. */
+
+tree
+rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+{
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, reg, t, u;
+ int size, rsize, n_reg, sav_ofs, sav_scale;
+ tree lab_false, lab_over, addr;
+ int align;
+ tree ptrtype = build_pointer_type (type);
+
+ if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
+ {
+ t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
+ return build_va_arg_indirect_ref (t);
+ }
+
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ tree elem_type = TREE_TYPE (type);
+ enum machine_mode elem_mode = TYPE_MODE (elem_type);
+ int elem_size = GET_MODE_SIZE (elem_mode);
+
+ if (elem_size < UNITS_PER_WORD)
+ {
+ tree real_part, imag_part;
+ tree post = NULL_TREE;
+
+ real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ &post);
+ /* Copy the value into a temporary, lest the formal temporary
+ be reused out from under us. */
+ real_part = get_initialized_tmp_var (real_part, pre_p, &post);
+ append_to_statement_list (post, pre_p);
+
+ imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ post_p);
+
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
+ }
+ }
+
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_res = TREE_CHAIN (f_fpr);
+ f_ovf = TREE_CHAIN (f_res);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+
+ size = int_size_in_bytes (type);
+ rsize = (size + 3) / 4;
+ align = 1;
+
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && (TYPE_MODE (type) == SFmode
+ || TYPE_MODE (type) == DFmode
+ || TYPE_MODE (type) == TFmode))
+ {
+ /* FP args go in FP registers, if present. */
+ reg = fpr;
+ n_reg = (size + 7) / 8;
+ sav_ofs = 8*4;
+ sav_scale = 8;
+ if (TYPE_MODE (type) != SFmode)
+ align = 8;
+ }
+ else
+ {
+ /* Otherwise into GP registers. */
+ reg = gpr;
+ n_reg = rsize;
+ sav_ofs = 0;
+ sav_scale = 4;
+ if (n_reg == 2)
+ align = 8;
+ }
+
+ /* Pull the value out of the saved registers.... */
+
+ lab_over = NULL;
+ addr = create_tmp_var (ptr_type_node, "addr");
+ DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
+
+ /* AltiVec vectors never go in registers when -mabi=altivec. */
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
+ align = 16;
+ else
+ {
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
+
+ /* Long long and SPE vectors are aligned in the registers.
+ As are any other 2 gpr item such as complex int due to a
+ historical mistake. */
+ u = reg;
+ if (n_reg == 2 && reg == gpr)
+ {
+ u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), reg,
+ size_int (n_reg - 1));
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, u);
+ }
+
+ t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
+ t = build2 (GE_EXPR, boolean_type_node, u, t);
+ u = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+
+ t = sav;
+ if (sav_ofs)
+ t = build2 (PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
+
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, size_int (n_reg));
+ u = build1 (CONVERT_EXPR, integer_type_node, u);
+ u = build2 (MULT_EXPR, integer_type_node, u, size_int (sav_scale));
+ t = build2 (PLUS_EXPR, ptr_type_node, t, u);
+
+ t = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (t, pre_p);
+
+ t = build1 (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (t, pre_p);
+
+ t = build1 (LABEL_EXPR, void_type_node, lab_false);
+ append_to_statement_list (t, pre_p);
+
+ if ((n_reg == 2 && reg != gpr) || n_reg > 2)
+ {
+ /* Ensure that we don't find any more args in regs.
+ Alignment has taken care of the n_reg == 2 gpr case. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, size_int (8));
+ gimplify_and_add (t, pre_p);
+ }
+ }
+
+ /* ... otherwise out of the overflow area. */
+
+ /* Care for on-stack alignment if needed. */
+ t = ovf;
+ if (align != 1)
+ {
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (NULL_TREE, -align));
+ }
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
+
+ u = build2 (MODIFY_EXPR, void_type_node, addr, t);
+ gimplify_and_add (u, pre_p);
+
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ gimplify_and_add (t, pre_p);
+
+ if (lab_over)
+ {
+ t = build1 (LABEL_EXPR, void_type_node, lab_over);
+ append_to_statement_list (t, pre_p);
+ }
+
+ if (STRICT_ALIGNMENT
+ && (TYPE_ALIGN (type)
+ > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
+ {
+ /* The value (of type complex double, for example) may not be
+ aligned in memory in the saved registers, so copy via a
+ temporary. (This is the same code as used for SPARC.) */
+ tree tmp = create_tmp_var (type, "va_arg_tmp");
+ tree dest_addr = build_fold_addr_expr (tmp);
+
+ tree copy = build_function_call_expr
+ (implicit_built_in_decls[BUILT_IN_MEMCPY],
+ tree_cons (NULL_TREE, dest_addr,
+ tree_cons (NULL_TREE, addr,
+ tree_cons (NULL_TREE, size_int (rsize * 4),
+ NULL_TREE))));
+
+ gimplify_and_add (copy, pre_p);
+ addr = dest_addr;
+ }
+
+ addr = fold_convert (ptrtype, addr);
+ return build_va_arg_indirect_ref (addr);
+}
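+
+/* The overflow-area rounding above is the usual align-up idiom.  A
+   stand-alone sketch, with hypothetical values (not compiler code):
+
+     unsigned long
+     align_up (unsigned long addr, unsigned long align)
+     {
+       - align must be a power of two; -align is then a mask with the
+       - low log2(align) bits clear, so this rounds addr up.
+       return (addr + align - 1) & -align;
+     }
+
+   e.g. align_up (0x1005, 8) == 0x1008, matching the PLUS_EXPR /
+   BIT_AND_EXPR pair built for 'ovf' when align != 1.  */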
+
+/* Builtins. */
+
+static void
+def_builtin (int mask, const char *name, tree type, int code)
+{
+ if (mask & target_flags)
+ {
+ if (rs6000_builtin_decls[code])
+ abort ();
+
+ rs6000_builtin_decls[code] =
+ lang_hooks.builtin_function (name, type, code, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ }
+}
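+
+/* A typical call, in the style of the builtin-initialization code
+   (the function-type node name below is illustrative):
+
+     def_builtin (MASK_ALTIVEC, "__builtin_altivec_vaddfp",
+                  v4sf_ftype_v4sf_v4sf, ALTIVEC_BUILTIN_VADDFP);
+
+   The builtin is only registered when its mask bits are present in
+   target_flags, and registering the same code twice aborts.  */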
+
+/* APPLE LOCAL begin AltiVec */
+/* The AltiVec PIM operations and predicates (used in Apple AltiVec mode)
+ are stored in ALTIVEC_PIM_TABLE below, each annotated with flags indicating
+ how its arguments should be matched and/or how its return type is to be
+ determined. */
+
+enum pim_flags
+{
+ /* CR6 predicate modifiers. Not used for operations. For predicates,
+ one of the following four values shall be prepended to the argument
+ list as an INTEGER_CST. */
+
+ pim_cr6_eq = 0, /* __CR6_EQ */
+ pim_cr6_ne = 1, /* __CR6_EQ_REV */
+ pim_cr6_lt = 2, /* __CR6_LT */
+ pim_cr6_ge = 3, /* __CR6_LT_REV */
+ pim_cr6_MASK = pim_cr6_eq | pim_cr6_ne | pim_cr6_lt | pim_cr6_ge,
+
+ /* Function overload argument matching. Operations and predicates with
+ multiple overload candidates will have multiple entries, listed
+ contiguously, in the ALTIVEC_PIM_TABLE below. When the
+ rs6000_fold_builtin() routine is called, it will first point at
+ the first entry. If any of the pim_ovl_... flags is set for this
+ entry, the argument(s) to rs6000_fold_builtin() will be type-checked
+ accordingly. If the check succeeds, the current entry will be
+ used to rewrite the PIM instruction into a __builtin instruction;
+ if the check fails, the next entry in ALTIVEC_PIM_TABLE is selected
+ and the pim_ovl_... type comparison is made again. */
+
+ pim_ovl_16 = 4, /* First argument must be a 16-element vector */
+ pim_ovl_16u = 8,
+ pim_ovl_8 = 12, /* First argument must be an 8-element vector */
+ pim_ovl_8u = 16,
+ pim_ovl_8p = 20, /* First argument must be a vector pixel */
+ pim_ovl_4 = 24, /* First argument must be a 4-element vector */
+ pim_ovl_4u = 28,
+ pim_ovl_4f = 32, /* First argument must be a vector float */
+ pim_ovl_16u_16u = 36, /* First two args must be unsigned 16-el vectors */
+ pim_ovl_8u_8u = 40,
+ pim_ovl_4u_4u = 44,
+ pim_ovl_pqi_2 = 48, /* Second argument must be a pointer to QI. */
+ pim_ovl_phi_2 = 52, /* Second argument must be a pointer to HI. */
+ pim_ovl_psi_2 = 56, /* Second argument must be a pointer to SI. */
+ pim_ovl_MASK = pim_ovl_16 | pim_ovl_16u | pim_ovl_8 | pim_ovl_8u
+ | pim_ovl_8p | pim_ovl_4 | pim_ovl_4u | pim_ovl_4f
+ | pim_ovl_16u_16u | pim_ovl_8u_8u | pim_ovl_4u_4u
+ | pim_ovl_pqi_2 | pim_ovl_phi_2 | pim_ovl_psi_2,
+
+ /* Return type computation. For most operations/predicates the return
+ type is fixed and is stored in ALTIVEC_PIM_TABLE; for the ones
+ flagged below it is instead computed as a function of the arguments
+ supplied. */
+
+ pim_rt_12 = 512, /* Covariant with first two arguments. */
+ pim_rt_2p = 1024, /* Covariant with pointee of second argument. */
+ pim_rt_1 = 1536, /* Covariant with first argument only. */
+ pim_rt_1d = 2048, /* Double the vector element size of first arg. */
+ pim_rt_1h = 2560, /* Halve the vector element size of first arg. */
+ pim_rt_MASK = pim_rt_12 | pim_rt_2p | pim_rt_1 | pim_rt_1d | pim_rt_1h,
+
+ /* Argument manipulation. Before the __builtin instructions are called,
+ the arguments may need to be rearranged. In addition, for all
+ predicates, one of the CR6 values will be prepended to the argument
+ list (see pim_cr6_... above). */
+
+ pim_manip_swap = 8192, /* Swap the first two arguments. */
+ pim_manip_dup = 16384, /* Duplicate first argument. */
+ pim_manip_MASK = pim_manip_swap | pim_manip_dup,
+
+ /* Mark the beginning of instruction groups. For our purposes, an
+ instruction group is the collection of overload candidates for
+ a particular instruction or predicate. For example, the entries
+ "vec_abss", "vec_abss.2" and "vec_abss.3" defined in
+ altivec_init_builtins() below constitute a group, as does the
+ singleton "vec_addc" entry. */
+
+ pim_group = 32768
+};
+
+struct altivec_pim_info GTY(())
+{
+ tree rettype; /* Return type (unless pim_rt_... flags are used). */
+ int insn; /* DECL_FUNCTION_CODE of the underlying '__builtin_...'. */
+ enum pim_flags flags; /* See 'enum pim_flags' above. */
+};
+
+static GTY(()) struct altivec_pim_info
+altivec_pim_table[ALTIVEC_PIM__LAST - ALTIVEC_PIM__FIRST + 1];
+
+#define def_pim_builtin(NAME, TYPE, INSN, FLAGS) \
+do { \
+ lang_hooks.builtin_function (NAME, int_ftype_ellipsis, pim_code, \
+ BUILT_IN_MD, NULL, NULL_TREE); \
+ \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].rettype = TYPE; \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].insn \
+ = ALTIVEC_BUILTIN_##INSN; \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].flags = FLAGS; \
+ \
+ ++pim_code; \
+} while (0)
+/* APPLE LOCAL end AltiVec */
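+
+/* A sketch of how the table gets populated, modeled on the "vec_abss"
+   group mentioned above (the flag combinations here are illustrative,
+   not lifted from altivec_init_builtins):
+
+     def_pim_builtin ("vec_abss", NULL_TREE, ABSS_V16QI,
+                      pim_ovl_16 | pim_rt_1 | pim_group);
+     def_pim_builtin ("vec_abss.2", NULL_TREE, ABSS_V8HI,
+                      pim_ovl_8 | pim_rt_1);
+
+   Only the first entry of a group carries pim_group; overload
+   resolution walks the contiguous entries until a pim_ovl_... check
+   succeeds.  */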
+
+/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
+
+static const struct builtin_description bdesc_3arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
+};
+
+/* DST operations: void foo (void *, const int, const char). */
+
+static const struct builtin_description bdesc_dst[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
+};
+
+/* Simple binary operations: VECc = foo (VECa, VECb). */
+
+static struct builtin_description bdesc_2arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
+ { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
+ { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
+ { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslb, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslh, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslw, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
+ { MASK_ALTIVEC, CODE_FOR_lshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_lshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_lshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_ashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_ashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_ashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
+ { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
+ { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
+
+ /* Place-holder. Leave as first SPE builtin. */
+ { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
+ { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
+ { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
+ { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
+ { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
+ { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
+ { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
+ { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
+ { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
+ { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
+ { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
+ { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
+ { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
+ { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
+ { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
+ { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
+ { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
+ { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
+ { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
+ { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
+ { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
+ { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
+ { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
+ { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
+ { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
+ { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
+ { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
+ { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
+ { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
+ { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
+ { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
+ { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
+ { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
+ { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
+ { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
+ { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
+ { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
+ { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
+ { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
+ { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
+ { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
+ { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
+ { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
+ { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
+ { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
+ { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
+ { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
+ { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
+ { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
+ { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
+ { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
+ { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
+ { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
+ { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
+ { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
+ { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
+ { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
+ { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
+ { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
+ { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
+ { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
+ { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
+ { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
+ { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
+ { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
+ { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
+ { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
+ { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
+ { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
+ { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
+ { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
+ { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
+ { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
+ { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
+ { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
+ { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
+ { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
+ { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
+ { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
+ { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
+ { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
+ { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
+ { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
+ { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
+ { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
+ { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
+ { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
+ { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
+ { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
+ { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
+ { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
+ { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
+ { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
+ { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
+ { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
+ { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
+ { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
+ { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
+ { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
+ { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
+ { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
+ { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
+ { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
+ { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
+ { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
+ { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
+ { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
+ { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
+ { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
+
+ /* SPE binary operations expecting a 5-bit unsigned literal. */
+ { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
+
+ { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
+ { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
+ { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
+ { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
+ { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
+ { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
+ { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
+ { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
+ { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
+ { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
+ { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
+ { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
+ { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
+ { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
+ { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
+ { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
+ { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
+ { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
+ { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
+ { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
+ { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
+ { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
+ { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
+ { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
+ { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
+ { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
+
+ /* Place-holder. Leave as last binary SPE builtin. */
+ { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
+};
+
+/* AltiVec predicates. */
+
+struct builtin_description_predicates
+{
+ const unsigned int mask;
+ const enum insn_code icode;
+ const char *opcode;
+ const char *const name;
+ const enum rs6000_builtins code;
+};
+
+static const struct builtin_description_predicates bdesc_altivec_preds[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
+/* APPLE LOCAL begin radar 4571747 */
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpequw, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtsw, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtuw, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtuh, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtsh, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpequh, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpequb, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtsb, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_vcmpgtub, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
+/* APPLE LOCAL end radar 4571747 */
+
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
+};
+
+/* SPE predicates. */
+static struct builtin_description bdesc_spe_predicates[] =
+{
+ /* Place-holder. Leave as first. */
+ { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
+ { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
+ { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
+ { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
+ { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
+ { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
+ { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
+ { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
+ { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
+ { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
+ /* Place-holder. Leave as last. */
+ { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
+};
+
+/* SPE evsel predicates. */
+static struct builtin_description bdesc_spe_evsel[] =
+{
+ /* Place-holder. Leave as first. */
+ { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
+ { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
+ { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
+ { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
+ { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
+ { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
+ { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
+ { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
+ { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
+ { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
+ /* Place-holder. Leave as last. */
+ { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
+};
+
+/* ABS* operations. */
+
+static const struct builtin_description bdesc_abs[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
+ { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
+};
+
+/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
+ foo (VECa). */
+
+static struct builtin_description bdesc_1arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
+ { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
+
+ /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
+ end with SPE_BUILTIN_EVSUBFUSIAAW. */
+ { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
+ { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
+ { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
+ { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
+ { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
+ { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
+ { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
+ { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
+ { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
+ { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
+ { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
+ { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
+ { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
+ { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
+ { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
+ { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
+ { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
+ { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
+ { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
+ { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
+ { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
+ { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
+ { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
+ { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
+ { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
+ { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
+ { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
+ { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
+
+ /* Place-holder. Leave as last unary SPE builtin. */
+ { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW }
+};
+
+/* APPLE LOCAL begin AltiVec */
+/* Determine the return type from types T1 and T2 of the first two arguments.
+ This is required for some of the AltiVec PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_12 (tree t1, tree t2)
+{
+ /* NB: The ordering of the following statements is important.
+ Matching of more specific types (e.g., 'vector pixel') should
+ precede matching of more general types, esp. if they subsume the
+ former (e.g., 'vector of 8 elements'). */
+
+#define RETURN_IF_EITHER_IS(TYPE) if (t1 == TYPE || t2 == TYPE) return TYPE
+
+ RETURN_IF_EITHER_IS (unsigned_V16QI_type_node);
+ RETURN_IF_EITHER_IS (V16QI_type_node);
+ RETURN_IF_EITHER_IS (bool_V16QI_type_node);
+ RETURN_IF_EITHER_IS (unsigned_V8HI_type_node);
+ RETURN_IF_EITHER_IS (pixel_V8HI_type_node);
+ RETURN_IF_EITHER_IS (V8HI_type_node);
+ RETURN_IF_EITHER_IS (bool_V8HI_type_node);
+ RETURN_IF_EITHER_IS (unsigned_V4SI_type_node);
+ RETURN_IF_EITHER_IS (V4SF_type_node);
+ RETURN_IF_EITHER_IS (V4SI_type_node);
+ RETURN_IF_EITHER_IS (bool_V4SI_type_node);
+
+#undef RETURN_IF_EITHER_IS
+
+ return NULL_TREE;
+}
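+
+/* Example: assuming vec_add is resolved through this path, a call with
+ a 'vector bool char' and a 'vector signed char' operand yields
+ altivec_cov_rt_12 (bool_V16QI_type_node, V16QI_type_node), which
+ returns V16QI_type_node, because 'vector signed char' is tested
+ before 'vector bool char' in the ordering above. */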
+
+/* Determine the return type from the pointee type of argument type T.
+ This is required for some of the AltiVec PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_2p (tree t)
+{
+ /* Must be a pointer. */
+
+ if (!t)
+ return NULL_TREE;
+
+ if (TREE_CODE (t) != POINTER_TYPE)
+ return NULL_TREE;
+
+ t = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+
+ /* For pointers to vectors, the return type is the vector itself. */
+
+ if (TREE_CODE (t) == VECTOR_TYPE)
+ return t;
+
+ switch (TYPE_MODE (t))
+ {
+ case QImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V16QI_type_node : V16QI_type_node;
+
+ case HImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V8HI_type_node : V8HI_type_node;
+
+ case SImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V4SI_type_node : V4SI_type_node;
+
+ case SFmode:
+ return V4SF_type_node;
+
+ default:
+ return NULL_TREE;
+ }
+}
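+
+/* Example: assuming vec_ld is one of the pim_rt_2p operations, for
+ 'vec_ld (0, (const float *) p)' the second argument's pointee has
+ SFmode, so the inferred return type is 'vector float'
+ (V4SF_type_node); a pointer to a vector type would instead return
+ that vector type directly via the VECTOR_TYPE branch above. */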
+
+/* Determine the return type from type T by doubling the size of its
+ constituent vector elements. This is required for some of the AltiVec
+ PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_1d (tree t)
+{
+ if (t == V16QI_type_node)
+ return V8HI_type_node;
+ else if (t == unsigned_V16QI_type_node)
+ return unsigned_V8HI_type_node;
+ else if (t == bool_V16QI_type_node)
+ return bool_V8HI_type_node;
+ else if (t == V8HI_type_node)
+ return V4SI_type_node;
+ else if (t == unsigned_V8HI_type_node || t == pixel_V8HI_type_node)
+ return unsigned_V4SI_type_node;
+ else if (t == bool_V8HI_type_node)
+ return bool_V4SI_type_node;
+ else
+ return NULL_TREE; /* Invalid argument. */
+}
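+
+/* Example: 'vec_unpackh' on a 'vector signed char' argument returns
+ 'vector signed short' in the PIM, matching the first branch above:
+ V16QI_type_node maps to V8HI_type_node. */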
+
+/* Determine the return type from type T by halving the size of its
+ constituent vector elements. This is required for some of the AltiVec
+ PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_1h (tree t)
+{
+ if (t == V8HI_type_node)
+ return V16QI_type_node;
+ else if (t == unsigned_V8HI_type_node || t == pixel_V8HI_type_node)
+ return unsigned_V16QI_type_node;
+ else if (t == bool_V8HI_type_node)
+ return bool_V16QI_type_node;
+ else if (t == V4SI_type_node)
+ return V8HI_type_node;
+ else if (t == unsigned_V4SI_type_node)
+ return unsigned_V8HI_type_node;
+ else if (t == bool_V4SI_type_node)
+ return bool_V8HI_type_node;
+ else
+ return NULL_TREE; /* Invalid argument. */
+}
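+
+/* Example: an element-narrowing operation such as vec_pack, assuming
+ it routes through this helper, maps 'vector signed short'
+ (V8HI_type_node) to 'vector signed char' (V16QI_type_node) via the
+ first branch above. */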
+
+/* Given the types T1 and T2 of the first two arguments, and INFO pointing
+ to the first of available overload candidates (in the ALTIVEC_PIM_TABLE)
+ for an AltiVec PIM operation or predicate, select a desired overload
+ candidate by incrementing and returning INFO as appropriate. If no
+ overload candidate is suitable, return NULL. */
+
+static struct altivec_pim_info *
+altivec_ovl_resolve (struct altivec_pim_info *info, tree t1, tree t2)
+{
+ /* Make sure we have all the types that we need. */
+ if (!t1)
+ return 0;
+
+ if (!t2 && (info->flags & pim_ovl_MASK) >= pim_ovl_16u_16u)
+ return 0;
+
+ /* Examine overload candidates in order, and return the first one
+ that matches. For this scheme to work, overload candidates must
+ be ordered from most to least type-specific. */
+ do
+ {
+ switch (info->flags & pim_ovl_MASK)
+ {
+
+#define OVL_MATCH(EXPR) if (EXPR) return info; break
+
+ case pim_ovl_16:
+ OVL_MATCH (TYPE_MODE (t1) == V16QImode);
+
+ case pim_ovl_16u:
+ OVL_MATCH (TYPE_MODE (t1) == V16QImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_8:
+ OVL_MATCH (TYPE_MODE (t1) == V8HImode);
+
+ case pim_ovl_8u:
+ OVL_MATCH (TYPE_MODE (t1) == V8HImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_8p:
+ OVL_MATCH (t1 == pixel_V8HI_type_node);
+
+ case pim_ovl_4:
+ OVL_MATCH (TYPE_MODE (t1) == V4SImode || TYPE_MODE (t1) == V4SFmode);
+
+ case pim_ovl_4u:
+ OVL_MATCH (TYPE_MODE (t1) == V4SImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_4f:
+ OVL_MATCH (TYPE_MODE (t1) == V4SFmode);
+
+ case pim_ovl_16u_16u:
+ OVL_MATCH (t1 == unsigned_V16QI_type_node
+ || t2 == unsigned_V16QI_type_node);
+
+ case pim_ovl_8u_8u:
+ OVL_MATCH (t1 == unsigned_V8HI_type_node
+ || t1 == pixel_V8HI_type_node
+ || t2 == unsigned_V8HI_type_node
+ || t2 == pixel_V8HI_type_node);
+
+ case pim_ovl_4u_4u:
+ OVL_MATCH (t1 == unsigned_V4SI_type_node
+ || t2 == unsigned_V4SI_type_node);
+
+ case pim_ovl_pqi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == QImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V16QImode));
+
+ case pim_ovl_phi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == HImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V8HImode));
+
+ case pim_ovl_psi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == SImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V4SImode
+ || TYPE_MODE (TREE_TYPE (t2)) == SFmode
+ || TYPE_MODE (TREE_TYPE (t2)) == V4SFmode));
+
+ default: /* Catch-all. */
+ return info;
+
+#undef OVL_MATCH
+ }
+ }
+ while (!((++info)->flags & pim_group)); /* Advance to next candidate. */
+
+ return NULL; /* No suitable overload candidate found. */
+}
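+
+/* Sketch of the resolution walk (the candidate list here is assumed,
+ not the actual table contents): if an operation's candidates are
+ ordered pim_ovl_16u_16u followed by a catch-all, then for two
+ 'vector unsigned char' arguments the first candidate matches and is
+ returned, while for two 'vector signed char' arguments the loop
+ advances past it and the catch-all entry is selected. */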
+
+/* Convert each function argument in the ARGS list into a corresponding
+ type found in the TYPES list. This must be done before calling the
+ __builtin_... AltiVec instructions, whose declared argument types may differ
+ from what was passed to rs6000_fold_builtin(). */
+
+static tree
+altivec_convert_args (tree types, tree args)
+{
+ tree t, a;
+
+ for (t = types, a = args; t && a; t = TREE_CHAIN (t), a = TREE_CHAIN (a))
+ {
+ TREE_VALUE (a) = convert (TREE_VALUE (t), TREE_VALUE (a));
+
+ /* Suppress overflows, so that GIMPLE does not create temporary
+ variables on us. */
+ if (TREE_CODE (TREE_VALUE (a)) == INTEGER_CST)
+ {
+ TREE_OVERFLOW (TREE_VALUE (a)) = 0;
+ TREE_CONSTANT_OVERFLOW (TREE_VALUE (a)) = 0;
+ }
+ }
+
+ /* At this point we've converted all of the arguments we're supposed
+ to have. Anything extra is an error and we should mark it as such. */
+ for (a = args; a; a = TREE_CHAIN (a))
+ {
+ if (VOID_TYPE_P (TREE_TYPE (TREE_VALUE (a))))
+ {
+ error ("too many arguments to AltiVec builtin");
+ TREE_VALUE (a) = error_mark_node;
+ }
+ }
+
+ return args;
+}
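+
+/* Example: if a 'vector bool char' value is passed where the underlying
+ __builtin_altivec_* function is declared to take 'vector signed
+ char', the convert () call above inserts the cast; for INTEGER_CST
+ arguments the overflow flags are then cleared so gimplification does
+ not split them into temporaries. */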
+
+/* The following function rewrites FNDECL and ARGLIST by substituting AltiVec
+ PIM operations or predicates with built-in instructions defined above.
+ Type casts are provided if needed. */
+
+tree
+rs6000_fold_builtin (tree fndecl, tree arglist, bool ARG_UNUSED (ignore))
+{
+ tree rettype;
+ tree typ1 = NULL_TREE, typ2 = NULL_TREE;
+ int ovl_error = 0;
+ enum built_in_function fcode;
+ struct altivec_pim_info *info;
+
+ /* Bail out if not in Apple AltiVec mode. */
+ if (!rs6000_altivec_pim)
+ return NULL_TREE;
+
+ fcode = DECL_FUNCTION_CODE (fndecl);
+
+ /* Bail out unless we are looking at one of the AltiVec PIM
+ operations/predicates. */
+
+ if (fcode < ALTIVEC_PIM__FIRST || fcode > ALTIVEC_PIM__LAST)
+ return NULL_TREE;
+
+ /* Point at the first (and possibly only) entry in ALTIVEC_PIM_TABLE
+ describing this PIM operation/predicate, and how to convert it to
+ a __builtin_... call. */
+
+ info = altivec_pim_table + (fcode - ALTIVEC_PIM__FIRST);
+
+ if (arglist)
+ /* APPLE LOCAL begin radar 5021057 */
+ {
+ if (TREE_VALUE (arglist) == error_mark_node)
+ return NULL_TREE;
+ typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (arglist)));
+ }
+ /* APPLE LOCAL end radar 5021057 */
+
+ if (arglist && TREE_CHAIN (arglist))
+ {
+ if (TREE_VALUE (TREE_CHAIN (arglist)) == error_mark_node)
+ return NULL_TREE;
+ typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist))));
+ }
+
+ /* Select from a list of overloaded functions, if needed. */
+
+ if (info->flags & pim_ovl_MASK)
+ {
+ info = altivec_ovl_resolve (info, typ1, typ2);
+
+ if (!info)
+ {
+ /* No suitable overload candidate was found! */
+ ovl_error = 1; /* We use this to indicate error. */
+ /* Point at the first overload candidate again. */
+ info = altivec_pim_table + (fcode - ALTIVEC_PIM__FIRST);
+ }
+ }
+
+ /* Determine the return type. */
+
+ switch (info->flags & pim_rt_MASK)
+ {
+ /* Return type is covariant with the first two arguments. */
+ case pim_rt_12:
+ rettype = altivec_cov_rt_12 (typ1, typ2);
+ break;
+
+ /* Return type is covariant with pointee of second argument. */
+ case pim_rt_2p:
+ rettype = altivec_cov_rt_2p (typ2);
+ break;
+
+ /* Return type is covariant with the first argument only. */
+ case pim_rt_1:
+ rettype = typ1;
+ break;
+
+ /* Return type is covariant with first argument, but with doubled
+ vector element sizes. */
+ case pim_rt_1d:
+ rettype = altivec_cov_rt_1d (typ1);
+ break;
+
+ /* Return type is covariant with first argument, but with halved
+ vector element sizes. */
+ case pim_rt_1h:
+ rettype = altivec_cov_rt_1h (typ1);
+ break;
+
+ default:
+ /* Retrieve return type to use from ALTIVEC_PIM_TABLE. */
+ rettype = info->rettype;
+ }
+
+ /* Rearrange arguments, as needed. */
+
+ switch (info->flags & pim_manip_MASK)
+ {
+ case pim_manip_swap:
+ if (!typ1 || !typ2)
+ rettype = NULL_TREE;
+ else
+ {
+ tree swap = TREE_VALUE (arglist);
+
+ TREE_VALUE (arglist) = TREE_VALUE (TREE_CHAIN (arglist));
+ TREE_VALUE (TREE_CHAIN (arglist)) = swap;
+ }
+
+ break;
+
+ case pim_manip_dup:
+ if (!typ1 || typ2)
+ rettype = NULL_TREE;
+ else
+ TREE_CHAIN (arglist) = tree_cons (NULL_TREE, TREE_VALUE (arglist),
+ NULL_TREE);
+
+ break;
+ }
+
+ /* For predicates, prepend the proper CR6 value to the argument list. */
+
+ if (fcode >= ALTIVEC_PIM_VEC_ALL_EQ)
+ arglist = tree_cons (NULL_TREE,
+ build_int_cst (NULL_TREE, info->flags & pim_cr6_MASK),
+ arglist);
+
+ /* If we could not properly determine an overload candidate or a return type,
+ issue an error. */
+
+ if (ovl_error || !rettype)
+ {
+ error ("invalid argument(s) for AltiVec operation or predicate");
+ /* Choose the return type for the first overload candidate, if
+ a type has been provided. Otherwise, use 'vector signed int'. */
+ rettype = info->rettype ? info->rettype : V4SI_type_node;
+ }
+
+ /* Retrieve the underlying AltiVec __builtin_... to call, and call it. */
+
+ fndecl = rs6000_builtin_decls [info->insn];
+ arglist = altivec_convert_args (TYPE_ARG_TYPES (TREE_TYPE (fndecl)),
+ arglist);
+
+ return convert (rettype, build_function_call_expr (fndecl, arglist));
+}
+/* APPLE LOCAL end AltiVec */
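+
+/* End-to-end sketch (the builtin named here is illustrative): a call
+ 'vec_add (vu, vu)' on 'vector unsigned char' operands is looked up
+ in ALTIVEC_PIM_TABLE, resolved by altivec_ovl_resolve, given return
+ type 'vector unsigned char' by altivec_cov_rt_12, and rewritten as
+ something like
+
+ (vector unsigned char) __builtin_altivec_vaddubm (vu, vu);
+
+ with altivec_convert_args casting the operands to the builtin's
+ declared parameter types. */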
+
+static rtx
+rs6000_expand_unop_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat;
+ /* APPLE LOCAL begin Altivec */
+ tree arg0;
+ rtx op0;
+ /* APPLE LOCAL end Altivec */
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ /* APPLE LOCAL begin Altivec */
+ if (!arglist || !TREE_VALUE (arglist))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ /* APPLE LOCAL end Altivec */
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vspltisb
+ || icode == CODE_FOR_altivec_vspltish
+ /* APPLE LOCAL begin 4119059 */
+ || icode == CODE_FOR_altivec_vspltisw)
+ {
+ /* Only allow 5-bit *signed* literals. */
+ if (GET_CODE (op0) != CONST_INT
+ || INTVAL (op0) > 15
+ || INTVAL (op0) < -16)
+ {
+ error ("argument 1 must be a 5-bit signed literal");
+ return const0_rtx;
+ }
+ }
+ if (icode == CODE_FOR_spe_evsplatfi
+ /* APPLE LOCAL end 4119059 */
+ || icode == CODE_FOR_spe_evsplati)
+ {
+ /* Only allow 5-bit *signed* literals. */
+ if (GET_CODE (op0) != CONST_INT
+ || INTVAL (op0) > 15
+ || INTVAL (op0) < -16)
+ {
+ error ("argument 1 must be a 5-bit signed literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
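+
+/* Example: assuming vec_splat_s8 expands through
+ CODE_FOR_altivec_vspltisb, 'vec_splat_s8 (15)' is accepted while
+ 'vec_splat_s8 (16)' is rejected with "argument 1 must be a 5-bit
+ signed literal", since the immediate must lie in [-16, 15]. */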
+
+static rtx
+altivec_expand_abs_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat, scratch1, scratch2;
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ tree arg0;
+ rtx op0;
+ enum machine_mode tmode;
+ enum machine_mode mode0;
+ if (!arglist
+ || !TREE_VALUE (arglist))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+ /* APPLE LOCAL end Altivec radar 5447227 */
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ scratch1 = gen_reg_rtx (mode0);
+ scratch2 = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+rs6000_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat;
+ /* APPLE LOCAL begin Altivec */
+ tree arg0;
+ tree arg1;
+ rtx op0;
+ rtx op1;
+ /* APPLE LOCAL end Altivec */
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ /* APPLE LOCAL begin Altivec */
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist)))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ /* APPLE LOCAL end Altivec */
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vcfux
+ || icode == CODE_FOR_altivec_vcfsx
+ || icode == CODE_FOR_altivec_vctsxs
+ || icode == CODE_FOR_altivec_vctuxs
+ || icode == CODE_FOR_altivec_vspltb
+ || icode == CODE_FOR_altivec_vsplth
+ || icode == CODE_FOR_altivec_vspltw
+ || icode == CODE_FOR_spe_evaddiw
+ || icode == CODE_FOR_spe_evldd
+ || icode == CODE_FOR_spe_evldh
+ || icode == CODE_FOR_spe_evldw
+ || icode == CODE_FOR_spe_evlhhesplat
+ || icode == CODE_FOR_spe_evlhhossplat
+ || icode == CODE_FOR_spe_evlhhousplat
+ || icode == CODE_FOR_spe_evlwhe
+ || icode == CODE_FOR_spe_evlwhos
+ || icode == CODE_FOR_spe_evlwhou
+ || icode == CODE_FOR_spe_evlwhsplat
+ || icode == CODE_FOR_spe_evlwwsplat
+ || icode == CODE_FOR_spe_evrlwi
+ || icode == CODE_FOR_spe_evslwi
+ || icode == CODE_FOR_spe_evsrwis
+ || icode == CODE_FOR_spe_evsubifw
+ || icode == CODE_FOR_spe_evsrwiu)
+ {
+ /* Only allow 5-bit unsigned literals. */
+ STRIP_NOPS (arg1);
+ if (TREE_CODE (arg1) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg1) & ~0x1f)
+ {
+ error ("argument 2 must be a 5-bit unsigned literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
+ tree arglist, rtx target)
+{
+ rtx pat, scratch;
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ tree cr6_form;
+ tree arg0;
+ tree arg1;
+ rtx op0;
+ rtx op1;
+ enum machine_mode tmode;
+ enum machine_mode mode0;
+ enum machine_mode mode1;
+ int cr6_form_int;
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist))
+ || !TREE_CHAIN (TREE_CHAIN (arglist))
+ || !TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ cr6_form = TREE_VALUE (arglist);
+ arg0 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ tmode = SImode;
+ mode0 = insn_data[icode].operand[1].mode;
+ mode1 = insn_data[icode].operand[2].mode;
+ /* APPLE LOCAL end Altivec radar 5447227 */
+
+ if (TREE_CODE (cr6_form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_altivec_predicate must be a constant");
+ return const0_rtx;
+ }
+ else
+ cr6_form_int = TREE_INT_CST_LOW (cr6_form);
+
+ gcc_assert (mode0 == mode1);
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1,
+ gen_rtx_SYMBOL_REF (Pmode, opcode));
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ /* The vec_any* and vec_all* predicates use the same opcodes for two
+ different operations, but the bits in CR6 will be different
+ depending on what information we want. So we have to play tricks
+ with CR6 to get the right bits out.
+
+ If you think this is disgusting, look at the specs for the
+ AltiVec predicates. */
+
+ switch (cr6_form_int)
+ {
+ case 0:
+ emit_insn (gen_cr6_test_for_zero (target));
+ break;
+ case 1:
+ emit_insn (gen_cr6_test_for_zero_reverse (target));
+ break;
+ case 2:
+ emit_insn (gen_cr6_test_for_lt (target));
+ break;
+ case 3:
+ emit_insn (gen_cr6_test_for_lt_reverse (target));
+ break;
+ default:
+ error ("argument 1 of __builtin_altivec_predicate is out of range");
+ break;
+ }
+
+ return target;
+}
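+
+/* Note: vec_all_* and vec_any_* forms of the same comparison reach this
+ function with the same compare icode; they differ only in the CR6
+ form constant that rs6000_fold_builtin prepended to the argument
+ list, which selects one of the four gen_cr6_test_* sequences above. */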
+
+static rtx
+altivec_expand_lv_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat, addr;
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ tree arg0;
+ tree arg1;
+ enum machine_mode tmode;
+ enum machine_mode mode0;
+ enum machine_mode mode1;
+ rtx op0;
+ rtx op1;
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist)))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = Pmode;
+ mode1 = Pmode;
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ /* APPLE LOCAL end Altivec radar 5447227 */
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ if (op0 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (tmode, op1);
+ }
+ else
+ {
+ op0 = copy_to_mode_reg (mode0, op0);
+ addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
+ }
+
+ pat = GEN_FCN (icode) (target, addr);
+
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+spe_expand_stv_builtin (enum insn_code icode, tree arglist)
+{
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx pat;
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+
+ /* Invalid arguments. Bail before doing anything stoopid! */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
+ op0 = copy_to_mode_reg (mode2, op0);
+ if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
+ op1 = copy_to_mode_reg (mode0, op1);
+ if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
+ op2 = copy_to_mode_reg (mode1, op2);
+
+ pat = GEN_FCN (icode) (op1, op2, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
+static rtx
+altivec_expand_stv_builtin (enum insn_code icode, tree arglist)
+{
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ tree arg0;
+ tree arg1;
+ tree arg2;
+ rtx op0;
+ rtx op1;
+ rtx op2;
+ rtx pat, addr;
+ enum machine_mode tmode;
+ enum machine_mode mode1;
+ enum machine_mode mode2;
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist))
+ || !TREE_CHAIN (TREE_CHAIN (arglist))
+ || !TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ tmode = insn_data[icode].operand[0].mode;
+ mode1 = Pmode;
+ mode2 = Pmode;
+ /* APPLE LOCAL end Altivec radar 5447227 */
+
+ /* Invalid arguments. Bail before doing anything stoopid! */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
+ op0 = copy_to_mode_reg (tmode, op0);
+
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ if (op1 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (tmode, op2);
+ }
+ else
+ {
+ op1 = copy_to_mode_reg (mode1, op1);
+ addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
+ }
+
+ pat = GEN_FCN (icode) (addr, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
+static rtx
+rs6000_expand_ternop_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat;
+ /* APPLE LOCAL begin Altivec. */
+ tree arg0, arg1, arg2;
+ rtx op0, op1, op2;
+ /* APPLE LOCAL end Altivec. */
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[3].mode;
+
+ /* APPLE LOCAL begin Altivec. */
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist))
+ || !TREE_CHAIN (TREE_CHAIN (arglist))
+ || !TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ /* APPLE LOCAL end Altivec. */
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vsldoi_v4sf
+ || icode == CODE_FOR_altivec_vsldoi_v4si
+ || icode == CODE_FOR_altivec_vsldoi_v8hi
+ || icode == CODE_FOR_altivec_vsldoi_v16qi)
+ {
+ /* Only allow 4-bit unsigned literals. */
+ STRIP_NOPS (arg2);
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg2) & ~0xf)
+ {
+ error ("argument 3 must be a 4-bit unsigned literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+/* Expand the lvx builtins. */
+static rtx
+altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0;
+ enum machine_mode tmode, mode0;
+ rtx pat, op0;
+ enum insn_code icode;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_lvx_v16qi;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_lvx_v8hi;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
+ icode = CODE_FOR_altivec_lvx_v4si;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_lvx_v4sf;
+ break;
+ default:
+ *expandedp = false;
+ return NULL_RTX;
+ }
+
+ *expandedp = true;
+
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ if (!arglist
+ || !TREE_VALUE (arglist))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ /* APPLE LOCAL end Altivec radar 5447227 */
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* Expand the stvx builtins. */
+static rtx
+altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1;
+ enum machine_mode mode0, mode1;
+ rtx pat, op0, op1;
+ enum insn_code icode;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_stvx_v16qi;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_stvx_v8hi;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
+ icode = CODE_FOR_altivec_stvx_v4si;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_stvx_v4sf;
+ break;
+ default:
+ *expandedp = false;
+ return NULL_RTX;
+ }
+
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist)))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ /* APPLE LOCAL end Altivec radar 5447227 */
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+
+ *expandedp = true;
+ return NULL_RTX;
+}
+
+/* Expand the dst builtins. */
+static rtx
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, arg2;
+ enum machine_mode mode0, mode1, mode2;
+ rtx pat, op0, op1, op2;
+ struct builtin_description *d;
+ size_t i;
+
+ *expandedp = false;
+
+ /* Handle DST variants. */
+ d = (struct builtin_description *) bdesc_dst;
+ for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
+ if (d->code == fcode)
+ {
+ /* APPLE LOCAL begin Altivec radar 5447227 */
+ if (!arglist
+ || !TREE_VALUE (arglist)
+ || !TREE_CHAIN (arglist)
+ || !TREE_VALUE (TREE_CHAIN (arglist))
+ || !TREE_CHAIN (TREE_CHAIN (arglist))
+ || !TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))))
+ {
+ error ("too few arguments to function");
+ return const0_rtx;
+ }
+ /* APPLE LOCAL end Altivec radar 5447227 */
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+
+ /* Invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ *expandedp = true;
+ STRIP_NOPS (arg2);
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg2) & ~0x3)
+ {
+ error ("argument to %qs must be a 2-bit unsigned literal", d->name);
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (Pmode, op0);
+ if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (d->icode) (op0, op1, op2);
+ if (pat != 0)
+ emit_insn (pat);
+
+ return NULL_RTX;
+ }
+
+ return NULL_RTX;
+}
+
+/* Expand vec_init builtin. */
+static rtx
+altivec_expand_vec_init_builtin (tree type, tree arglist, rtx target)
+{
+ enum machine_mode tmode = TYPE_MODE (type);
+ enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+ int i, n_elt = GET_MODE_NUNITS (tmode);
+ rtvec v = rtvec_alloc (n_elt);
+
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
+ {
+ rtx x = expand_normal (TREE_VALUE (arglist));
+ RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+ }
+
+ gcc_assert (arglist == NULL);
+
+ if (!target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
+ return target;
+}
+
+/* Return the integer constant in ARG. Constrain it to be in the range
+ of the subparts of VEC_TYPE; issue an error if not. */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+ unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+ if (!host_integerp (arg, 1)
+ || (elt = tree_low_cst (arg, 1), elt > max))
+ {
+ error ("selector must be an integer constant in the range 0..%wi", max);
+ return 0;
+ }
+
+ return elt;
+}
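+
+/* Example: for a 'vector signed short' argument (eight subparts), max
+ is 7, so a selector of 9 is diagnosed with "selector must be an
+ integer constant in the range 0..7" and 0 is used instead. */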
+
+/* Expand vec_set builtin. */
+static rtx
+altivec_expand_vec_set_builtin (tree arglist)
+{
+ enum machine_mode tmode, mode1;
+ tree arg0, arg1, arg2;
+ int elt;
+ rtx op0, op1;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ tmode = TYPE_MODE (TREE_TYPE (arg0));
+ mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+ if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+ op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+ op0 = force_reg (tmode, op0);
+ op1 = force_reg (mode1, op1);
+
+ rs6000_expand_vector_set (op0, op1, elt);
+
+ return op0;
+}
+
+/* Expand vec_ext builtin. */
+static rtx
+altivec_expand_vec_ext_builtin (tree arglist, rtx target)
+{
+ enum machine_mode tmode, mode0;
+ tree arg0, arg1;
+ int elt;
+ rtx op0;
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+
+ op0 = expand_normal (arg0);
+ elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+ tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ mode0 = TYPE_MODE (TREE_TYPE (arg0));
+ gcc_assert (VECTOR_MODE_P (mode0));
+
+ op0 = force_reg (mode0, op0);
+
+ if (optimize || !target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_extract (target, op0, elt);
+
+ return target;
+}
+
+/* Expand the builtin in EXP and store the result in TARGET. Store
+ true in *EXPANDEDP if we found a builtin to expand. */
+static rtx
+altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
+{
+ struct builtin_description *d;
+ struct builtin_description_predicates *dp;
+ size_t i;
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ tree arg0;
+ rtx op0, pat;
+ enum machine_mode tmode, mode0;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ {
+ *expandedp = true;
+ error ("unresolved overload for Altivec builtin %qF", fndecl);
+ return const0_rtx;
+ }
+
+ target = altivec_expand_ld_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ target = altivec_expand_st_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ target = altivec_expand_dst_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ *expandedp = true;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_STVX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, arglist);
+ case ALTIVEC_BUILTIN_STVEBX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, arglist);
+ case ALTIVEC_BUILTIN_STVEHX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, arglist);
+ case ALTIVEC_BUILTIN_STVEWX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, arglist);
+ case ALTIVEC_BUILTIN_STVXL:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, arglist);
+
+ case ALTIVEC_BUILTIN_MFVSCR:
+ icode = CODE_FOR_altivec_mfvscr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ pat = GEN_FCN (icode) (target);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_MTVSCR:
+ icode = CODE_FOR_altivec_mtvscr;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSSALL:
+ emit_insn (gen_altivec_dssall ());
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSS:
+ icode = CODE_FOR_altivec_dss;
+ arg0 = TREE_VALUE (arglist);
+ STRIP_NOPS (arg0);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (TREE_CODE (arg0) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg0) & ~0x3)
+ {
+ error ("argument to dss must be a 2-bit unsigned literal");
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ emit_insn (gen_altivec_dss (op0));
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
+ return altivec_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
+
+ case ALTIVEC_BUILTIN_VEC_SET_V4SI:
+ case ALTIVEC_BUILTIN_VEC_SET_V8HI:
+ case ALTIVEC_BUILTIN_VEC_SET_V16QI:
+ case ALTIVEC_BUILTIN_VEC_SET_V4SF:
+ return altivec_expand_vec_set_builtin (arglist);
+
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
+ return altivec_expand_vec_ext_builtin (arglist, target);
+
+ default:
+ /* Fall through to the table-driven expanders below. */
+ break;
+ }
+
+ /* Expand abs* operations. */
+ d = (struct builtin_description *) bdesc_abs;
+ for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_abs_builtin (d->icode, arglist, target);
+
+ /* Expand the AltiVec predicates. */
+ dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
+ if (dp->code == fcode)
+ return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
+ arglist, target);
+
+ /* LV* are funky. We initialized them differently. */
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LVSL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVSR:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEBX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEHX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEWX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVXL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
+ arglist, target);
+ default:
+ /* Fall through to the failure return below. */
+ break;
+ }
+
+ *expandedp = false;
+ return NULL_RTX;
+}
+
+/* Binops that need to be initialized manually, but can be expanded
+ automagically by rs6000_expand_binop_builtin. */
+static struct builtin_description bdesc_2arg_spe[] =
+{
+ { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
+ { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
+ { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
+ { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
+ { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
+ { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
+ { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
+ { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
+ { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
+ { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
+ { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
+ { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
+ { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
+ { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
+ { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
+ { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
+ { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
+ { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
+ { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
+ { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
+ { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
+ { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
+};
+
+/* Expand the builtin in EXP and store the result in TARGET. Store
+ true in *EXPANDEDP if we found a builtin to expand.
+
+ This expands the SPE builtins that are not simple unary and binary
+ operations. */
+static rtx
+spe_expand_builtin (tree exp, rtx target, bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ tree arg1, arg0;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ enum insn_code icode;
+ enum machine_mode tmode, mode0;
+ rtx pat, op0;
+ struct builtin_description *d;
+ size_t i;
+
+ *expandedp = true;
+
+ /* Syntax check for a 5-bit unsigned immediate. */
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSTDD:
+ case SPE_BUILTIN_EVSTDH:
+ case SPE_BUILTIN_EVSTDW:
+ case SPE_BUILTIN_EVSTWHE:
+ case SPE_BUILTIN_EVSTWHO:
+ case SPE_BUILTIN_EVSTWWE:
+ case SPE_BUILTIN_EVSTWWO:
+ arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ if (TREE_CODE (arg1) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg1) & ~0x1f)
+ {
+ error ("argument 2 must be a 5-bit unsigned literal");
+ return const0_rtx;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* The evsplat*i instructions are not quite generic. */
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSPLATFI:
+ return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
+ arglist, target);
+ case SPE_BUILTIN_EVSPLATI:
+ return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
+ arglist, target);
+ default:
+ break;
+ }
+
+ d = (struct builtin_description *) bdesc_2arg_spe;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
+ if (d->code == fcode)
+ return rs6000_expand_binop_builtin (d->icode, arglist, target);
+
+ d = (struct builtin_description *) bdesc_spe_predicates;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
+ if (d->code == fcode)
+ return spe_expand_predicate_builtin (d->icode, arglist, target);
+
+ d = (struct builtin_description *) bdesc_spe_evsel;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
+ if (d->code == fcode)
+ return spe_expand_evsel_builtin (d->icode, arglist, target);
+
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSTDDX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, arglist);
+ case SPE_BUILTIN_EVSTDHX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, arglist);
+ case SPE_BUILTIN_EVSTDWX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, arglist);
+ case SPE_BUILTIN_EVSTWHEX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, arglist);
+ case SPE_BUILTIN_EVSTWHOX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, arglist);
+ case SPE_BUILTIN_EVSTWWEX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, arglist);
+ case SPE_BUILTIN_EVSTWWOX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, arglist);
+ case SPE_BUILTIN_EVSTDD:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, arglist);
+ case SPE_BUILTIN_EVSTDH:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, arglist);
+ case SPE_BUILTIN_EVSTDW:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, arglist);
+ case SPE_BUILTIN_EVSTWHE:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, arglist);
+ case SPE_BUILTIN_EVSTWHO:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, arglist);
+ case SPE_BUILTIN_EVSTWWE:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, arglist);
+ case SPE_BUILTIN_EVSTWWO:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, arglist);
+ case SPE_BUILTIN_MFSPEFSCR:
+ icode = CODE_FOR_spe_mfspefscr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ pat = GEN_FCN (icode) (target);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+ case SPE_BUILTIN_MTSPEFSCR:
+ icode = CODE_FOR_spe_mtspefscr;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+ default:
+ break;
+ }
+
+ *expandedp = false;
+ return NULL_RTX;
+}
+
+static rtx
+spe_expand_predicate_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat, scratch, tmp;
+ tree form = TREE_VALUE (arglist);
+ tree arg0 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ int form_int;
+ enum rtx_code code;
+
+ if (TREE_CODE (form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_spe_predicate must be a constant");
+ return const0_rtx;
+ }
+ else
+ form_int = TREE_INT_CST_LOW (form);
+
+ gcc_assert (mode0 == mode1);
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != SImode
+ || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
+ target = gen_reg_rtx (SImode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (CCmode);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1);
+ if (! pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ /* There are 4 variants for each predicate: _any_, _all_, _upper_,
+ _lower_. We use one compare, but look in different bits of the
+ CR for each variant.
+
+ There are 2 elements in each SPE simd type (upper/lower). The CR
+ bits are set as follows:
+
+ BIT 0 | BIT 1 | BIT 2 | BIT 3
+ U | L | (U | L) | (U & L)
+
+ So, for an "all" relationship, BIT 3 would be set.
+ For an "any" relationship, BIT 2 would be set. Etc.
+
+ Following traditional nomenclature, these bits map to:
+
+ BIT 0 | BIT 1 | BIT 2 | BIT 3
+ LT | GT | EQ | OV
+
+ Later, we will generate rtl to look in the OV/EQ/LT/GT bits,
+ matching the all/any/upper/lower variants handled below.
+ */
+
+ switch (form_int)
+ {
+ /* All variant. OV bit. */
+ case 0:
+ /* We need to get to the OV bit, which is the ORDERED bit. We
+ could generate (ordered:SI (reg:CC xx) (const_int 0)), but
+ that's ugly and will make validate_condition_mode die.
+ So let's just use another pattern. */
+ emit_insn (gen_move_from_CR_ov_bit (target, scratch));
+ return target;
+ /* Any variant. EQ bit. */
+ case 1:
+ code = EQ;
+ break;
+ /* Upper variant. LT bit. */
+ case 2:
+ code = LT;
+ break;
+ /* Lower variant. GT bit. */
+ case 3:
+ code = GT;
+ break;
+ default:
+ error ("argument 1 of __builtin_spe_predicate is out of range");
+ return const0_rtx;
+ }
+
+ tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
+ emit_move_insn (target, tmp);
+
+ return target;
+}
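+
+/* Example: with form_int == 2 (the "upper" variant), the code above
+ emits
+
+ (set (reg:SI target) (lt:SI (reg:CC scratch) (const_int 0)))
+
+ i.e. the result is taken from the LT bit, which per the table above
+ holds the upper-element comparison outcome. */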
+
+/* The evsel builtins look like this:
+
+ e = __builtin_spe_evsel_OP (a, b, c, d);
+
+ and work like this:
+
+ e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
+ e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
+*/
+
+static rtx
+spe_expand_evsel_builtin (enum insn_code icode, tree arglist, rtx target)
+{
+ rtx pat, scratch;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ tree arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = expand_normal (arg3);
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ gcc_assert (mode0 == mode1);
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node
+ || arg2 == error_mark_node || arg3 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != mode0
+ || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
+ target = gen_reg_rtx (mode0);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode0, op1);
+ if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
+ op2 = copy_to_mode_reg (mode0, op2);
+ if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
+ op3 = copy_to_mode_reg (mode0, op3);
+
+ /* Generate the compare. */
+ scratch = gen_reg_rtx (CCmode);
+ pat = GEN_FCN (icode) (scratch, op0, op1);
+ if (! pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ if (mode0 == V2SImode)
+ emit_insn (gen_spe_evsel (target, op2, op3, scratch));
+ else
+ emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
+
+ return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ struct builtin_description *d;
+ size_t i;
+ rtx ret;
+ bool success;
+
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
+ || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ {
+ int icode = (int) CODE_FOR_altivec_lvsr;
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode = insn_data[icode].operand[1].mode;
+ tree arg;
+ rtx op, addr, pat;
+
+ gcc_assert (TARGET_ALTIVEC);
+
+ arg = TREE_VALUE (arglist);
+ gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
+ op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
+ addr = memory_address (mode, op);
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ op = addr;
+ else
+ {
+ /* For the load case need to negate the address. */
+ op = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, op,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ }
+ op = gen_rtx_MEM (mode, op);
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ /*pat = gen_altivec_lvsr (target, op);*/
+ pat = GEN_FCN (icode) (target, op);
+ if (!pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+ }
+
+ if (TARGET_ALTIVEC)
+ {
+ ret = altivec_expand_builtin (exp, target, &success);
+
+ if (success)
+ return ret;
+ }
+ if (TARGET_SPE)
+ {
+ ret = spe_expand_builtin (exp, target, &success);
+
+ if (success)
+ return ret;
+ }
+
+ gcc_assert (TARGET_ALTIVEC || TARGET_SPE);
+
+ /* Handle simple unary operations. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_unop_builtin (d->icode, arglist, target);
+
+ /* Handle simple binary operations. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_binop_builtin (d->icode, arglist, target);
+
+ /* Handle simple ternary operations. */
+ d = (struct builtin_description *) bdesc_3arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_ternop_builtin (d->icode, arglist, target);
+
+ /* APPLE LOCAL begin 5774356 */
+ /* It looks like a builtin call, but there is something wrong;
+ maybe the wrong number of arguments. Return failure. */
+ return NULL_RTX;
+ /* APPLE LOCAL end 5774356 */
+}
+
+static tree
+build_opaque_vector_type (tree node, int nunits)
+{
+ node = copy_node (node);
+ TYPE_MAIN_VARIANT (node) = node;
+ return build_vector_type (node, nunits);
+}
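+
+/* Note: copying the element type and making the copy its own main
+ variant yields a vector type distinct from every user-visible one;
+ the opaque SPE types built from it (e.g. opaque_V2SI_type_node,
+ presumably exposed as __ev64_opaque__) are thereby kept apart from
+ ordinary V2SI vectors. */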
+
+static void
+rs6000_init_builtins (void)
+{
+ V2SI_type_node = build_vector_type (intSI_type_node, 2);
+ V2SF_type_node = build_vector_type (float_type_node, 2);
+ V4HI_type_node = build_vector_type (intHI_type_node, 4);
+ V4SI_type_node = build_vector_type (intSI_type_node, 4);
+ V4SF_type_node = build_vector_type (float_type_node, 4);
+ V8HI_type_node = build_vector_type (intHI_type_node, 8);
+ V16QI_type_node = build_vector_type (intQI_type_node, 16);
+
+ unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
+ unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
+ unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
+
+ opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
+ opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
+ opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
+ opaque_V4SI_type_node = copy_node (V4SI_type_node);
+
+ /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
+ types, especially in C++ land. Similarly, 'vector pixel' is distinct from
+ 'vector unsigned short'. */
+
+ bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
+ bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+ bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
+ pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+
+ long_integer_type_internal_node = long_integer_type_node;
+ long_unsigned_type_internal_node = long_unsigned_type_node;
+ intQI_type_internal_node = intQI_type_node;
+ uintQI_type_internal_node = unsigned_intQI_type_node;
+ intHI_type_internal_node = intHI_type_node;
+ uintHI_type_internal_node = unsigned_intHI_type_node;
+ intSI_type_internal_node = intSI_type_node;
+ uintSI_type_internal_node = unsigned_intSI_type_node;
+ float_type_internal_node = float_type_node;
+ void_type_internal_node = void_type_node;
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool char"),
+ bool_char_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool short"),
+ bool_short_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool int"),
+ bool_int_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__pixel"),
+ pixel_type_node));
+
+ bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
+ bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
+ bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
+ pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned char"),
+ unsigned_V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed char"),
+ V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool char"),
+ bool_V16QI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned short"),
+ unsigned_V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed short"),
+ V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool short"),
+ bool_V8HI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned int"),
+ unsigned_V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed int"),
+ V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool int"),
+ bool_V4SI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector float"),
+ V4SF_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __pixel"),
+ pixel_V8HI_type_node));
+
+ if (TARGET_SPE)
+ spe_init_builtins ();
+ if (TARGET_ALTIVEC)
+ altivec_init_builtins ();
+ if (TARGET_ALTIVEC || TARGET_SPE)
+ rs6000_common_init_builtins ();
+
+ /* APPLE LOCAL begin constant cfstrings */
+#ifdef SUBTARGET_INIT_BUILTINS
+ SUBTARGET_INIT_BUILTINS;
+#endif
+ /* APPLE LOCAL end constant cfstrings */
+
+#if TARGET_XCOFF
+ /* AIX libm provides clog as __clog. */
+ if (built_in_decls [BUILT_IN_CLOG])
+ set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
+#endif
+}
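+
+/* Editorial sketch (not part of the original patch): once the TYPE_DECLs
+ above are pushed, vector-enabled source can use them directly, e.g.
+
+ __vector float a, b;
+ __vector __bool int m;
+ __vector __pixel p;
+
+ and the builtins registered by spe_init_builtins,
+ altivec_init_builtins and rs6000_common_init_builtins become callable
+ as __builtin_spe_* and __builtin_altivec_*. */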
+
+/* Search through a set of builtins and enable their mask bits.
+ DESC is an array of builtins.
+ SIZE is the total number of builtins.
+ START is the builtin enum at which to start.
+ END is the builtin enum at which to end.
+ The builtins from START through END are assumed to form a contiguous
+ run of entries in DESC; entries outside that run are left untouched. */
+static void
+enable_mask_for_builtins (struct builtin_description *desc, int size,
+ enum rs6000_builtins start,
+ enum rs6000_builtins end)
+{
+ int i;
+
+ for (i = 0; i < size; ++i)
+ if (desc[i].code == start)
+ break;
+
+ if (i == size)
+ return;
+
+ for (; i < size; ++i)
+ {
+ /* Enable this builtin by copying the current target_flags into its mask. */
+ desc[i].mask = target_flags;
+ if (desc[i].code == end)
+ break;
+ }
+}
+
+static void
+spe_init_builtins (void)
+{
+ tree endlink = void_list_node;
+ tree puint_type_node = build_pointer_type (unsigned_type_node);
+ tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
+ struct builtin_description *d;
+ size_t i;
+
+ tree v2si_ftype_4_v2si
+ = build_function_type
+ (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ endlink)))));
+
+ tree v2sf_ftype_4_v2sf
+ = build_function_type
+ (opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ endlink)))));
+
+ tree int_ftype_int_v2si_v2si
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ endlink))));
+
+ tree int_ftype_int_v2sf_v2sf
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_puint_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_puint_char
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_pv2si_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_pv2si_char
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree void_ftype_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, integer_type_node, endlink));
+
+ tree int_ftype_void
+ = build_function_type (integer_type_node, endlink);
+
+ tree v2si_ftype_pv2si_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_puint_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_pushort_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, pushort_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_signed_char
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, signed_char_type_node,
+ endlink));
+
+ /* The initialization of the simple binary and unary builtins is
+ done in rs6000_common_init_builtins, but we have to enable the
+ mask bits here manually because we have run out of `target_flags'
+ bits. We really need to redesign this mask business. */
+
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
+ ARRAY_SIZE (bdesc_2arg),
+ SPE_BUILTIN_EVADDW,
+ SPE_BUILTIN_EVXOR);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
+ ARRAY_SIZE (bdesc_1arg),
+ SPE_BUILTIN_EVABS,
+ SPE_BUILTIN_EVSUBFUSIAAW);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
+ ARRAY_SIZE (bdesc_spe_predicates),
+ SPE_BUILTIN_EVCMPEQ,
+ SPE_BUILTIN_EVFSTSTLT);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
+ ARRAY_SIZE (bdesc_spe_evsel),
+ SPE_BUILTIN_EVSEL_CMPGTS,
+ SPE_BUILTIN_EVSEL_FSTSTEQ);
+
+ (*lang_hooks.decls.pushdecl)
+ (build_decl (TYPE_DECL, get_identifier ("__ev64_opaque__"),
+ opaque_V2SI_type_node));
+
+ /* Initialize irregular SPE builtins. */
+
+ def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
+ def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
+ def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
+ def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
+ def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
+ def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
+ def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
+ def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
+ def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
+ def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
+ def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
+ def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
+ def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
+ def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
+ def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
+ def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
+ def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
+ def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
+
+ /* Loads. */
+ def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
+ def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
+ def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
+ def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
+ def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
+ def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
+ def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
+ def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
+ def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
+ def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
+ def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
+ def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
+ def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
+
+ /* Predicates. */
+ d = (struct builtin_description *) bdesc_spe_predicates;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
+ {
+ tree type;
+
+ switch (insn_data[d->icode].operand[1].mode)
+ {
+ case V2SImode:
+ type = int_ftype_int_v2si_v2si;
+ break;
+ case V2SFmode:
+ type = int_ftype_int_v2sf_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Evsel predicates. */
+ d = (struct builtin_description *) bdesc_spe_evsel;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
+ {
+ tree type;
+
+ switch (insn_data[d->icode].operand[1].mode)
+ {
+ case V2SImode:
+ type = v2si_ftype_4_v2si;
+ break;
+ case V2SFmode:
+ type = v2sf_ftype_4_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
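+
+/* Editorial sketch (not part of the original patch, details hedged):
+ with -mspe the load/store builtins above are used roughly as
+
+ __ev64_opaque__ v = __builtin_spe_evldd (p, 0);
+ __builtin_spe_evstdd (v, p, 0);
+
+ where p is a hypothetical pointer to 8-byte storage and the trailing
+ argument is the constant offset encoded in the ev* instruction (an
+ int or char operand, matching the signatures above). */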
+
+static void
+altivec_init_builtins (void)
+{
+ struct builtin_description *d;
+ struct builtin_description_predicates *dp;
+ size_t i;
+ tree ftype;
+
+ tree pfloat_type_node = build_pointer_type (float_type_node);
+ tree pint_type_node = build_pointer_type (integer_type_node);
+ tree pshort_type_node = build_pointer_type (short_integer_type_node);
+ tree pchar_type_node = build_pointer_type (char_type_node);
+
+ tree pvoid_type_node = build_pointer_type (void_type_node);
+
+ tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
+ tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
+ tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
+ tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
+
+ tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
+
+ tree int_ftype_opaque
+ = build_function_type_list (integer_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+
+ tree opaque_ftype_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, integer_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_int_opaque_opaque
+ = build_function_type_list (integer_type_node,
+ integer_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+ tree int_ftype_int_v4si_v4si
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V4SI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_pcfloat
+ = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
+ tree void_ftype_pfloat_v4sf
+ = build_function_type_list (void_type_node,
+ pfloat_type_node, V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_pcint
+ = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
+ tree void_ftype_pint_v4si
+ = build_function_type_list (void_type_node,
+ pint_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_pcshort
+ = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
+ tree void_ftype_pshort_v8hi
+ = build_function_type_list (void_type_node,
+ pshort_type_node, V8HI_type_node, NULL_TREE);
+ tree v16qi_ftype_pcchar
+ = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
+ tree void_ftype_pchar_v16qi
+ = build_function_type_list (void_type_node,
+ pchar_type_node, V16QI_type_node, NULL_TREE);
+ tree void_ftype_v4si
+ = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_void
+ = build_function_type (V8HI_type_node, void_list_node);
+ tree void_ftype_void
+ = build_function_type (void_type_node, void_list_node);
+ tree void_ftype_int
+ = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
+
+ tree opaque_ftype_long_pcvoid
+ = build_function_type_list (opaque_V4SI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v16qi_ftype_long_pcvoid
+ = build_function_type_list (V16QI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v8hi_ftype_long_pcvoid
+ = build_function_type_list (V8HI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v4si_ftype_long_pcvoid
+ = build_function_type_list (V4SI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+
+ tree void_ftype_opaque_long_pvoid
+ = build_function_type_list (void_type_node,
+ opaque_V4SI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v4si_long_pvoid
+ = build_function_type_list (void_type_node,
+ V4SI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v16qi_long_pvoid
+ = build_function_type_list (void_type_node,
+ V16QI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v8hi_long_pvoid
+ = build_function_type_list (void_type_node,
+ V8HI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree int_ftype_int_v8hi_v8hi
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V8HI_type_node,
+ V8HI_type_node, NULL_TREE);
+ tree int_ftype_int_v16qi_v16qi
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V16QI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree int_ftype_int_v4sf_v4sf
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V4SF_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_v4si
+ = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi
+ = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi
+ = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf
+ = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree void_ftype_pcvoid_int_int
+ = build_function_type_list (void_type_node,
+ pcvoid_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
+ ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
+ ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
+
+ /* Add the DST variants. */
+ d = (struct builtin_description *) bdesc_dst;
+ for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
+ def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
+
+ /* Initialize the predicates. */
+ dp = (struct builtin_description_predicates *) bdesc_altivec_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
+ {
+ enum machine_mode mode1;
+ tree type;
+ bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ mode1 = VOIDmode;
+ else
+ mode1 = insn_data[dp->icode].operand[1].mode;
+
+ switch (mode1)
+ {
+ case VOIDmode:
+ type = int_ftype_int_opaque_opaque;
+ break;
+ case V4SImode:
+ type = int_ftype_int_v4si_v4si;
+ break;
+ case V8HImode:
+ type = int_ftype_int_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = int_ftype_int_v16qi_v16qi;
+ break;
+ case V4SFmode:
+ type = int_ftype_int_v4sf_v4sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (dp->mask, dp->name, type, dp->code);
+ }
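+
+ /* Editorial note (not part of the original patch): the leading integer
+ argument of each predicate selects how the CR6 result of the
+ underlying vcmp* instruction is tested (all versus any, true versus
+ false), which is why every signature above begins with
+ integer_type_node. */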
+
+ /* Initialize the abs* operators. */
+ d = (struct builtin_description *) bdesc_abs;
+ for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
+ {
+ enum machine_mode mode0;
+ tree type;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
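+
+ /* Editorial note (not part of the original patch): AltiVec has no
+ absolute-value instruction, so the bdesc_abs entries expand to short
+ synthesized sequences (e.g. subtract-from-zero plus max for the
+ integer modes); only the function types are chosen here. */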
+
+ if (TARGET_ALTIVEC)
+ {
+ tree decl;
+
+ /* Initialize target builtin that implements
+ targetm.vectorize.builtin_mask_for_load. */
+
+ decl = lang_hooks.builtin_function ("__builtin_altivec_mask_for_load",
+ v16qi_ftype_long_pcvoid,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ BUILT_IN_MD, NULL,
+ tree_cons (get_identifier ("const"),
+ NULL_TREE, NULL_TREE));
+ /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
+ altivec_builtin_mask_for_load = decl;
+ }
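+
+ /* Editorial note (not part of the original patch): the vectorizer
+ fetches this decl through targetm.vectorize.builtin_mask_for_load
+ and expands it (essentially an lvsl on the address) to build the
+ permutation mask used when realigning misaligned vector loads. */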
+
+ /* APPLE LOCAL begin AltiVec */
+ /* If Apple AltiVec is enabled, we need to define additional builtins
+ in lieu of what <altivec.h> provides for FSF AltiVec. */
+ if (rs6000_altivec_pim)
+ {
+ tree int_ftype_ellipsis = build_function_type (integer_type_node,
+ NULL_TREE);
+ int pim_code = ALTIVEC_PIM__FIRST;
+
+ /* NB: For overloaded operations/predicates, the pim_... flags specify
+ how to match up the argument types and how to determine the
+ return type, if necessary; the rs6000_fold_builtin() routine
+ does all this. */
+
+ /* PIM Operations. */
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_ABS);
+
+ def_pim_builtin ("vec_abs", V16QI_type_node, ABS_V16QI, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_abs.2", V8HI_type_node, ABS_V8HI, pim_ovl_8);
+ def_pim_builtin ("vec_abs.3", V4SF_type_node, ABS_V4SF, pim_ovl_4f);
+ def_pim_builtin ("vec_abs.4", V4SI_type_node, ABS_V4SI, pim_ovl_4);
+
+ def_pim_builtin ("vec_abss", V16QI_type_node, ABSS_V16QI, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_abss.2", V8HI_type_node, ABSS_V8HI, pim_ovl_8);
+ def_pim_builtin ("vec_abss.3", V4SI_type_node, ABSS_V4SI, pim_ovl_4);
+
+ def_pim_builtin ("vec_add", NULL_TREE, VADDUBM, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_add.2", NULL_TREE, VADDUHM, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_add.3", V4SF_type_node, VADDFP, pim_ovl_4f);
+ def_pim_builtin ("vec_add.4", NULL_TREE, VADDUWM, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_addc", unsigned_V4SI_type_node, VADDCUW, pim_group);
+
+ def_pim_builtin ("vec_adds", NULL_TREE, VADDUBS, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_adds.2", NULL_TREE, VADDSBS, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_adds.3", NULL_TREE, VADDUHS, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_adds.4", NULL_TREE, VADDSHS, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_adds.5", NULL_TREE, VADDUWS, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_adds.6", NULL_TREE, VADDSWS, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_and", NULL_TREE, VAND, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_andc", NULL_TREE, VANDC, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_avg", NULL_TREE, VAVGUB, pim_ovl_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_avg.2", NULL_TREE, VAVGSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_avg.3", NULL_TREE, VAVGUH, pim_ovl_8u | pim_rt_12);
+ def_pim_builtin ("vec_avg.4", NULL_TREE, VAVGSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_avg.5", NULL_TREE, VAVGUW, pim_ovl_4u | pim_rt_12);
+ def_pim_builtin ("vec_avg.6", NULL_TREE, VAVGSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_ceil", V4SF_type_node, VRFIP, pim_group);
+
+ def_pim_builtin ("vec_cmpb", V4SI_type_node, VCMPBFP, pim_group);
+
+ def_pim_builtin ("vec_cmpeq", bool_V16QI_type_node, VCMPEQUB, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_cmpeq.2", bool_V8HI_type_node, VCMPEQUH, pim_ovl_8);
+ def_pim_builtin ("vec_cmpeq.3", bool_V4SI_type_node, VCMPEQFP, pim_ovl_4f);
+ def_pim_builtin ("vec_cmpeq.4", bool_V4SI_type_node, VCMPEQUW, pim_ovl_4);
+
+ def_pim_builtin ("vec_cmpge", bool_V4SI_type_node, VCMPGEFP, pim_group);
+
+ def_pim_builtin ("vec_cmpgt", bool_V16QI_type_node, VCMPGTUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_cmpgt.2", bool_V16QI_type_node, VCMPGTSB, pim_ovl_16);
+ def_pim_builtin ("vec_cmpgt.3", bool_V8HI_type_node, VCMPGTUH, pim_ovl_8u);
+ def_pim_builtin ("vec_cmpgt.4", bool_V8HI_type_node, VCMPGTSH, pim_ovl_8);
+ def_pim_builtin ("vec_cmpgt.5", bool_V4SI_type_node, VCMPGTFP, pim_ovl_4f);
+ def_pim_builtin ("vec_cmpgt.6", bool_V4SI_type_node, VCMPGTUW, pim_ovl_4u);
+ def_pim_builtin ("vec_cmpgt.7", bool_V4SI_type_node, VCMPGTSW, pim_ovl_4);
+
+ def_pim_builtin ("vec_cmple", bool_V4SI_type_node, VCMPGEFP, pim_manip_swap | pim_group);
+
+ def_pim_builtin ("vec_cmplt", bool_V16QI_type_node, VCMPGTUB, pim_ovl_16u | pim_manip_swap | pim_group);
+ def_pim_builtin ("vec_cmplt.2", bool_V16QI_type_node, VCMPGTSB, pim_ovl_16 | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.3", bool_V8HI_type_node, VCMPGTUH, pim_ovl_8u | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.4", bool_V8HI_type_node, VCMPGTSH, pim_ovl_8 | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.5", bool_V4SI_type_node, VCMPGTFP, pim_ovl_4f | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.6", bool_V4SI_type_node, VCMPGTUW, pim_ovl_4u | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.7", bool_V4SI_type_node, VCMPGTSW, pim_ovl_4 | pim_manip_swap);
+
+ def_pim_builtin ("vec_ctf", V4SF_type_node, VCFUX, pim_ovl_4u | pim_group);
+ def_pim_builtin ("vec_ctf.2", V4SF_type_node, VCFSX, pim_ovl_4);
+
+ def_pim_builtin ("vec_cts", V4SI_type_node, VCTSXS, pim_ovl_4f | pim_group);
+
+ def_pim_builtin ("vec_ctu", unsigned_V4SI_type_node, VCTUXS, pim_ovl_4f | pim_group);
+
+ def_pim_builtin ("vec_dss", void_type_node, DSS, pim_group);
+
+ def_pim_builtin ("vec_dssall", void_type_node, DSSALL, pim_group);
+
+ def_pim_builtin ("vec_dst", void_type_node, DST, pim_group);
+
+ def_pim_builtin ("vec_dstst", void_type_node, DSTST, pim_group);
+
+ def_pim_builtin ("vec_dststt", void_type_node, DSTSTT, pim_group);
+
+ def_pim_builtin ("vec_dstt", void_type_node, DSTT, pim_group);
+
+ def_pim_builtin ("vec_expte", V4SF_type_node, VEXPTEFP, pim_group);
+
+ def_pim_builtin ("vec_floor", V4SF_type_node, VRFIM, pim_group);
+
+ def_pim_builtin ("vec_ld", NULL_TREE, LVX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lde", NULL_TREE, LVEBX, pim_ovl_pqi_2 | pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lde.2", NULL_TREE, LVEHX, pim_ovl_phi_2 | pim_rt_2p);
+ def_pim_builtin ("vec_lde.3", NULL_TREE, LVEWX, pim_ovl_psi_2 | pim_rt_2p);
+
+ def_pim_builtin ("vec_ldl", NULL_TREE, LVXL, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_loge", V4SF_type_node, VLOGEFP, pim_group);
+
+ def_pim_builtin ("vec_lvebx", NULL_TREE, LVEBX, pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lvehx", NULL_TREE, LVEHX, pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lvewx", NULL_TREE, LVEWX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lvsl", unsigned_V16QI_type_node, LVSL, pim_group);
+
+ def_pim_builtin ("vec_lvsr", unsigned_V16QI_type_node, LVSR, pim_group);
+
+ def_pim_builtin ("vec_lvx", NULL_TREE, LVX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lvxl", NULL_TREE, LVXL, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_madd", V4SF_type_node, VMADDFP, pim_group);
+
+ def_pim_builtin ("vec_madds", V8HI_type_node, VMHADDSHS, pim_group);
+
+ def_pim_builtin ("vec_max", NULL_TREE, VMAXUB, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_max.2", NULL_TREE, VMAXSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_max.3", NULL_TREE, VMAXUH, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_max.4", NULL_TREE, VMAXSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_max.5", NULL_TREE, VMAXFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_max.6", NULL_TREE, VMAXUW, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_max.7", NULL_TREE, VMAXSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mergeh", NULL_TREE, VMRGHB, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_mergeh.2", NULL_TREE, VMRGHH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_mergeh.3", NULL_TREE, VMRGHW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mergel", NULL_TREE, VMRGLB, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_mergel.2", NULL_TREE, VMRGLH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_mergel.3", NULL_TREE, VMRGLW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mfvscr", unsigned_V8HI_type_node, MFVSCR, pim_group);
+
+ def_pim_builtin ("vec_min", NULL_TREE, VMINUB, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_min.2", NULL_TREE, VMINSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_min.3", NULL_TREE, VMINUH, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_min.4", NULL_TREE, VMINSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_min.5", NULL_TREE, VMINFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_min.6", NULL_TREE, VMINUW, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_min.7", NULL_TREE, VMINSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mladd", unsigned_V8HI_type_node, VMLADDUHM, pim_ovl_8u_8u | pim_group);
+ def_pim_builtin ("vec_mladd.2", V8HI_type_node, VMLADDUHM, pim_ovl_8);
+
+ def_pim_builtin ("vec_mradds", V8HI_type_node, VMHRADDSHS, pim_group);
+
+ def_pim_builtin ("vec_msum", unsigned_V4SI_type_node, VMSUMUBM, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_msum.2", V4SI_type_node, VMSUMMBM, pim_ovl_16);
+ def_pim_builtin ("vec_msum.3", unsigned_V4SI_type_node, VMSUMUHM, pim_ovl_8u);
+ def_pim_builtin ("vec_msum.4", V4SI_type_node, VMSUMSHM, pim_ovl_8);
+
+ def_pim_builtin ("vec_msums", unsigned_V4SI_type_node, VMSUMUHS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_msums.2", V4SI_type_node, VMSUMSHS, pim_ovl_8);
+
+ def_pim_builtin ("vec_mtvscr", void_type_node, MTVSCR, pim_group);
+
+ def_pim_builtin ("vec_mule", unsigned_V8HI_type_node, VMULEUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_mule.2", V8HI_type_node, VMULESB, pim_ovl_16);
+ def_pim_builtin ("vec_mule.3", unsigned_V4SI_type_node, VMULEUH, pim_ovl_8u);
+ def_pim_builtin ("vec_mule.4", V4SI_type_node, VMULESH, pim_ovl_8);
+
+ def_pim_builtin ("vec_mulo", unsigned_V8HI_type_node, VMULOUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_mulo.2", V8HI_type_node, VMULOSB, pim_ovl_16);
+ def_pim_builtin ("vec_mulo.3", unsigned_V4SI_type_node, VMULOUH, pim_ovl_8u);
+ def_pim_builtin ("vec_mulo.4", V4SI_type_node, VMULOSH, pim_ovl_8);
+
+ def_pim_builtin ("vec_nmsub", V4SF_type_node, VNMSUBFP, pim_group);
+
+ def_pim_builtin ("vec_nor", NULL_TREE, VNOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_or", NULL_TREE, VOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_pack", NULL_TREE, VPKUHUM, pim_ovl_8 | pim_rt_1h | pim_group);
+ def_pim_builtin ("vec_pack.2", NULL_TREE, VPKUWUM, pim_ovl_4 | pim_rt_1h);
+
+ def_pim_builtin ("vec_packpx", pixel_V8HI_type_node, VPKPX, pim_group);
+
+ def_pim_builtin ("vec_packs", unsigned_V16QI_type_node, VPKUHUS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_packs.2", V16QI_type_node, VPKSHSS, pim_ovl_8);
+ def_pim_builtin ("vec_packs.3", unsigned_V8HI_type_node, VPKUWUS, pim_ovl_4u);
+ def_pim_builtin ("vec_packs.4", V8HI_type_node, VPKSWSS, pim_ovl_4);
+
+ def_pim_builtin ("vec_packsu", unsigned_V16QI_type_node, VPKUHUS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_packsu.2", unsigned_V16QI_type_node, VPKSHUS, pim_ovl_8);
+ def_pim_builtin ("vec_packsu.3", unsigned_V8HI_type_node, VPKUWUS, pim_ovl_4u);
+ def_pim_builtin ("vec_packsu.4", unsigned_V8HI_type_node, VPKSWUS, pim_ovl_4);
+
+ def_pim_builtin ("vec_perm", V16QI_type_node, VPERM_4SI, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_re", V4SF_type_node, VREFP, pim_group);
+
+ def_pim_builtin ("vec_rl", NULL_TREE, VRLB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_rl.2", NULL_TREE, VRLH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_rl.3", NULL_TREE, VRLW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_round", V4SF_type_node, VRFIN, pim_group);
+
+ def_pim_builtin ("vec_rsqrte", V4SF_type_node, VRSQRTEFP, pim_group);
+
+ def_pim_builtin ("vec_sel", NULL_TREE, VSEL_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sl", NULL_TREE, VSLB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sl.2", NULL_TREE, VSLH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sl.3", NULL_TREE, VSLW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_sld", NULL_TREE, VSLDOI_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sll", NULL_TREE, VSL, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_slo", NULL_TREE, VSLO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_splat", NULL_TREE, VSPLTB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_splat.2", NULL_TREE, VSPLTH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_splat.3", NULL_TREE, VSPLTW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_splat_s8", V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_splat_s16", V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_splat_s32", V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_splat_u8", unsigned_V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_splat_u16", unsigned_V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_splat_u32", unsigned_V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_sr", NULL_TREE, VSRB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sr.2", NULL_TREE, VSRH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sr.3", NULL_TREE, VSRW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_sra", NULL_TREE, VSRAB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sra.2", NULL_TREE, VSRAH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sra.3", NULL_TREE, VSRAW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_srl", NULL_TREE, VSR, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sro", NULL_TREE, VSRO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_st", void_type_node, STVX, pim_group);
+
+ def_pim_builtin ("vec_ste", void_type_node, STVEBX, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_ste.2", void_type_node, STVEHX, pim_ovl_8);
+ def_pim_builtin ("vec_ste.3", void_type_node, STVEWX, pim_ovl_4);
+
+ def_pim_builtin ("vec_stl", void_type_node, STVXL, pim_group);
+
+ def_pim_builtin ("vec_stvebx", void_type_node, STVEBX, pim_group);
+ def_pim_builtin ("vec_stvehx", void_type_node, STVEHX, pim_group);
+ def_pim_builtin ("vec_stvewx", void_type_node, STVEWX, pim_group);
+
+ def_pim_builtin ("vec_stvx", void_type_node, STVX, pim_group);
+
+ def_pim_builtin ("vec_stvxl", void_type_node, STVXL, pim_group);
+
+ def_pim_builtin ("vec_sub", NULL_TREE, VSUBUBM, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_sub.2", NULL_TREE, VSUBUHM, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_sub.3", NULL_TREE, VSUBFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_sub.4", NULL_TREE, VSUBUWM, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_subc", unsigned_V4SI_type_node, VSUBCUW, pim_group);
+
+ def_pim_builtin ("vec_subs", NULL_TREE, VSUBUBS, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_subs.2", NULL_TREE, VSUBSBS, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_subs.3", NULL_TREE, VSUBUHS, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_subs.4", NULL_TREE, VSUBSHS, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_subs.5", NULL_TREE, VSUBUWS, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_subs.6", NULL_TREE, VSUBSWS, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_sum4s", unsigned_V4SI_type_node, VSUM4UBS, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_sum4s.2", V4SI_type_node, VSUM4SBS, pim_ovl_16);
+ def_pim_builtin ("vec_sum4s.3", V4SI_type_node, VSUM4SHS, pim_ovl_8);
+
+ def_pim_builtin ("vec_sum2s", V4SI_type_node, VSUM2SWS, pim_group);
+
+ def_pim_builtin ("vec_sums", V4SI_type_node, VSUMSWS, pim_group);
+
+ def_pim_builtin ("vec_trunc", V4SF_type_node, VRFIZ, pim_group);
+
+ def_pim_builtin ("vec_unpackh", NULL_TREE, VUPKHSB, pim_ovl_16 | pim_rt_1d | pim_group);
+ def_pim_builtin ("vec_unpackh.2", NULL_TREE, VUPKHPX, pim_ovl_8p | pim_rt_1d);
+ def_pim_builtin ("vec_unpackh.3", NULL_TREE, VUPKHSH, pim_ovl_8 | pim_rt_1d);
+
+ def_pim_builtin ("vec_unpackl", NULL_TREE, VUPKLSB, pim_ovl_16 | pim_rt_1d | pim_group);
+ def_pim_builtin ("vec_unpackl.2", NULL_TREE, VUPKLPX, pim_ovl_8p | pim_rt_1d);
+ def_pim_builtin ("vec_unpackl.3", NULL_TREE, VUPKLSH, pim_ovl_8 | pim_rt_1d);
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_VADDCUW);
+
+ def_pim_builtin ("vec_vaddcuw", unsigned_V4SI_type_node, VADDCUW, pim_group);
+
+ def_pim_builtin ("vec_vaddfp", V4SF_type_node, VADDFP, pim_group);
+
+ def_pim_builtin ("vec_vaddsbs", NULL_TREE, VADDSBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddshs", NULL_TREE, VADDSHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddsws", NULL_TREE, VADDSWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddubm", NULL_TREE, VADDUBM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddubs", NULL_TREE, VADDUBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduhm", NULL_TREE, VADDUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduhs", NULL_TREE, VADDUHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduwm", NULL_TREE, VADDUWM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduws", NULL_TREE, VADDUWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vand", NULL_TREE, VAND, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vandc", NULL_TREE, VANDC, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsb", NULL_TREE, VAVGSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsh", NULL_TREE, VAVGSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsw", NULL_TREE, VAVGSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgub", NULL_TREE, VAVGUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavguh", NULL_TREE, VAVGUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavguw", NULL_TREE, VAVGUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vcfsx", V4SF_type_node, VCFSX, pim_group);
+
+ def_pim_builtin ("vec_vcfux", V4SF_type_node, VCFUX, pim_group);
+
+ def_pim_builtin ("vec_vcmpbfp", V4SI_type_node, VCMPBFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpeqfp", bool_V4SI_type_node, VCMPEQFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpequb", bool_V16QI_type_node, VCMPEQUB, pim_group);
+
+ def_pim_builtin ("vec_vcmpequh", bool_V8HI_type_node, VCMPEQUH, pim_group);
+
+ def_pim_builtin ("vec_vcmpequw", bool_V4SI_type_node, VCMPEQUW, pim_group);
+
+ def_pim_builtin ("vec_vcmpgefp", bool_V4SI_type_node, VCMPGEFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtfp", bool_V4SI_type_node, VCMPGTFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsb", bool_V16QI_type_node, VCMPGTSB, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsh", bool_V8HI_type_node, VCMPGTSH, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsw", bool_V4SI_type_node, VCMPGTSW, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtub", bool_V16QI_type_node, VCMPGTUB, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtuh", bool_V8HI_type_node, VCMPGTUH, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtuw", bool_V4SI_type_node, VCMPGTUW, pim_group);
+
+ def_pim_builtin ("vec_vctsxs", V4SI_type_node, VCTSXS, pim_group);
+
+ def_pim_builtin ("vec_vctuxs", unsigned_V4SI_type_node, VCTUXS, pim_group);
+
+ def_pim_builtin ("vec_vexptefp", V4SF_type_node, VEXPTEFP, pim_group);
+
+ def_pim_builtin ("vec_vlogefp", V4SF_type_node, VLOGEFP, pim_group);
+
+ def_pim_builtin ("vec_vmaddfp", V4SF_type_node, VMADDFP, pim_group);
+
+ def_pim_builtin ("vec_vmaxfp", NULL_TREE, VMAXFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsb", NULL_TREE, VMAXSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsh", NULL_TREE, VMAXSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsw", NULL_TREE, VMAXSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxub", NULL_TREE, VMAXUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxuh", NULL_TREE, VMAXUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxuw", NULL_TREE, VMAXUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmhaddshs", V8HI_type_node, VMHADDSHS, pim_group);
+
+ def_pim_builtin ("vec_vmhraddshs", V8HI_type_node, VMHRADDSHS, pim_group);
+
+ def_pim_builtin ("vec_vminfp", NULL_TREE, VMINFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsb", NULL_TREE, VMINSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsh", NULL_TREE, VMINSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsw", NULL_TREE, VMINSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminub", NULL_TREE, VMINUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminuh", NULL_TREE, VMINUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminuw", NULL_TREE, VMINUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmladduhm", NULL_TREE, VMLADDUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghb", NULL_TREE, VMRGHB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghh", NULL_TREE, VMRGHH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghw", NULL_TREE, VMRGHW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglb", NULL_TREE, VMRGLB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglh", NULL_TREE, VMRGLH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglw", NULL_TREE, VMRGLW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmsummbm", V4SI_type_node, VMSUMMBM, pim_group);
+
+ def_pim_builtin ("vec_vmsumshm", V4SI_type_node, VMSUMSHM, pim_group);
+
+ def_pim_builtin ("vec_vmsumshs", V4SI_type_node, VMSUMSHS, pim_group);
+
+ def_pim_builtin ("vec_vmsumubm", unsigned_V4SI_type_node, VMSUMUBM, pim_group);
+
+ def_pim_builtin ("vec_vmsumuhm", unsigned_V4SI_type_node, VMSUMUHM, pim_group);
+
+ def_pim_builtin ("vec_vmsumuhs", unsigned_V4SI_type_node, VMSUMUHS, pim_group);
+
+ def_pim_builtin ("vec_vmulesb", V8HI_type_node, VMULESB, pim_group);
+
+ def_pim_builtin ("vec_vmulesh", V4SI_type_node, VMULESH, pim_group);
+
+ def_pim_builtin ("vec_vmuleub", unsigned_V8HI_type_node, VMULEUB, pim_group);
+
+ def_pim_builtin ("vec_vmuleuh", unsigned_V4SI_type_node, VMULEUH, pim_group);
+
+ def_pim_builtin ("vec_vmulosb", V8HI_type_node, VMULOSB, pim_group);
+
+ def_pim_builtin ("vec_vmulosh", V4SI_type_node, VMULOSH, pim_group);
+
+ def_pim_builtin ("vec_vmuloub", unsigned_V8HI_type_node, VMULOUB, pim_group);
+
+ def_pim_builtin ("vec_vmulouh", unsigned_V4SI_type_node, VMULOUH, pim_group);
+
+ def_pim_builtin ("vec_vnmsubfp", V4SF_type_node, VNMSUBFP, pim_group);
+
+ def_pim_builtin ("vec_vnor", NULL_TREE, VNOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vor", NULL_TREE, VOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vperm", V16QI_type_node, VPERM_4SI, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vpkpx", pixel_V8HI_type_node, VPKPX, pim_group);
+
+ def_pim_builtin ("vec_vpkshss", V16QI_type_node, VPKSHSS, pim_group);
+
+ def_pim_builtin ("vec_vpkshus", unsigned_V16QI_type_node, VPKSHUS, pim_group);
+
+ def_pim_builtin ("vec_vpkswss", V8HI_type_node, VPKSWSS, pim_group);
+
+ def_pim_builtin ("vec_vpkswus", unsigned_V8HI_type_node, VPKSWUS, pim_group);
+
+ def_pim_builtin ("vec_vpkuhum", NULL_TREE, VPKUHUM, pim_rt_1h | pim_group);
+
+ def_pim_builtin ("vec_vpkuhus", unsigned_V16QI_type_node, VPKUHUS, pim_group);
+
+ def_pim_builtin ("vec_vpkuwum", NULL_TREE, VPKUWUM, pim_rt_1h | pim_group);
+
+ def_pim_builtin ("vec_vpkuwus", unsigned_V8HI_type_node, VPKUWUS, pim_group);
+
+ def_pim_builtin ("vec_vrefp", V4SF_type_node, VREFP, pim_group);
+
+ def_pim_builtin ("vec_vrfim", V4SF_type_node, VRFIM, pim_group);
+
+ def_pim_builtin ("vec_vrfin", V4SF_type_node, VRFIN, pim_group);
+
+ def_pim_builtin ("vec_vrfip", V4SF_type_node, VRFIP, pim_group);
+
+ def_pim_builtin ("vec_vrfiz", V4SF_type_node, VRFIZ, pim_group);
+
+ def_pim_builtin ("vec_vrlb", NULL_TREE, VRLB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrlh", NULL_TREE, VRLH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrlw", NULL_TREE, VRLW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrsqrtefp", V4SF_type_node, VRSQRTEFP, pim_group);
+
+ def_pim_builtin ("vec_vsel", NULL_TREE, VSEL_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsl", NULL_TREE, VSL, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslb", NULL_TREE, VSLB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsldoi", NULL_TREE, VSLDOI_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslh", NULL_TREE, VSLH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslo", NULL_TREE, VSLO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslw", NULL_TREE, VSLW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vspltb", NULL_TREE, VSPLTB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsplth", NULL_TREE, VSPLTH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vspltisb", V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_vspltish", V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_vspltisw", V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_vspltw", NULL_TREE, VSPLTW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsr", NULL_TREE, VSR, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrab", NULL_TREE, VSRAB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrah", NULL_TREE, VSRAH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsraw", NULL_TREE, VSRAW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrb", NULL_TREE, VSRB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrh", NULL_TREE, VSRH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsro", NULL_TREE, VSRO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrw", NULL_TREE, VSRW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsubcuw", unsigned_V4SI_type_node, VSUBCUW, pim_group);
+
+ def_pim_builtin ("vec_vsubfp", NULL_TREE, VSUBFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubsbs", NULL_TREE, VSUBSBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubshs", NULL_TREE, VSUBSHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubsws", NULL_TREE, VSUBSWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsububm", NULL_TREE, VSUBUBM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsububs", NULL_TREE, VSUBUBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuhm", NULL_TREE, VSUBUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuhs", NULL_TREE, VSUBUHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuwm", NULL_TREE, VSUBUWM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuws", NULL_TREE, VSUBUWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsum4sbs", V4SI_type_node, VSUM4SBS, pim_group);
+
+ def_pim_builtin ("vec_vsum4shs", V4SI_type_node, VSUM4SHS, pim_group);
+
+ def_pim_builtin ("vec_vsum4ubs", unsigned_V4SI_type_node, VSUM4UBS, pim_group);
+
+ def_pim_builtin ("vec_vsum2sws", V4SI_type_node, VSUM2SWS, pim_group);
+
+ def_pim_builtin ("vec_vsumsws", V4SI_type_node, VSUMSWS, pim_group);
+
+ def_pim_builtin ("vec_vupkhpx", NULL_TREE, VUPKHPX, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupkhsb", NULL_TREE, VUPKHSB, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupkhsh", NULL_TREE, VUPKHSH, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklpx", NULL_TREE, VUPKLPX, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklsb", NULL_TREE, VUPKLSB, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklsh", NULL_TREE, VUPKLSH, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vxor", NULL_TREE, VXOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_xor", NULL_TREE, VXOR, pim_rt_12 | pim_group);
+
+ /* PIM Predicates. */
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_ALL_EQ);
+
+ def_pim_builtin ("vec_all_eq", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_eq.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_eq.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_eq.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_ge", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_ge.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_ge.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_gt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_gt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_in", integer_type_node, VCMPBFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_le", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_le.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_le.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_lt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_lt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_nan", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_ne", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_ne.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ne.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ne.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_nge", integer_type_node, VCMPGEFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_ngt", integer_type_node, VCMPGTFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_nle", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_nlt", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_numeric", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_lt | pim_group);
+
+ def_pim_builtin ("vec_any_eq", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_eq.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_eq.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_eq.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_ge", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_ge.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_ge.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_gt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_gt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_le", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_le.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_le.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_lt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_lt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_nan", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_ne", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_ne.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ne.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ne.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_nge", integer_type_node, VCMPGEFP_P, pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_ngt", integer_type_node, VCMPGTFP_P, pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_nle", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_nlt", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_numeric", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_ne | pim_group);
+
+ def_pim_builtin ("vec_any_out", integer_type_node, VCMPBFP_P, pim_cr6_ne | pim_group);
+
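+ /* Note on the table above: each vec_all_* and vec_any_* predicate maps
+ onto a single AltiVec "dot-form" compare (a VCMP..._P code).
+ pim_manip_swap reverses the two operands, so e.g. "lt" reuses the
+ swapped "gt" compare, and the pim_cr6_* flag selects which CR6 bit
+ encodes the predicate's result. */
+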
+ gcc_assert (pim_code == ALTIVEC_PIM__LAST + 1);
+ }
+ /* APPLE LOCAL end AltiVec */
+
+ /* Access to the vec_init patterns. */
+ ftype = build_function_type_list (V4SI_type_node, integer_type_node,
+ integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, float_type_node,
+ float_type_node, float_type_node,
+ float_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF);
+
+ /* Access to the vec_set patterns. */
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ intSI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
+ intQI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
+ float_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF);
+
+ /* Access to the vec_extract patterns. */
+ ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI);
+
+ ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI);
+
+ ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI);
+
+ ftype = build_function_type_list (float_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF);
+}
+
+static void
+rs6000_common_init_builtins (void)
+{
+ struct builtin_description *d;
+ size_t i;
+
+ tree v4sf_ftype_v4sf_v4sf_v16qi
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_v16qi
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_v16qi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_int
+ = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_int
+ = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_int
+ = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v16qi
+ = build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf
+ = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si_v2si
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf_v2sf
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_int_int
+ = build_function_type_list (opaque_V2SI_type_node,
+ integer_type_node, integer_type_node,
+ NULL_TREE);
+
+ tree opaque_ftype_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2si
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2sf
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ char_type_node, NULL_TREE);
+
+ tree v2si_ftype_int_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ integer_type_node, char_type_node, NULL_TREE);
+
+ tree v2si_ftype_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ char_type_node, NULL_TREE);
+
+ tree int_ftype_int_int
+ = build_function_type_list (integer_type_node,
+ integer_type_node, integer_type_node,
+ NULL_TREE);
+
+ tree opaque_ftype_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4si_int
+ = build_function_type_list (V4SF_type_node,
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_int
+ = build_function_type_list (V4SI_type_node,
+ V4SF_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_int
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_int
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_int
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_int
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_int
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_int
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_int
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_v4si
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_v4si
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_v8hi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v8hi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v16qi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_v4sf
+ = build_function_type_list (V4SI_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree v8hi_ftype_v16qi_v16qi
+ = build_function_type_list (V8HI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v8hi
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v8hi_ftype_v4si_v4si
+ = build_function_type_list (V8HI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v16qi_ftype_v8hi_v8hi
+ = build_function_type_list (V16QI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v16qi
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi
+ = build_function_type_list (V4SI_type_node, V8HI_type_node, NULL_TREE);
+ tree int_ftype_v4si_v4si
+ = build_function_type_list (integer_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree int_ftype_v4sf_v4sf
+ = build_function_type_list (integer_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree int_ftype_v16qi_v16qi
+ = build_function_type_list (integer_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree int_ftype_v8hi_v8hi
+ = build_function_type_list (integer_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+
+ /* Add the simple ternary operators. */
+ d = (struct builtin_description *) bdesc_3arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1, mode2, mode3;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ mode3 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ mode3 = insn_data[d->icode].operand[3].mode;
+ }
+
+ /* When all four are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
+ {
+ switch (mode0)
+ {
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque_opaque;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v4sf;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
+ {
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v16qi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v16qi_v16qi_v4si;
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v8hi_v8hi_v4si;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ && mode3 == V4SImode)
+ type = v4sf_ftype_v4sf_v4sf_v4si;
+
+ /* vchar, vchar, vchar, 4 bit literal. */
+ else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v16qi_ftype_v16qi_v16qi_int;
+
+ /* vshort, vshort, vshort, 4 bit literal. */
+ else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v8hi_ftype_v8hi_v8hi_int;
+
+ /* vint, vint, vint, 4 bit literal. */
+ else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4si_ftype_v4si_v4si_int;
+
+ /* vfloat, vfloat, vfloat, 4 bit literal. */
+ else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4sf_ftype_v4sf_v4sf_int;
+
+ else
+ gcc_unreachable ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the simple binary operators. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1, mode2;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ }
+
+ /* When all three operands are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2)
+ {
+ switch (mode0)
+ {
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi;
+ break;
+ case V2SImode:
+ type = v2si_ftype_v2si_v2si;
+ break;
+ case V2SFmode:
+ type = v2sf_ftype_v2sf_v2sf;
+ break;
+ case SImode:
+ type = int_ftype_int_int;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* A few other combos we really don't want to do manually. */
+
+ /* vint, vfloat, vfloat. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
+ type = v4si_ftype_v4sf_v4sf;
+
+ /* vshort, vchar, vchar. */
+ else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v8hi_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vshort. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v4si_ftype_v8hi_v8hi;
+
+ /* vshort, vint, vint. */
+ else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
+ type = v8hi_ftype_v4si_v4si;
+
+ /* vchar, vshort, vshort. */
+ else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v16qi_ftype_v8hi_v8hi;
+
+ /* vint, vchar, vint. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
+ type = v4si_ftype_v16qi_v4si;
+
+ /* vint, vchar, vchar. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v4si_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vint. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
+ type = v4si_ftype_v8hi_v4si;
+
+ /* vint, vint, 5 bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
+ type = v4si_ftype_v4si_int;
+
+ /* vshort, vshort, 5 bit literal. */
+ else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
+ type = v8hi_ftype_v8hi_int;
+
+ /* vchar, vchar, 5 bit literal. */
+ else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
+ type = v16qi_ftype_v16qi_int;
+
+ /* vfloat, vint, 5 bit literal. */
+ else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
+ type = v4sf_ftype_v4si_int;
+
+ /* vint, vfloat, 5 bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
+ type = v4si_ftype_v4sf_int;
+
+ else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
+ type = v2si_ftype_int_int;
+
+ else if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
+ type = v2si_ftype_v2si_char;
+
+ else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
+ type = v2si_ftype_int_char;
+
+ else
+ {
+ /* int, x, x. */
+ gcc_assert (mode0 == SImode);
+ switch (mode1)
+ {
+ case V4SImode:
+ type = int_ftype_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = int_ftype_v4sf_v4sf;
+ break;
+ case V16QImode:
+ type = int_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = int_ftype_v8hi_v8hi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the simple unary operators. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ }
+
+ if (mode0 == V4SImode && mode1 == QImode)
+ type = v4si_ftype_int;
+ else if (mode0 == V8HImode && mode1 == QImode)
+ type = v8hi_ftype_int;
+ else if (mode0 == V16QImode && mode1 == QImode)
+ type = v16qi_ftype_int;
+ else if (mode0 == VOIDmode && mode1 == VOIDmode)
+ type = opaque_ftype_opaque;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode)
+ type = v4sf_ftype_v4sf;
+ else if (mode0 == V8HImode && mode1 == V16QImode)
+ type = v8hi_ftype_v16qi;
+ else if (mode0 == V4SImode && mode1 == V8HImode)
+ type = v4si_ftype_v8hi;
+ else if (mode0 == V2SImode && mode1 == V2SImode)
+ type = v2si_ftype_v2si;
+ else if (mode0 == V2SFmode && mode1 == V2SFmode)
+ type = v2sf_ftype_v2sf;
+ else if (mode0 == V2SFmode && mode1 == V2SImode)
+ type = v2sf_ftype_v2si;
+ else if (mode0 == V2SImode && mode1 == V2SFmode)
+ type = v2si_ftype_v2sf;
+ else if (mode0 == V2SImode && mode1 == QImode)
+ type = v2si_ftype_char;
+ else
+ gcc_unreachable ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
+
+static void
+rs6000_init_libfuncs (void)
+{
+ if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
+ && !TARGET_POWER2 && !TARGET_POWERPC)
+ {
+ /* AIX library routines for float->int conversion. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
+ }
+
+ /* AIX/Darwin/64-bit Linux quad floating point routines. */
+ if (!TARGET_IEEEQUAD)
+ if (!TARGET_XL_COMPAT)
+ {
+ set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
+ set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
+ set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
+
+ if (TARGET_SOFT_FLOAT)
+ {
+ set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
+ set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
+ set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
+ set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
+ set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
+ set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
+ set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
+ set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
+ set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
+ }
+ }
+ else
+ {
+ set_optab_libfunc (add_optab, TFmode, "_xlqadd");
+ set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
+ set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ }
+ else /* TARGET_IEEEQUAD */
+ {
+ /* 32-bit SVR4 quad floating point routines. */
+
+ set_optab_libfunc (add_optab, TFmode, "_q_add");
+ set_optab_libfunc (sub_optab, TFmode, "_q_sub");
+ set_optab_libfunc (neg_optab, TFmode, "_q_neg");
+ set_optab_libfunc (smul_optab, TFmode, "_q_mul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
+ if (TARGET_PPC_GPOPT || TARGET_POWER2)
+ set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
+
+ set_optab_libfunc (eq_optab, TFmode, "_q_feq");
+ set_optab_libfunc (ne_optab, TFmode, "_q_fne");
+ set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
+ set_optab_libfunc (ge_optab, TFmode, "_q_fge");
+ set_optab_libfunc (lt_optab, TFmode, "_q_flt");
+ set_optab_libfunc (le_optab, TFmode, "_q_fle");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
+ set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
+ }
+}
+
+
+/* Expand a block clear operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code (a call to memset).
+
+ operands[0] is the destination
+ operands[1] is the length
+ operands[2] (the fill value, known to be zero here) is not used
+ operands[3] is the alignment */
+
+int
+expand_block_clear (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[3];
+ bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ HOST_WIDE_INT align;
+ HOST_WIDE_INT bytes;
+ int offset;
+ int clear_bytes;
+ int clear_step;
+ /* APPLE LOCAL Altivec 3840704 */
+ bool cpu_altivec = TARGET_ALTIVEC && ! flag_disable_opts_for_faltivec;
+
+ /* If this is not a fixed size clear, just call memset. */
+ if (! constp)
+ return 0;
+
+ /* The alignment must be a compile-time constant. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to clear? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* Use the builtin memset after a point, to avoid huge code bloat.
+ When optimize_size, avoid any significant code bloat; calling
+ memset is about 4 instructions, so allow for one instruction to
+ load zero and three to do clearing. */
+ /* APPLE LOCAL Altivec 3840704 */
+ if (cpu_altivec && align >= 128)
+ clear_step = 16;
+ else if (TARGET_POWERPC64 && align >= 32)
+ clear_step = 8;
+ else
+ clear_step = 4;
+
+ if (optimize_size && bytes > 3 * clear_step)
+ return 0;
+ if (! optimize_size && bytes > 8 * clear_step)
+ return 0;
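+
+ /* Worked example: with AltiVec and 128-bit alignment, clear_step is 16,
+ so with -Os at most 3 * 16 = 48 bytes are cleared inline, and without
+ it at most 8 * 16 = 128 bytes; anything larger falls back to the
+ memset call. */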
+
+ for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
+ {
+ enum machine_mode mode = BLKmode;
+ rtx dest;
+
+ /* APPLE LOCAL Altivec 3840704 */
+ if (bytes >= 16 && cpu_altivec && align >= 128)
+ {
+ clear_bytes = 16;
+ mode = V4SImode;
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ clear_bytes = 8;
+ mode = DImode;
+ }
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
+ { /* move 4 bytes */
+ clear_bytes = 4;
+ mode = SImode;
+ }
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ { /* move 2 bytes */
+ clear_bytes = 2;
+ mode = HImode;
+ }
+ else /* move 1 byte at a time */
+ {
+ clear_bytes = 1;
+ mode = QImode;
+ }
+
+ dest = adjust_address (orig_dest, mode, offset);
+
+ emit_move_insn (dest, CONST0_RTX (mode));
+ }
+
+ return 1;
+}
+
+
+/* Expand a block move operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code.
+
+ operands[0] is the destination
+ operands[1] is the source
+ operands[2] is the length
+ operands[3] is the alignment */
+
+#define MAX_MOVE_REG 4
+
+int
+expand_block_move (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx orig_src = operands[1];
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ int constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ int align;
+ int bytes;
+ int offset;
+ int move_bytes;
+ rtx stores[MAX_MOVE_REG];
+ int num_reg = 0;
+
+ /* If this is not a fixed size move, just call memcpy */
+ if (! constp)
+ return 0;
+
+ /* The alignment must be a compile-time constant. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to move? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* store_one_arg depends on expand_block_move to handle at least the size of
+ reg_parm_stack_space. */
+ if (bytes > (TARGET_POWERPC64 ? 64 : 32))
+ return 0;
+
+ for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
+ {
+ union {
+ rtx (*movmemsi) (rtx, rtx, rtx, rtx);
+ rtx (*mov) (rtx, rtx);
+ } gen_func;
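+
+ /* Only one member of gen_func is meaningful per iteration: the
+ movmemsi generators take (dest, src, length, align) and are used
+ when MODE is left as BLKmode below, while the plain mov generators
+ take (dest, src) for a fixed-mode piece. */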
+ enum machine_mode mode = BLKmode;
+ rtx src, dest;
+
+ /* Altivec first, since it will be faster than a string move
+ when it applies, and usually not significantly larger. */
+ /* APPLE LOCAL begin Altivec 3840704 */
+ if (TARGET_ALTIVEC && ! flag_disable_opts_for_faltivec
+ && bytes >= 16 && align >= 128)
+ /* APPLE LOCAL end Altivec 3840704 */
+ {
+ move_bytes = 16;
+ mode = V4SImode;
+ gen_func.mov = gen_movv4si;
+ }
+ else if (TARGET_STRING
+ && bytes > 24 /* move up to 32 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10]
+ && ! fixed_regs[11]
+ && ! fixed_regs[12])
+ {
+ move_bytes = (bytes > 32) ? 32 : bytes;
+ gen_func.movmemsi = gen_movmemsi_8reg;
+ }
+ else if (TARGET_STRING
+ && bytes > 16 /* move up to 24 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10])
+ {
+ move_bytes = (bytes > 24) ? 24 : bytes;
+ gen_func.movmemsi = gen_movmemsi_6reg;
+ }
+ else if (TARGET_STRING
+ && bytes > 8 /* move up to 16 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8])
+ {
+ move_bytes = (bytes > 16) ? 16 : bytes;
+ gen_func.movmemsi = gen_movmemsi_4reg;
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ move_bytes = 8;
+ mode = DImode;
+ gen_func.mov = gen_movdi;
+ }
+ else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
+ { /* move up to 8 bytes at a time */
+ move_bytes = (bytes > 8) ? 8 : bytes;
+ gen_func.movmemsi = gen_movmemsi_2reg;
+ }
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
+ { /* move 4 bytes */
+ move_bytes = 4;
+ mode = SImode;
+ gen_func.mov = gen_movsi;
+ }
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ { /* move 2 bytes */
+ move_bytes = 2;
+ mode = HImode;
+ gen_func.mov = gen_movhi;
+ }
+ else if (TARGET_STRING && bytes > 1)
+ { /* move up to 4 bytes at a time */
+ move_bytes = (bytes > 4) ? 4 : bytes;
+ gen_func.movmemsi = gen_movmemsi_1reg;
+ }
+ else /* move 1 byte at a time */
+ {
+ move_bytes = 1;
+ mode = QImode;
+ gen_func.mov = gen_movqi;
+ }
+
+ src = adjust_address (orig_src, mode, offset);
+ dest = adjust_address (orig_dest, mode, offset);
+
+ if (mode != BLKmode)
+ {
+ rtx tmp_reg = gen_reg_rtx (mode);
+
+ emit_insn ((*gen_func.mov) (tmp_reg, src));
+ stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
+ }
+
+ if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
+ {
+ int i;
+ for (i = 0; i < num_reg; i++)
+ emit_insn (stores[i]);
+ num_reg = 0;
+ }
+
+ if (mode == BLKmode)
+ {
+ /* Move the address into scratch registers. The movmemsi
+ patterns require zero offset. */
+ if (!REG_P (XEXP (src, 0)))
+ {
+ rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
+ src = replace_equiv_address (src, src_reg);
+ }
+ set_mem_size (src, GEN_INT (move_bytes));
+
+ if (!REG_P (XEXP (dest, 0)))
+ {
+ rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
+ dest = replace_equiv_address (dest, dest_reg);
+ }
+ set_mem_size (dest, GEN_INT (move_bytes));
+
+ emit_insn ((*gen_func.movmemsi) (dest, src,
+ GEN_INT (move_bytes & 31),
+ align_rtx));
+ }
+ }
+
+ return 1;
+}
+
+
+/* Return a string to perform a load_multiple operation.
+ operands[0] is the vector.
+ operands[1] is the source address.
+ operands[2] is the first destination register. */
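+
+/* When the address in operands[1] involves one of the destination
+ registers, the code below loads that register last (bumping the
+ address first when necessary) so the base address is not clobbered
+ while it is still needed. */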
+
+const char *
+rs6000_output_load_multiple (rtx operands[3])
+{
+ /* We have to handle the case where the pseudo used to contain the address
+ is assigned to one of the output registers. */
+ int i, j;
+ int words = XVECLEN (operands[0], 0);
+ rtx xop[10];
+
+ if (XVECLEN (operands[0], 0) == 1)
+ return "{l|lwz} %2,0(%1)";
+
+ for (i = 0; i < words; i++)
+ if (refers_to_regno_p (REGNO (operands[2]) + i,
+ REGNO (operands[2]) + i + 1, operands[1], 0))
+ {
+ if (i == words-1)
+ {
+ xop[0] = GEN_INT (4 * (words-1));
+ xop[1] = operands[1];
+ xop[2] = operands[2];
+ output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
+ return "";
+ }
+ else if (i == 0)
+ {
+ xop[0] = GEN_INT (4 * (words-1));
+ xop[1] = operands[1];
+ xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+ output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
+ return "";
+ }
+ else
+ {
+ for (j = 0; j < words; j++)
+ if (j != i)
+ {
+ xop[0] = GEN_INT (j * 4);
+ xop[1] = operands[1];
+ xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
+ output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
+ }
+ xop[0] = GEN_INT (i * 4);
+ xop[1] = operands[1];
+ output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
+ return "";
+ }
+ }
+
+ return "{lsi|lswi} %2,%1,%N0";
+}
+
+
+/* A validation routine: say whether CODE, a condition code, and MODE
+ match. The other alternatives either don't make sense or should
+ never be generated. */
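+
+/* For example, the unsigned codes (GTU, LTU, GEU, LEU) pair only with
+ CCUNSmode, the unordered codes (UNEQ, LTGT, UNGT, UNLT, UNGE, UNLE,
+ ORDERED, UNORDERED) only with CCFPmode, and CCEQmode admits only EQ
+ or NE. */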
+
+void
+validate_condition_mode (enum rtx_code code, enum machine_mode mode)
+{
+ gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+ && GET_MODE_CLASS (mode) == MODE_CC);
+
+ /* These don't make sense. */
+ gcc_assert ((code != GT && code != LT && code != GE && code != LE)
+ || mode != CCUNSmode);
+
+ gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
+ || mode == CCUNSmode);
+
+ gcc_assert (mode == CCFPmode
+ || (code != ORDERED && code != UNORDERED
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT
+ && code != UNGE && code != UNLE));
+
+ /* These should never be generated except for
+ flag_finite_math_only. */
+ gcc_assert (mode != CCFPmode
+ || flag_finite_math_only
+ || (code != LE && code != GE
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT));
+
+ /* These are invalid; the information is not there. */
+ gcc_assert (mode != CCEQmode || code == EQ || code == NE);
+}
+
+
+/* Return 1 if ANDOP is a mask that sets no bits outside the mask
+ required to convert the result of a rotate insn into a shift
+ left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
+
+int
+includes_lshift_p (rtx shiftop, rtx andop)
+{
+ unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
+
+ shift_mask <<= INTVAL (shiftop);
+
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
+}
+
+/* Similar, but for right shift. */
+
+int
+includes_rshift_p (rtx shiftop, rtx andop)
+{
+ unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
+
+ shift_mask >>= INTVAL (shiftop);
+
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
+ to perform a left shift. It must have exactly SHIFTOP least
+ significant 0's, then one or more 1's, then zero or more 0's. */
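+
+/* For example, with SHIFTOP = 4 the mask 0xff0 qualifies (exactly four
+ trailing 0's, then eight 1's), while 0xff8 does not (only three
+ trailing 0's). */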
+
+int
+includes_rldic_lshift_p (rtx shiftop, rtx andop)
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ c = INTVAL (andop);
+ if (c == 0 || c == ~0)
+ return 0;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+
+ /* Find the least significant one bit. */
+ lsb = c & -c;
+
+ /* It must coincide with the LSB of the shift mask. */
+ if (-lsb != shift_mask)
+ return 0;
+
+ /* Invert to look for the next transition (if any). */
+ c = ~c;
+
+ /* Remove the low group of ones (originally low group of zeros). */
+ c &= -lsb;
+
+ /* Again find the lsb, and check we have all 1's above. */
+ lsb = c & -c;
+ return c == -lsb;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ HOST_WIDE_INT low, high, lsb;
+ HOST_WIDE_INT shift_mask_low, shift_mask_high;
+
+ low = CONST_DOUBLE_LOW (andop);
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = CONST_DOUBLE_HIGH (andop);
+
+ if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
+ || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
+ return 0;
+
+ high = ~high;
+ high &= -lsb;
+
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if (-lsb != shift_mask_low)
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = ~high;
+ low = ~low;
+ low &= -lsb;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ lsb = low & -low;
+ return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
+ to perform a left shift. It must have SHIFTOP or more least
+ significant 0's, with the remainder of the word 1's. */
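+
+/* For example, with SHIFTOP = 4 the mask 0xffffffffffffff00 qualifies
+ (eight trailing 0's, 1's all the way up), while 0xff00 does not,
+ since its 1's stop short of the top of the word. */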
+
+int
+includes_rldicr_lshift_p (rtx shiftop, rtx andop)
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+ c = INTVAL (andop);
+
+ /* Find the least significant one bit. */
+ lsb = c & -c;
+
+ /* It must be covered by the shift mask.
+ This test also rejects c == 0. */
+ if ((lsb & shift_mask) == 0)
+ return 0;
+
+ /* Check we have all 1's above the transition, and reject all 1's. */
+ return c == -lsb && lsb != 1;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ HOST_WIDE_INT low, lsb, shift_mask_low;
+
+ low = CONST_DOUBLE_LOW (andop);
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ {
+ HOST_WIDE_INT high, shift_mask_high;
+
+ high = CONST_DOUBLE_HIGH (andop);
+
+ if (low == 0)
+ {
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if ((lsb & shift_mask_high) == 0)
+ return 0;
+
+ return high == -lsb;
+ }
+ if (high != ~0)
+ return 0;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if ((lsb & shift_mask_low) == 0)
+ return 0;
+
+ return low == -lsb && lsb != 1;
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if the operands will generate valid arguments for an rlwimi
+ instruction doing an insert with right shift in 64-bit mode. The mask
+ may not start on the first bit or stop on the last bit because the
+ wrap-around effects of the instruction do not correspond to the
+ semantics of the RTL insn. */
+
+int
+insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
+{
+ if (INTVAL (startop) > 32
+ && INTVAL (startop) < 64
+ && INTVAL (sizeop) > 1
+ && INTVAL (sizeop) + INTVAL (startop) < 64
+ && INTVAL (shiftop) > 0
+ && INTVAL (sizeop) + INTVAL (shiftop) < 32
+ && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
+ return 1;
+
+ return 0;
+}
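+
+/* For example, SIZEOP = 8, STARTOP = 40, SHIFTOP = 8 passes every test
+ above (40 + 8 < 64, 8 + 8 < 32, 64 - 8 >= 8), so such an insertion
+ can be done with a single rlwimi. */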
+
+/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
+ for lfq and stfq insns iff the registers are hard registers. */
+
+int
+registers_ok_for_quad_peep (rtx reg1, rtx reg2)
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ /* We might have been passed non-floating-point registers. */
+ if (!FP_REGNO_P (REGNO (reg1))
+ || !FP_REGNO_P (REGNO (reg2)))
+ return 0;
+
+ return (REGNO (reg1) == REGNO (reg2) - 1);
+}
+
+/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
+ addr1 and addr2 must be in consecutive memory locations
+ (addr2 == addr1 + 8). */
+
+int
+mems_ok_for_quad_peep (rtx mem1, rtx mem2)
+{
+ rtx addr1, addr2;
+ unsigned int reg1, reg2;
+ int offset1, offset2;
+
+ /* The mems cannot be volatile. */
+ if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
+ return 0;
+
+ addr1 = XEXP (mem1, 0);
+ addr2 = XEXP (mem2, 0);
+
+ /* Extract an offset (if used) from the first addr. */
+ if (GET_CODE (addr1) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr1, 0)) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (XEXP (addr1, 0));
+ /* The offset must be constant! */
+ if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
+ return 0;
+ offset1 = INTVAL (XEXP (addr1, 1));
+ }
+ }
+ else if (GET_CODE (addr1) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (addr1);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset1 = 0;
+ }
+
+ /* And now for the second addr. */
+ if (GET_CODE (addr2) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr2, 0)) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (XEXP (addr2, 0));
+ /* The offset must be constant. */
+ if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+ offset2 = INTVAL (XEXP (addr2, 1));
+ }
+ }
+ else if (GET_CODE (addr2) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (addr2);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset2 = 0;
+ }
+
+ /* Both of these must have the same base register. */
+ if (reg1 != reg2)
+ return 0;
+
+ /* The offset for the second addr must be 8 more than the first addr. */
+ if (offset2 != offset1 + 8)
+ return 0;
+
+ /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
+ instructions. */
+ return 1;
+}
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
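+
+/* For example, on ELF a SYMBOL_REF headed anywhere but BASE_REGS needs a
+ BASE_REGS scratch, a GPR-to-GPR copy returns NO_REGS (no scratch), and
+ anything not handled by the direct cases falls back to GENERAL_REGS. */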
+
+enum reg_class
+rs6000_secondary_reload_class (enum reg_class class,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx in)
+{
+ int regno;
+
+ if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
+#if TARGET_MACHO
+ && MACHOPIC_INDIRECT
+#endif
+ ))
+ {
+ /* We cannot copy a symbolic operand directly into anything
+ other than BASE_REGS for TARGET_ELF. So indicate that a
+ register from BASE_REGS is needed as an intermediate
+ register.
+
+ On Darwin, pic addresses require a load from memory, which
+ needs a base register. */
+ if (class != BASE_REGS
+ && (GET_CODE (in) == SYMBOL_REF
+ || GET_CODE (in) == HIGH
+ || GET_CODE (in) == LABEL_REF
+ || GET_CODE (in) == CONST))
+ return BASE_REGS;
+ }
+
+ if (GET_CODE (in) == REG)
+ {
+ regno = REGNO (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ }
+ else if (GET_CODE (in) == SUBREG)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ else
+ regno = -1;
+
+ /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
+ into anything. */
+ if (class == GENERAL_REGS || class == BASE_REGS
+ || (regno >= 0 && INT_REGNO_P (regno)))
+ return NO_REGS;
+
+ /* Constants, memory, and FP registers can go into FP registers. */
+ if ((regno == -1 || FP_REGNO_P (regno))
+ && (class == FLOAT_REGS || class == NON_SPECIAL_REGS))
+ return NO_REGS;
+
+ /* Memory, and AltiVec registers can go into AltiVec registers. */
+ if ((regno == -1 || ALTIVEC_REGNO_P (regno))
+ && class == ALTIVEC_REGS)
+ return NO_REGS;
+
+ /* We can copy among the CR registers. */
+ if ((class == CR_REGS || class == CR0_REGS)
+ && regno >= 0 && CR_REGNO_P (regno))
+ return NO_REGS;
+
+ /* Otherwise, we need GENERAL_REGS. */
+ return GENERAL_REGS;
+}
+
+/* Given a comparison operation, return the bit number in CCR to test. We
+ know this is a valid comparison.
+
+ SCC_P is 1 if this is for an scc. That means that %D will have been
+ used instead of %C, so the bits will be in different places.
+
+ Return -1 if OP isn't a valid comparison for some reason. */
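+
+/* For example, a GT test against CR field 2 gives base_bit = 8 and so
+ tests CCR bit 9; when SCC_P is set, GE, LE and NE instead test the
+ cror'd bit at base_bit + 3. */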
+
+int
+ccr_bit (rtx op, int scc_p)
+{
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode cc_mode;
+ int cc_regnum;
+ int base_bit;
+ rtx reg;
+
+ if (!COMPARISON_P (op))
+ return -1;
+
+ reg = XEXP (op, 0);
+
+ gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
+
+ cc_mode = GET_MODE (reg);
+ cc_regnum = REGNO (reg);
+ base_bit = 4 * (cc_regnum - CR0_REGNO);
+
+ validate_condition_mode (code, cc_mode);
+
+ /* When generating a sCOND operation, only positive conditions are
+ allowed. */
+ gcc_assert (!scc_p
+ || code == EQ || code == GT || code == LT || code == UNORDERED
+ || code == GTU || code == LTU);
+
+ switch (code)
+ {
+ case NE:
+ return scc_p ? base_bit + 3 : base_bit + 2;
+ case EQ:
+ return base_bit + 2;
+ case GT: case GTU: case UNLE:
+ return base_bit + 1;
+ case LT: case LTU: case UNGE:
+ return base_bit;
+ case ORDERED: case UNORDERED:
+ return base_bit + 3;
+
+ case GE: case GEU:
+ /* If scc, we will have done a cror to put the bit in the
+ unordered position. So test that bit. For integer, this is ! LT
+ unless this is an scc insn. */
+ return scc_p ? base_bit + 3 : base_bit;
+
+ case LE: case LEU:
+ return scc_p ? base_bit + 3 : base_bit + 1;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return the GOT register. */
+
+rtx
+rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
+{
+ /* The second flow pass currently (June 1999) can't update
+ regs_ever_live without disturbing other parts of the compiler, so
+ update it here to make the prolog/epilogue code happy. */
+ if (no_new_pseudos && ! regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM])
+ regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ current_function_uses_pic_offset_table = 1;
+
+ return pic_offset_table_rtx;
+}
+
+/* Function to init struct machine_function.
+ This will be called, via a pointer variable,
+ from push_function_context. */
+
+static struct machine_function *
+rs6000_init_machine_status (void)
+{
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ machine_function *mf = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
+ mf->substitute_pic_base_reg = INVALID_REGNUM;
+ return mf;
+ /* APPLE LOCAL end volatile pic base reg in leaves */
+}
+
+/* These macros test for integers and extract the low-order bits. */
+#define INT_P(X) \
+((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
+ && GET_MODE (X) == VOIDmode)
+
+#define INT_LOWPART(X) \
+ (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
+
+int
+extract_MB (rtx op)
+{
+ int i;
+ unsigned long val = INT_LOWPART (op);
+
+ /* If the high bit is zero, the value is the first 1 bit we find
+ from the left. */
+ if ((val & 0x80000000) == 0)
+ {
+ gcc_assert (val & 0xffffffff);
+
+ i = 1;
+ while (((val <<= 1) & 0x80000000) == 0)
+ ++i;
+ return i;
+ }
+
+ /* If the high bit is set and the low bit is not, or the mask is all
+ 1's, the value is zero. */
+ if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
+ return 0;
+
+ /* Otherwise we have a wrap-around mask. Look for the first 0 bit
+ from the right. */
+ i = 31;
+ while (((val >>= 1) & 1) != 0)
+ --i;
+
+ return i;
+}
+
+int
+extract_ME (rtx op)
+{
+ int i;
+ unsigned long val = INT_LOWPART (op);
+
+ /* If the low bit is zero, the value is the first 1 bit we find from
+ the right. */
+ if ((val & 1) == 0)
+ {
+ gcc_assert (val & 0xffffffff);
+
+ i = 30;
+ while (((val >>= 1) & 1) == 0)
+ --i;
+
+ return i;
+ }
+
+ /* If the low bit is set and the high bit is not, or the mask is all
+ 1's, the value is 31. */
+ if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
+ return 31;
+
+ /* Otherwise we have a wrap-around mask. Look for the first 0 bit
+ from the left. */
+ i = 0;
+ while (((val <<= 1) & 0x80000000) != 0)
+ ++i;
+
+ return i;
+}
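+
+/* Example for both extractors: the mask 0x0ffffff0 covers bits 4..27 in
+ big-endian bit numbering, so extract_MB returns 4 and extract_ME
+ returns 27 -- exactly the MB and ME fields of the matching rlwinm. */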
+
+/* Locate some local-dynamic symbol still in use by this function
+ so that we can print its name in some tls_ld pattern. */
+
+static const char *
+rs6000_get_some_local_dynamic_name (void)
+{
+ rtx insn;
+
+ if (cfun->machine->some_ld_name)
+ return cfun->machine->some_ld_name;
+
+ for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
+ if (INSN_P (insn)
+ && for_each_rtx (&PATTERN (insn),
+ rs6000_get_some_local_dynamic_name_1, 0))
+ return cfun->machine->some_ld_name;
+
+ gcc_unreachable ();
+}
+
+/* Helper function for rs6000_get_some_local_dynamic_name. */
+
+static int
+rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ const char *str = XSTR (x, 0);
+ if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ cfun->machine->some_ld_name = str;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Write out a function code label. */
+
+void
+rs6000_output_function_entry (FILE *file, const char *fname)
+{
+ if (fname[0] != '.')
+ {
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ if (DOT_SYMBOLS)
+ putc ('.', file);
+ else
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
+ break;
+
+ case ABI_V4:
+ case ABI_DARWIN:
+ break;
+ }
+ }
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+}
+
+/* Print an operand. Recognize special options, documented below. */
+
+#if TARGET_ELF
+#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
+#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
+#else
+#define SMALL_DATA_RELOC "sda21"
+#define SMALL_DATA_REG 0
+#endif
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ int i;
+ HOST_WIDE_INT val;
+ unsigned HOST_WIDE_INT uval;
+
+ switch (code)
+ {
+ case '.':
+ /* Write out an instruction after the call which may be replaced
+ with glue code by the loader. This depends on the AIX version. */
+ asm_fprintf (file, RS6000_CALL_GLUE);
+ return;
+
+ /* %a is output_address. */
+
+ case 'A':
+ /* If X is a constant integer whose low-order 5 bits are zero,
+ write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
+ in the AIX assembler where "sri" with a zero shift count
+ writes a trash instruction. */
+ if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
+ putc ('l', file);
+ else
+ putc ('r', file);
+ return;
+
+ case 'b':
+ /* If constant, low-order 16 bits of constant, unsigned.
+ Otherwise, write normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'B':
+ /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
+ for 64-bit mask direction. */
+ putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
+ return;
+
+ /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
+ output_operand. */
+
+ case 'c':
+ /* X is a CR register. Print the number of the GT bit of the CR. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%E value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
+ return;
+
+ case 'D':
+ /* Like 'J' but get to the GT bit only. */
+ gcc_assert (GET_CODE (x) == REG);
+
+ /* Bit 1 is GT bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 1;
+
+ /* Add one for shift count in rlinm for scc. */
+ fprintf (file, "%d", i + 1);
+ return;
+
+ case 'E':
+ /* X is a CR register. Print the number of the EQ bit of the CR. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%E value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
+ return;
+
+ case 'f':
+ /* X is a CR register. Print the shift count needed to move it
+ to the high-order four bits. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%f value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'F':
+ /* Similar, but print the count for the rotate in the opposite
+ direction. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%F value");
+ else
+ fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'G':
+ /* X is a constant integer. If it is negative, print "m",
+ otherwise print "z". This is to make an aze or ame insn. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%G value");
+ else if (INTVAL (x) >= 0)
+ putc ('z', file);
+ else
+ putc ('m', file);
+ return;
+
+ case 'h':
+ /* If constant, output low-order five bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'H':
+ /* If constant, output low-order six bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'I':
+ /* Print `i' if this is a constant, else nothing. */
+ if (INT_P (x))
+ putc ('i', file);
+ return;
+
+ case 'j':
+ /* Write the bit number in CCR for jump. */
+ i = ccr_bit (x, 0);
+ if (i == -1)
+ output_operand_lossage ("invalid %%j code");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'J':
+ /* Similar, but add one for shift count in rlinm for scc and pass
+ scc flag to `ccr_bit'. */
+ i = ccr_bit (x, 1);
+ if (i == -1)
+ output_operand_lossage ("invalid %%J code");
+ else
+ /* If we want bit 31, write a shift count of zero, not 32. */
+ fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ return;
+
+ case 'k':
+ /* X must be a constant. Write the 1's complement of the
+ constant. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%k value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
+ return;
+
+ case 'K':
+ /* X must be a symbolic constant on ELF. Write an
+ expression suitable for an 'addi' that adds in the low 16
+ bits of the MEM. */
+ if (GET_CODE (x) != CONST)
+ {
+ print_operand_address (file, x);
+ fputs ("@l", file);
+ }
+ else
+ {
+ if (GET_CODE (XEXP (x, 0)) != PLUS
+ || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
+ output_operand_lossage ("invalid %%K value");
+ print_operand_address (file, XEXP (XEXP (x, 0), 0));
+ fputs ("@l", file);
+ /* For GNU as, there must be a non-alphanumeric character
+ between 'l' and the number. The '-' is added by
+ print_operand() already. */
+ if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
+ fputs ("+", file);
+ print_operand (file, XEXP (XEXP (x, 0), 1), 0);
+ }
+ return;
+
+ /* %l is output_asm_label. */
+
+ case 'L':
+ /* Write second word of DImode or DFmode reference. Works on register
+ or non-indexed memory only. */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 1], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* Handle possible auto-increment. Since it is pre-increment and
+ we have already done it, we can just use an offset of word. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ UNITS_PER_WORD));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode,
+ UNITS_PER_WORD),
+ 0));
+
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'm':
+ /* MB value for a mask operand. */
+ if (! mask_operand (x, SImode))
+ output_operand_lossage ("invalid %%m value");
+
+ fprintf (file, "%d", extract_MB (x));
+ return;
+
+ case 'M':
+ /* ME value for a mask operand. */
+ if (! mask_operand (x, SImode))
+ output_operand_lossage ("invalid %%M value");
+
+ fprintf (file, "%d", extract_ME (x));
+ return;
+
+ /* %n outputs the negative of its operand. */
+
+ case 'N':
+ /* Write the number of elements in the vector times 4. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%N value");
+ else
+ fprintf (file, "%d", XVECLEN (x, 0) * 4);
+ return;
+
+ case 'O':
+ /* Similar, but subtract 1 first. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%O value");
+ else
+ fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
+ return;
+
+ case 'p':
+ /* X is a CONST_INT that is a power of two. Output the logarithm. */
+ if (! INT_P (x)
+ || INT_LOWPART (x) < 0
+ || (i = exact_log2 (INT_LOWPART (x))) < 0)
+ output_operand_lossage ("invalid %%p value");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'P':
+ /* The operand must be an indirect memory reference. The result
+ is the register name. */
+ if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
+ || REGNO (XEXP (x, 0)) >= 32)
+ output_operand_lossage ("invalid %%P value");
+ else
+ fputs (reg_names[REGNO (XEXP (x, 0))], file);
+ return;
+
+ case 'q':
+ /* This outputs the logical code corresponding to a boolean
+ expression. The expression may have one or both operands
+ negated (if one, only the first one). For condition register
+ logical operations, it will also treat the negated
+ CR codes as NOTs, but not handle NOTs of them. */
+ {
+ const char *const *t = 0;
+ const char *s;
+ enum rtx_code code = GET_CODE (x);
+ static const char * const tbl[3][3] = {
+ { "and", "andc", "nor" },
+ { "or", "orc", "nand" },
+ { "xor", "eqv", "xor" } };
+
+ if (code == AND)
+ t = tbl[0];
+ else if (code == IOR)
+ t = tbl[1];
+ else if (code == XOR)
+ t = tbl[2];
+ else
+ output_operand_lossage ("invalid %%q value");
+
+ if (GET_CODE (XEXP (x, 0)) != NOT)
+ s = t[0];
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == NOT)
+ s = t[2];
+ else
+ s = t[1];
+ }
+
+ fputs (s, file);
+ }
+ return;
+
+ case 'Q':
+ if (TARGET_MFCRF)
+ fputc (',', file); /* ... and fall through to case 'R' below. */
+ else
+ return;
+ /* FALLTHRU */
+
+ case 'R':
+ /* X is a CR register. Print the mask for `mtcrf'. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%R value");
+ else
+ fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 's':
+ /* Low 5 bits of 32 - value */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%s value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
+ return;
+
+ case 'S':
+ /* PowerPC64 mask position. All 0's is excluded.
+ CONST_INT 32-bit mask is considered sign-extended so any
+ transition must occur within the CONST_INT, not on the boundary. */
+ if (! mask64_operand (x, DImode))
+ output_operand_lossage ("invalid %%S value");
+
+ uval = INT_LOWPART (x);
+
+ if (uval & 1) /* Clear Left */
+ {
+#if HOST_BITS_PER_WIDE_INT > 64
+ uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
+#endif
+ i = 64;
+ }
+ else /* Clear Right */
+ {
+ uval = ~uval;
+#if HOST_BITS_PER_WIDE_INT > 64
+ uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
+#endif
+ i = 63;
+ }
+ while (uval != 0)
+ --i, uval >>= 1;
+ gcc_assert (i >= 0);
+ fprintf (file, "%d", i);
+ return;
+
+ case 't':
+ /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
+ gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
+
+ /* Bit 3 is OV bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 3;
+
+ /* If we want bit 31, write a shift count of zero, not 32. */
+ fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ return;
+
+ case 'T':
+ /* Print the symbolic name of a branch target register. */
+ if (GET_CODE (x) != REG || (REGNO (x) != LINK_REGISTER_REGNUM
+ && REGNO (x) != COUNT_REGISTER_REGNUM))
+ output_operand_lossage ("invalid %%T value");
+ else if (REGNO (x) == LINK_REGISTER_REGNUM)
+ fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
+ else
+ fputs ("ctr", file);
+ return;
+
+ case 'u':
+ /* High-order 16 bits of constant for use in unsigned operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%u value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'v':
+ /* High-order 16 bits of constant for use in signed operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%v value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'U':
+ /* Print `u' if this has an auto-increment or auto-decrement. */
+ if (GET_CODE (x) == MEM
+ && (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC))
+ putc ('u', file);
+ return;
+
+ case 'V':
+ /* Print the trap code for this operand. */
+ switch (GET_CODE (x))
+ {
+ case EQ:
+ fputs ("eq", file); /* 4 */
+ break;
+ case NE:
+ fputs ("ne", file); /* 24 */
+ break;
+ case LT:
+ fputs ("lt", file); /* 16 */
+ break;
+ case LE:
+ fputs ("le", file); /* 20 */
+ break;
+ case GT:
+ fputs ("gt", file); /* 8 */
+ break;
+ case GE:
+ fputs ("ge", file); /* 12 */
+ break;
+ case LTU:
+ fputs ("llt", file); /* 2 */
+ break;
+ case LEU:
+ fputs ("lle", file); /* 6 */
+ break;
+ case GTU:
+ fputs ("lgt", file); /* 1 */
+ break;
+ case GEU:
+ fputs ("lge", file); /* 5 */
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'w':
+ /* If constant, low-order 16 bits of constant, signed. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'W':
+ /* MB value for a PowerPC64 rldic operand. */
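+ /* This is the number of leading zero bits of the value; e.g.,
+ 0x1000000000000000 prints 3. */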
+ val = (GET_CODE (x) == CONST_INT
+ ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
+
+ if (val < 0)
+ i = -1;
+ else
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
+ if ((val <<= 1) < 0)
+ break;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (x) == CONST_INT && i >= 0)
+ i += 32; /* zero-extend high-part was all 0's */
+ else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
+ {
+ val = CONST_DOUBLE_LOW (x);
+
+ gcc_assert (val);
+ if (val < 0)
+ --i;
+ else
+ for ( ; i < 64; i++)
+ if ((val <<= 1) < 0)
+ break;
+ }
+#endif
+
+ fprintf (file, "%d", i + 1);
+ return;
+
+ case 'X':
+ if (GET_CODE (x) == MEM
+ && legitimate_indexed_address_p (XEXP (x, 0), 0))
+ putc ('x', file);
+ return;
+
+ case 'Y':
+ /* Like 'L', for third word of TImode */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 2], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'z':
+ /* X is a SYMBOL_REF. Write out the name preceded by a
+ period and without any trailing data in brackets. Used for function
+ names. If we are configured for System V (or the embedded ABI) on
+ the PowerPC, do not emit the period, since those systems do not use
+ TOCs and the like. */
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+ /* Mark the decl as referenced so that cgraph will output the
+ function. */
+ if (SYMBOL_REF_DECL (x))
+ mark_decl_referenced (SYMBOL_REF_DECL (x));
+
+ /* For macho, check to see if we need a stub. */
+ if (TARGET_MACHO)
+ {
+ const char *name = XSTR (x, 0);
+#if TARGET_MACHO
+ /* APPLE LOCAL begin axe stubs 5571540 */
+ if (darwin_stubs
+ && MACHOPIC_INDIRECT
+ /* APPLE LOCAL end axe stubs 5571540 */
+ && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
+ name = machopic_indirection_name (x, /*stub_p=*/true);
+#endif
+ assemble_name (file, name);
+ }
+ else if (!DOT_SYMBOLS)
+ assemble_name (file, XSTR (x, 0));
+ else
+ rs6000_output_function_entry (file, XSTR (x, 0));
+ return;
+
+ case 'Z':
+ /* Like 'L', for last word of TImode. */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 3], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ /* Print AltiVec or SPE memory operand. */
+ case 'y':
+ {
+ rtx tmp;
+
+ gcc_assert (GET_CODE (x) == MEM);
+
+ tmp = XEXP (x, 0);
+
+ /* Ugly hack because %y is overloaded. */
+ if (TARGET_E500 && GET_MODE_SIZE (GET_MODE (x)) == 8)
+ {
+ /* Handle [reg]. */
+ if (GET_CODE (tmp) == REG)
+ {
+ fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
+ break;
+ }
+ /* Handle [reg+UIMM]. */
+ else if (GET_CODE (tmp) == PLUS &&
+ GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ int x;
+
+ gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
+
+ x = INTVAL (XEXP (tmp, 1));
+ fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
+ break;
+ }
+
+ /* Fall through. Must be [reg+reg]. */
+ }
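+ /* AltiVec loads and stores ignore the low four address bits, so an
+ explicit (and addr -16) alignment wrapper can simply be stripped. */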
+ if (TARGET_ALTIVEC
+ && GET_CODE (tmp) == AND
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT
+ && INTVAL (XEXP (tmp, 1)) == -16)
+ tmp = XEXP (tmp, 0);
+ if (GET_CODE (tmp) == REG)
+ fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
+ else
+ {
+ gcc_assert (GET_CODE (tmp) == PLUS
+ && REG_P (XEXP (tmp, 0))
+ && REG_P (XEXP (tmp, 1)));
+
+ if (REGNO (XEXP (tmp, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
+ reg_names[ REGNO (XEXP (tmp, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
+ reg_names[ REGNO (XEXP (tmp, 1)) ]);
+ }
+ break;
+ }
+
+ case 0:
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* We need to handle PRE_INC and PRE_DEC here, since we need to
+ know the width from the mode. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC)
+ fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else
+ output_address (XEXP (x, 0));
+ }
+ else
+ output_addr_const (file, x);
+ return;
+
+ case '&':
+ assemble_name (file, rs6000_get_some_local_dynamic_name ());
+ return;
+
+ default:
+ output_operand_lossage ("invalid %%xn code");
+ }
+}
+
+/* Print the address of an operand. */
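+/* For illustration, assuming the default (bare numeric) register
+ names, this emits forms such as "0(9)" for a plain register, "8(9)"
+ for reg+const, "9,10" for reg+reg, and "sym@l(9)" or "lo16(sym)(9)"
+ for ELF and Mach-O LO_SUM addresses respectively. */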
+
+void
+print_operand_address (FILE *file, rtx x)
+{
+ if (GET_CODE (x) == REG)
+ fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
+ || GET_CODE (x) == LABEL_REF)
+ {
+ output_addr_const (file, x);
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ else
+ gcc_assert (!TARGET_TOC);
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
+ {
+ gcc_assert (REG_P (XEXP (x, 0)));
+ if (REGNO (XEXP (x, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
+ reg_names[ REGNO (XEXP (x, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
+ reg_names[ REGNO (XEXP (x, 1)) ]);
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
+ INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
+#if TARGET_ELF
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+#if TARGET_MACHO
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ fprintf (file, "lo16(");
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+ else if (legitimate_constant_pool_address_p (x))
+ {
+ if (TARGET_AIX && (!TARGET_ELF || !TARGET_MINIMAL_TOC))
+ {
+ rtx contains_minus = XEXP (x, 1);
+ rtx minus, symref;
+ const char *name;
+
+ /* Find the (minus (sym) (toc)) buried in X, and temporarily
+ turn it into (sym) for output_addr_const. */
+ while (GET_CODE (XEXP (contains_minus, 0)) != MINUS)
+ contains_minus = XEXP (contains_minus, 0);
+
+ minus = XEXP (contains_minus, 0);
+ symref = XEXP (minus, 0);
+ XEXP (contains_minus, 0) = symref;
+ if (TARGET_ELF)
+ {
+ char *newname;
+
+ name = XSTR (symref, 0);
+ newname = alloca (strlen (name) + sizeof ("@toc"));
+ strcpy (newname, name);
+ strcat (newname, "@toc");
+ XSTR (symref, 0) = newname;
+ }
+ output_addr_const (file, XEXP (x, 1));
+ if (TARGET_ELF)
+ XSTR (symref, 0) = name;
+ XEXP (contains_minus, 0) = minus;
+ }
+ else
+ output_addr_const (file, XEXP (x, 1));
+
+ fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
+ }
+ else
+ gcc_unreachable ();
+}
+
+/* Target hook for assembling integer objects. The PowerPC version has
+ to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
+ is defined. It also needs to handle DI-mode objects on 64-bit
+ targets. */
+
+static bool
+rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+#ifdef RELOCATABLE_NEEDS_FIXUP
+ /* Special handling for SI values. */
+ if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
+ {
+ static int recurse = 0;
+
+ /* For -mrelocatable, we mark all addresses that need to be fixed up
+ in the .fixup section. */
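+ /* Roughly, for each such constant X this emits
+ LCPn:
+ .long (X)@fixup
+ .section ".fixup","aw"
+ .align 2
+ .long LCPn
+ .previous
+ recording in .fixup the address of each word that must be
+ relocated at load time (label spelling is target-dependent). */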
+ if (TARGET_RELOCATABLE
+ && in_section != toc_section
+ && in_section != text_section
+ && !unlikely_text_section_p (in_section)
+ && !recurse
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x))
+ {
+ char buf[256];
+
+ recurse = 1;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
+ fixuplabelno++;
+ ASM_OUTPUT_LABEL (asm_out_file, buf);
+ fprintf (asm_out_file, "\t.long\t(");
+ output_addr_const (asm_out_file, x);
+ fprintf (asm_out_file, ")@fixup\n");
+ fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
+ ASM_OUTPUT_ALIGN (asm_out_file, 2);
+ fprintf (asm_out_file, "\t.long\t");
+ assemble_name (asm_out_file, buf);
+ fprintf (asm_out_file, "\n\t.previous\n");
+ recurse = 0;
+ return true;
+ }
+ /* Remove initial .'s to turn a -mcall-aixdesc function
+ address into the address of the descriptor, not the function
+ itself. */
+ else if (GET_CODE (x) == SYMBOL_REF
+ && XSTR (x, 0)[0] == '.'
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ const char *name = XSTR (x, 0);
+ while (*name == '.')
+ name++;
+
+ fprintf (asm_out_file, "\t.long\t%s\n", name);
+ return true;
+ }
+ }
+#endif /* RELOCATABLE_NEEDS_FIXUP */
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+#ifdef HAVE_GAS_HIDDEN
+/* Emit an assembler directive to set symbol visibility for DECL to
+ VISIBILITY_TYPE. */
+
+static void
+rs6000_assemble_visibility (tree decl, int vis)
+{
+ /* Functions need to have their entry point symbol visibility set as
+ well as their descriptor symbol visibility. */
+ if (DEFAULT_ABI == ABI_AIX
+ && DOT_SYMBOLS
+ && TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ static const char * const visibility_types[] = {
+ NULL, "internal", "hidden", "protected"
+ };
+
+ const char *name, *type;
+
+ name = ((* targetm.strip_name_encoding)
+ (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
+ type = visibility_types[vis];
+
+ fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
+ fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
+ }
+ else
+ default_assemble_visibility (decl, vis);
+}
+#endif
+
+enum rtx_code
+rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
+{
+ /* Reversal of FP compares needs care: an ordered compare
+ becomes an unordered compare and vice versa. */
+ if (mode == CCFPmode
+ && (!flag_finite_math_only
+ || code == UNLT || code == UNLE || code == UNGT || code == UNGE
+ || code == UNEQ || code == LTGT))
+ return reverse_condition_maybe_unordered (code);
+ else
+ return reverse_condition (code);
+}
+
+/* Generate a compare for CODE. Return a brand-new rtx that
+ represents the result of the compare. */
+
+static rtx
+rs6000_generate_compare (enum rtx_code code)
+{
+ enum machine_mode comp_mode;
+ rtx compare_result;
+
+ if (rs6000_compare_fp_p)
+ comp_mode = CCFPmode;
+ else if (code == GTU || code == LTU
+ || code == GEU || code == LEU)
+ comp_mode = CCUNSmode;
+ else if ((code == EQ || code == NE)
+ && GET_CODE (rs6000_compare_op0) == SUBREG
+ && GET_CODE (rs6000_compare_op1) == SUBREG
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op0)
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op1))
+ /* These are unsigned values; perhaps there will be a later
+ ordering compare that can be shared with this one.
+ Unfortunately we cannot detect the signedness of the operands
+ for non-subregs. */
+ comp_mode = CCUNSmode;
+ else
+ comp_mode = CCmode;
+
+ /* First, the compare. */
+ compare_result = gen_reg_rtx (comp_mode);
+
+ /* E500 FP compare instructions on the GPRs. Yuck! */
+ if ((TARGET_E500 && !TARGET_FPRS && TARGET_HARD_FLOAT)
+ && rs6000_compare_fp_p)
+ {
+ rtx cmp, or_result, compare_result2;
+ enum machine_mode op_mode = GET_MODE (rs6000_compare_op0);
+
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
+
+ /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
+ This explains the following mess. */
+
+ switch (code)
+ {
+ case EQ: case UNEQ: case NE: case LTGT:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Synthesize LE and GE from LT/GT || EQ. */
+ if (code == LE || code == GE || code == LEU || code == GEU)
+ {
+ emit_insn (cmp);
+
+ switch (code)
+ {
+ case LE: code = LT; break;
+ case GE: code = GT; break;
+ case LEU: code = LT; break;
+ case GEU: code = GT; break;
+ default: gcc_unreachable ();
+ }
+
+ compare_result2 = gen_reg_rtx (CCFPmode);
+
+ /* Do the EQ. */
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = flag_unsafe_math_optimizations
+ ? gen_tstdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ emit_insn (cmp);
+
+ /* OR them together. */
+ or_result = gen_reg_rtx (CCFPmode);
+ cmp = gen_e500_cr_ior_compare (or_result, compare_result,
+ compare_result2);
+ compare_result = or_result;
+ code = EQ;
+ }
+ else
+ {
+ if (code == NE || code == LTGT)
+ code = NE;
+ else
+ code = EQ;
+ }
+
+ emit_insn (cmp);
+ }
+ else
+ {
+ /* Generate XLC-compatible TFmode compare as PARALLEL with extra
+ CLOBBERs to match cmptf_internal2 pattern. */
+ if (comp_mode == CCFPmode && TARGET_XL_COMPAT
+ && GET_MODE (rs6000_compare_op0) == TFmode
+ && !TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (9,
+ gen_rtx_SET (VOIDmode,
+ compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
+ else if (GET_CODE (rs6000_compare_op1) == UNSPEC
+ && XINT (rs6000_compare_op1, 1) == UNSPEC_SP_TEST)
+ {
+ rtx op1 = XVECEXP (rs6000_compare_op1, 0, 0);
+ comp_mode = CCEQmode;
+ compare_result = gen_reg_rtx (CCEQmode);
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_testdi (compare_result,
+ rs6000_compare_op0, op1));
+ else
+ emit_insn (gen_stack_protect_testsi (compare_result,
+ rs6000_compare_op0, op1));
+ }
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)));
+ }
+
+ /* Some kinds of FP comparisons need an OR operation;
+ under flag_finite_math_only we don't bother. */
+ if (rs6000_compare_fp_p
+ && !flag_finite_math_only
+ && !(TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)
+ && (code == LE || code == GE
+ || code == UNEQ || code == LTGT
+ || code == UNGT || code == UNLT))
+ {
+ enum rtx_code or1, or2;
+ rtx or1_rtx, or2_rtx, compare2_rtx;
+ rtx or_result = gen_reg_rtx (CCEQmode);
+
+ switch (code)
+ {
+ case LE: or1 = LT; or2 = EQ; break;
+ case GE: or1 = GT; or2 = EQ; break;
+ case UNEQ: or1 = UNORDERED; or2 = EQ; break;
+ case LTGT: or1 = LT; or2 = GT; break;
+ case UNGT: or1 = UNORDERED; or2 = GT; break;
+ case UNLT: or1 = UNORDERED; or2 = LT; break;
+ default: gcc_unreachable ();
+ }
+ validate_condition_mode (or1, comp_mode);
+ validate_condition_mode (or2, comp_mode);
+ or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
+ or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
+ compare2_rtx = gen_rtx_COMPARE (CCEQmode,
+ gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
+ const_true_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
+
+ compare_result = or_result;
+ code = EQ;
+ }
+
+ validate_condition_mode (code, GET_MODE (compare_result));
+
+ return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
+}
+
+
+/* Emit the RTL for an sCOND pattern. */
+
+void
+rs6000_emit_sCOND (enum rtx_code code, rtx result)
+{
+ rtx condition_rtx;
+ enum machine_mode op_mode;
+ enum rtx_code cond_code;
+
+ condition_rtx = rs6000_generate_compare (code);
+ cond_code = GET_CODE (condition_rtx);
+
+ if (TARGET_E500 && rs6000_compare_fp_p
+ && !TARGET_FPRS && TARGET_HARD_FLOAT)
+ {
+ rtx t;
+
+ PUT_MODE (condition_rtx, SImode);
+ t = XEXP (condition_rtx, 0);
+
+ gcc_assert (cond_code == NE || cond_code == EQ);
+
+ if (cond_code == NE)
+ emit_insn (gen_e500_flip_gt_bit (t, t));
+
+ emit_insn (gen_move_from_CR_gt_bit (result, t));
+ return;
+ }
+
+ if (cond_code == NE
+ || cond_code == GE || cond_code == LE
+ || cond_code == GEU || cond_code == LEU
+ || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
+ {
+ rtx not_result = gen_reg_rtx (CCEQmode);
+ rtx not_op, rev_cond_rtx;
+ enum machine_mode cc_mode;
+
+ cc_mode = GET_MODE (XEXP (condition_rtx, 0));
+
+ rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
+ SImode, XEXP (condition_rtx, 0), const0_rtx);
+ not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
+ condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
+ }
+
+ op_mode = GET_MODE (rs6000_compare_op0);
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
+
+ if (TARGET_POWERPC64 && (op_mode == DImode || rs6000_compare_fp_p))
+ {
+ PUT_MODE (condition_rtx, DImode);
+ convert_move (result, condition_rtx, 0);
+ }
+ else
+ {
+ PUT_MODE (condition_rtx, SImode);
+ emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
+ }
+}
+
+/* Emit a branch of kind CODE to location LOC. */
+
+void
+rs6000_emit_cbranch (enum rtx_code code, rtx loc)
+{
+ rtx condition_rtx, loc_ref;
+
+ condition_rtx = rs6000_generate_compare (code);
+ loc_ref = gen_rtx_LABEL_REF (VOIDmode, loc);
+ emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
+ loc_ref, pc_rtx)));
+}
+
+/* Return the string to output a conditional branch to LABEL, the
+ assembler text for the branch target, or NULL if the branch is
+ really a conditional return.
+
+ OP is the conditional expression. XEXP (OP, 0) is assumed to be a
+ condition code register and its mode specifies what kind of
+ comparison we made.
+
+ REVERSED is nonzero if we should reverse the sense of the comparison.
+
+ INSN is the insn. */
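+/* For illustration, the strings built below look like
+ "{beq|beq+} 0,L42" (conditional branch with a taken hint, assuming
+ default register names) or, for an out-of-range target,
+ "{bne|bne} 0,$+8\n\tb L42"; the {old|new} alternatives select
+ between the two mnemonic sets. */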
+
+char *
+output_cbranch (rtx op, const char *label, int reversed, rtx insn)
+{
+ static char string[64];
+ enum rtx_code code = GET_CODE (op);
+ rtx cc_reg = XEXP (op, 0);
+ enum machine_mode mode = GET_MODE (cc_reg);
+ int cc_regno = REGNO (cc_reg) - CR0_REGNO;
+ int need_longbranch = label != NULL && get_attr_length (insn) == 8;
+ int really_reversed = reversed ^ need_longbranch;
+ char *s = string;
+ const char *ccode;
+ const char *pred;
+ rtx note;
+
+ validate_condition_mode (code, mode);
+
+ /* Work out which way this really branches. We could use
+ reverse_condition_maybe_unordered here unconditionally, but
+ reversing only when needed makes the resulting assembler clearer. */
+ if (really_reversed)
+ {
+ /* Reversal of FP compares needs care: an ordered compare
+ becomes an unordered compare and vice versa. */
+ if (mode == CCFPmode)
+ code = reverse_condition_maybe_unordered (code);
+ else
+ code = reverse_condition (code);
+ }
+
+ if ((TARGET_E500 && !TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
+ {
+ /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
+ to the GT bit. */
+ switch (code)
+ {
+ case EQ:
+ /* Opposite of GT. */
+ code = GT;
+ break;
+
+ case NE:
+ code = UNLE;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ switch (code)
+ {
+ /* Not all of these are actually distinct opcodes, but
+ we distinguish them for clarity of the resulting assembler. */
+ case NE: case LTGT:
+ ccode = "ne"; break;
+ case EQ: case UNEQ:
+ ccode = "eq"; break;
+ case GE: case GEU:
+ ccode = "ge"; break;
+ case GT: case GTU: case UNGT:
+ ccode = "gt"; break;
+ case LE: case LEU:
+ ccode = "le"; break;
+ case LT: case LTU: case UNLT:
+ ccode = "lt"; break;
+ case UNORDERED: ccode = "un"; break;
+ case ORDERED: ccode = "nu"; break;
+ case UNGE: ccode = "nl"; break;
+ case UNLE: ccode = "ng"; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Maybe we have a guess as to how likely the branch is.
+ The old mnemonics don't have a way to specify this information. */
+ pred = "";
+ note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
+ if (note != NULL_RTX)
+ {
+ /* PROB is the difference from 50%. */
+ int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
+
+ /* Only hint for highly probable/improbable branches on newer
+ cpus as static prediction overrides processor dynamic
+ prediction. For older cpus we may as well always hint, but
+ assume not taken for branches that are very close to 50% as a
+ mispredicted taken branch is more expensive than a
+ mispredicted not-taken branch. */
+ if (rs6000_always_hint
+ || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
+ && br_prob_note_reliable_p (note)))
+ {
+ if (abs (prob) > REG_BR_PROB_BASE / 20
+ && ((prob > 0) ^ need_longbranch))
+ pred = "+";
+ else
+ pred = "-";
+ }
+ }
+
+ if (label == NULL)
+ s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
+ else
+ s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
+
+ /* We need to escape any '%' characters in the reg_names string.
+ Assume they'd only be the first character.... */
+ if (reg_names[cc_regno + CR0_REGNO][0] == '%')
+ *s++ = '%';
+ s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
+
+ if (label != NULL)
+ {
+ /* If the branch distance was too far, we may have to use an
+ unconditional branch to go the distance. */
+ if (need_longbranch)
+ s += sprintf (s, ",$+8\n\tb %s", label);
+ else
+ s += sprintf (s, ",%s", label);
+ }
+
+ return string;
+}
+
+/* Return the string to flip the GT bit on a CR. */
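+/* E.g., with DST in cr2 and SRC in cr3 this returns "crnot 9,13",
+ since the GT bit of crN is CR bit 4*N+1. */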
+char *
+output_e500_flip_gt_bit (rtx dst, rtx src)
+{
+ static char string[64];
+ int a, b;
+
+ gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
+ && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
+
+ /* GT bit. */
+ a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
+ b = 4 * (REGNO (src) - CR0_REGNO) + 1;
+
+ sprintf (string, "crnot %d,%d", a, b);
+ return string;
+}
+
+/* Return the insn index of the vector compare instruction for the
+ given CODE, DEST_MODE and OP_MODE. Return INSN_NOT_AVAILABLE if no
+ valid insn is available. */
+
+static int
+get_vec_cmp_insn (enum rtx_code code,
+ enum machine_mode dest_mode,
+ enum machine_mode op_mode)
+{
+ if (!TARGET_ALTIVEC)
+ return INSN_NOT_AVAILABLE;
+
+ switch (code)
+ {
+ case EQ:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPEQUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPEQUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPEQUW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPEQFP;
+ break;
+ case GE:
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGEFP;
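+ /* NOTE: other GE mode combinations fall through to the GT cases
+ below. */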
+ case GT:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTSB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTSH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTSW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGTFP;
+ break;
+ case GTU:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTUW;
+ break;
+ default:
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
+
+/* Emit a vector compare for operands OP0 and OP1 using code RCODE.
+ DMODE is the expected destination mode. This function is recursive. */
+
+static rtx
+rs6000_emit_vector_compare (enum rtx_code rcode,
+ rtx op0, rtx op1,
+ enum machine_mode dmode)
+{
+ int vec_cmp_insn;
+ rtx mask;
+ enum machine_mode dest_mode;
+ enum machine_mode op_mode = GET_MODE (op1);
+
+ gcc_assert (TARGET_ALTIVEC);
+ gcc_assert (GET_MODE (op0) == GET_MODE (op1));
+
+ /* Floating point vector compare instructions use a V4SImode
+ destination; move the result to the appropriate mode later. */
+ if (dmode == V4SFmode)
+ dest_mode = V4SImode;
+ else
+ dest_mode = dmode;
+
+ mask = gen_reg_rtx (dest_mode);
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+
+ if (vec_cmp_insn == INSN_NOT_AVAILABLE)
+ {
+ bool swap_operands = false;
+ bool try_again = false;
+ switch (rcode)
+ {
+ case LT:
+ rcode = GT;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case LTU:
+ rcode = GTU;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case NE:
+ /* APPLE LOCAL begin mainline 2007-01-18 4875159 */
+ case UNLE:
+ case UNLT:
+ case UNGE:
+ case UNGT:
+ /* Invert condition and try again.
+ e.g., A != B becomes ~(A==B). */
+ {
+ enum rtx_code rev_code;
+ enum insn_code nor_code;
+ rtx eq_rtx;
+
+ rev_code = reverse_condition_maybe_unordered (rcode);
+ eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
+ dest_mode);
+
+ nor_code = one_cmpl_optab->handlers[(int)dest_mode].insn_code;
+ gcc_assert (nor_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
+
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ /* APPLE LOCAL end mainline 2007-01-18 4875159 */
+ case GE:
+ case GEU:
+ case LE:
+ case LEU:
+ /* Try GT/GTU/LT/LTU OR EQ */
+ {
+ rtx c_rtx, eq_rtx;
+ enum insn_code ior_code;
+ enum rtx_code new_code;
+
+ switch (rcode)
+ {
+ case GE:
+ new_code = GT;
+ break;
+
+ case GEU:
+ new_code = GTU;
+ break;
+
+ case LE:
+ new_code = LT;
+ break;
+
+ case LEU:
+ new_code = LTU;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ c_rtx = rs6000_emit_vector_compare (new_code,
+ op0, op1, dest_mode);
+ eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
+ dest_mode);
+
+ ior_code = ior_optab->handlers[(int)dest_mode].insn_code;
+ gcc_assert (ior_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (try_again)
+ {
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+ /* You only get two chances. */
+ gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
+ }
+
+ if (swap_operands)
+ {
+ rtx tmp;
+ tmp = op0;
+ op0 = op1;
+ op1 = tmp;
+ }
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, mask,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (2, op0, op1),
+ vec_cmp_insn)));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+}
+
+/* Return the vector select instruction for MODE. Return
+ INSN_NOT_AVAILABLE if no valid insn exists for the given mode. */
+
+static int
+get_vsel_insn (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V4SImode:
+ return UNSPEC_VSEL4SI;
+ break;
+ case V4SFmode:
+ return UNSPEC_VSEL4SF;
+ break;
+ case V8HImode:
+ return UNSPEC_VSEL8HI;
+ break;
+ case V16QImode:
+ return UNSPEC_VSEL16QI;
+ break;
+ default:
+ return INSN_NOT_AVAILABLE;
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
+
+/* Emit a vector select insn that sets DEST from operands OP1, OP2
+ and MASK. */
+
+static void
+rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
+{
+ rtx t, temp;
+ enum machine_mode dest_mode = GET_MODE (dest);
+ int vsel_insn_index = get_vsel_insn (GET_MODE (dest));
+
+ temp = gen_reg_rtx (dest_mode);
+
+ /* For each vector element, select op1 when the corresponding mask
+ bit is 1, otherwise select op2. */
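+ /* The (op2, op1, mask) operand order presumably mirrors vsel, whose
+ first data operand supplies the bits taken where the mask is 0. */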
+ t = gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (3, op2, op1, mask),
+ vsel_insn_index));
+ emit_insn (t);
+ emit_move_insn (dest, temp);
+ return;
+}
+
+/* Emit vector conditional expression.
+ DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
+ CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
+
+int
+rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
+ rtx cond, rtx cc_op0, rtx cc_op1)
+{
+ enum machine_mode dest_mode = GET_MODE (dest);
+ enum rtx_code rcode = GET_CODE (cond);
+ rtx mask;
+
+ if (!TARGET_ALTIVEC)
+ return 0;
+
+ /* Get the vector mask for the given relational operations. */
+ mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
+
+ rs6000_emit_vector_select (dest, op1, op2, mask);
+
+ return 1;
+}
+
+/* Emit a conditional move: move TRUE_COND to DEST if OP of the
+ operands of the last comparison is nonzero/true, FALSE_COND if it
+ is zero/false. Return 0 if the hardware has no such operation. */
+
+int
+rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
+{
+ enum rtx_code code = GET_CODE (op);
+ rtx op0 = rs6000_compare_op0;
+ rtx op1 = rs6000_compare_op1;
+ REAL_VALUE_TYPE c1;
+ enum machine_mode compare_mode = GET_MODE (op0);
+ enum machine_mode result_mode = GET_MODE (dest);
+ rtx temp;
+ bool is_against_zero;
+
+ /* These modes should always match. */
+ if (GET_MODE (op1) != compare_mode
+ /* In the isel case however, we can use a compare immediate, so
+ op1 may be a small constant. */
+ && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
+ return 0;
+ if (GET_MODE (true_cond) != result_mode)
+ return 0;
+ if (GET_MODE (false_cond) != result_mode)
+ return 0;
+
+ /* First, work out if the hardware can do this at all, or
+ if it's too slow.... */
+ if (! rs6000_compare_fp_p)
+ {
+ if (TARGET_ISEL)
+ return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
+ return 0;
+ }
+ else if (TARGET_E500 && TARGET_HARD_FLOAT && !TARGET_FPRS
+ && SCALAR_FLOAT_MODE_P (compare_mode))
+ return 0;
+
+ is_against_zero = op1 == CONST0_RTX (compare_mode);
+
+ /* A floating-point subtract might overflow, underflow, or produce
+ an inexact result, thus changing the floating-point flags, so it
+ can't be generated if we care about that. It's safe if one side
+ of the construct is zero, since then no subtract will be
+ generated. */
+ if (SCALAR_FLOAT_MODE_P (compare_mode)
+ && flag_trapping_math && ! is_against_zero)
+ return 0;
+
+ /* Eliminate half of the comparisons by switching operands; this
+ makes the remaining code simpler. */
+ if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
+ || code == LTGT || code == LT || code == UNLE)
+ {
+ code = reverse_condition_maybe_unordered (code);
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ }
+
+ /* UNEQ and LTGT take four instructions for a comparison with zero,
+ so it'll probably be faster to use a branch here too. */
+ if (code == UNEQ && HONOR_NANS (compare_mode))
+ return 0;
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
+
+ /* We're going to try to implement comparisons by performing
+ a subtract, then comparing against zero. Unfortunately,
+ Inf - Inf is NaN, which is not zero, so if we don't know
+ that the operand is finite and the comparison would treat
+ EQ differently from UNORDERED, we can't do it. */
+ if (HONOR_INFINITIES (compare_mode)
+ && code != GT && code != UNGE
+ && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
+ /* Constructs of the form (a OP b ? a : b) are safe. */
+ && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
+ || (! rtx_equal_p (op0, true_cond)
+ && ! rtx_equal_p (op1, true_cond))))
+ return 0;
+
+ /* At this point we know we can use fsel. */
+
+ /* Reduce the comparison to a comparison against zero. */
+ if (! is_against_zero)
+ {
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_MINUS (compare_mode, op0, op1)));
+ op0 = temp;
+ op1 = CONST0_RTX (compare_mode);
+ }
+
+ /* If we don't care about NaNs we can reduce some of the comparisons
+ down to faster ones. */
+ if (! HONOR_NANS (compare_mode))
+ switch (code)
+ {
+ case GT:
+ code = LE;
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ break;
+ case UNGE:
+ code = GE;
+ break;
+ case UNEQ:
+ code = EQ;
+ break;
+ default:
+ break;
+ }
+
+ /* Now, reduce everything down to a GE. */
+ switch (code)
+ {
+ case GE:
+ break;
+
+ case LE:
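+ /* a LE 0 <-> -a GE 0 */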
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case ORDERED:
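+ /* ORDERED (a) <-> abs(a) GE 0, false only when a is a NaN */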
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case EQ:
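+ /* a EQ 0 <-> -abs(a) GE 0 */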
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (compare_mode,
+ gen_rtx_ABS (compare_mode, op0))));
+ op0 = temp;
+ break;
+
+ case UNGE:
+ /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
+ temp = gen_reg_rtx (result_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ false_cond = true_cond;
+ true_cond = temp;
+
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case GT:
+ /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
+ temp = gen_reg_rtx (result_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ true_cond = false_cond;
+ false_cond = temp;
+
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ return 1;
+}
+
+/* Same as above, but for ints (isel). */
+
+static int
+rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
+{
+ rtx condition_rtx, cr;
+
+ /* All isel implementations thus far are 32-bit. */
+ if (GET_MODE (rs6000_compare_op0) != SImode)
+ return 0;
+
+ /* We still have to do the compare, because isel doesn't do a
+ compare; it just looks at the CRx bits set by a previous compare
+ instruction. */
+ condition_rtx = rs6000_generate_compare (GET_CODE (op));
+ cr = XEXP (condition_rtx, 0);
+
+ if (GET_MODE (cr) == CCmode)
+ emit_insn (gen_isel_signed (dest, condition_rtx,
+ true_cond, false_cond, cr));
+ else
+ emit_insn (gen_isel_unsigned (dest, condition_rtx,
+ true_cond, false_cond, cr));
+
+ return 1;
+}
+
+const char *
+output_isel (rtx *operands)
+{
+ enum rtx_code code;
+
+ code = GET_CODE (operands[1]);
+ if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
+ {
+ PUT_CODE (operands[1], reverse_condition (code));
+ return "isel %0,%3,%2,%j1";
+ }
+ else
+ return "isel %0,%2,%3,%j1";
+}
+
+void
+rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
+{
+ enum machine_mode mode = GET_MODE (op0);
+ enum rtx_code c;
+ rtx target;
+
+ if (code == SMAX || code == SMIN)
+ c = GE;
+ else
+ c = GEU;
+
+ if (code == SMAX || code == UMAX)
+ target = emit_conditional_move (dest, c, op0, op1, mode,
+ op0, op1, mode, 0);
+ else
+ target = emit_conditional_move (dest, c, op0, op1, mode,
+ op1, op0, mode, 0);
+ /* APPLE LOCAL begin pragma fenv 3943021 */
+ if (flag_trapping_math && target == NULL_RTX)
+ return;
+ /* APPLE LOCAL end pragma fenv 3943021 */
+ gcc_assert (target);
+ if (target != dest)
+ emit_move_insn (dest, target);
+}
+
+/* Emit instructions to perform a load-reserved/store-conditional operation.
+ The operation performed is an atomic
+ (set M (CODE:MODE M OP))
+ If not NULL, BEFORE is atomically set to M before the operation, and
+ AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
+ If SYNC_P then a memory barrier is emitted before the operation.
+ Either OP or M may be wrapped in a NOT operation. */
+
+void
+rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
+ rtx m, rtx op, rtx before_param, rtx after_param,
+ bool sync_p)
+{
+ enum machine_mode used_mode;
+ rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
+ rtx used_m;
+ rtvec vec;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+ rtx shift = NULL_RTX;
+
+ if (sync_p)
+ emit_insn (gen_memory_barrier ());
+
+ if (GET_CODE (m) == NOT)
+ used_m = XEXP (m, 0);
+ else
+ used_m = m;
+
+ /* If this is smaller than SImode, we'll have to use SImode with
+ adjustments. */
+ if (mode == QImode || mode == HImode)
+ {
+ rtx newop, oldop;
+
+ if (MEM_ALIGN (used_m) >= 32)
+ {
+ int ishift = 0;
+ if (BYTES_BIG_ENDIAN)
+ ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
+
+ shift = GEN_INT (ishift);
+ }
+ else
+ {
+ rtx addrSI, aligned_addr;
+ int shift_mask = mode == QImode ? 0x18 : 0x10;
+
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode,
+ XEXP (used_m, 0)));
+ shift = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
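+ /* E.g., a big-endian QImode operand at byte offset 1 in its word:
+ the rlwinm extracts 8 and the xor turns that into a shift of 16,
+ placing the byte in bits 16-23 of the SImode word. */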
+
+ aligned_addr = expand_binop (Pmode, and_optab,
+ XEXP (used_m, 0),
+ GEN_INT (-4), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ used_m = change_address (used_m, SImode, aligned_addr);
+ set_mem_align (used_m, 32);
+ /* It's safe to keep the old alias set of USED_M, because
+ the operation is atomic and only affects the original
+ USED_M. */
+ if (GET_CODE (m) == NOT)
+ m = gen_rtx_NOT (SImode, used_m);
+ else
+ m = used_m;
+ }
+
+ if (GET_CODE (op) == NOT)
+ {
+ oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
+ oldop = gen_rtx_NOT (SImode, oldop);
+ }
+ else
+ oldop = lowpart_subreg (SImode, op, mode);
+
+ switch (code)
+ {
+ case IOR:
+ case XOR:
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+ break;
+
+ case AND:
+ newop = expand_binop (SImode, ior_optab,
+ oldop, GEN_INT (~imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_rotlsi3 (newop, newop, shift));
+ break;
+
+ case PLUS:
+ case MINUS:
+ {
+ rtx mask;
+
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ if (code == PLUS)
+ newop = gen_rtx_PLUS (SImode, m, newop);
+ else
+ newop = gen_rtx_MINUS (SImode, m, newop);
+ newop = gen_rtx_AND (SImode, newop, mask);
+ newop = gen_rtx_IOR (SImode, newop,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask),
+ m));
+ break;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (GET_CODE (m) == NOT)
+ {
+ rtx mask, xorm;
+
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ xorm = gen_rtx_XOR (SImode, used_m, mask);
+ /* Depending on the value of 'op', the XOR or the operation might
+ be able to be simplified away. */
+ newop = simplify_gen_binary (code, SImode, xorm, newop);
+ }
+ op = newop;
+ used_mode = SImode;
+ before = gen_reg_rtx (used_mode);
+ after = gen_reg_rtx (used_mode);
+ }
+ else
+ {
+ used_mode = mode;
+ before = before_param;
+ after = after_param;
+
+ if (before == NULL_RTX)
+ before = gen_reg_rtx (used_mode);
+ if (after == NULL_RTX)
+ after = gen_reg_rtx (used_mode);
+ }
+
+ if ((code == PLUS || code == MINUS || GET_CODE (m) == NOT)
+ && used_mode != mode)
+ the_op = op; /* Computed above. */
+ else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
+ the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
+ else
+ the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
+
+ set_after = gen_rtx_SET (VOIDmode, after, the_op);
+ set_before = gen_rtx_SET (VOIDmode, before, used_m);
+ set_atomic = gen_rtx_SET (VOIDmode, used_m,
+ gen_rtx_UNSPEC (used_mode,
+ gen_rtvec (1, the_op),
+ UNSPEC_SYNC_OP));
+ cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
+
+ if ((code == PLUS || code == MINUS) && used_mode != mode)
+ vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ else
+ vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+ /* Shift and mask the return values properly. */
+ if (used_mode != mode && before_param)
+ {
+ emit_insn (gen_lshrsi3 (before, before, shift));
+ convert_move (before_param, before, 1);
+ }
+
+ if (used_mode != mode && after_param)
+ {
+ emit_insn (gen_lshrsi3 (after, after, shift));
+ convert_move (after_param, after, 1);
+ }
+
+ /* The previous sequence will end with a branch that's dependent on
+ the conditional store, so placing an isync will ensure that no
+ other instructions (especially, no load or store instructions)
+ can start before the atomic operation completes. */
+ if (sync_p)
+ emit_insn (gen_isync ());
+}
+
+/* A subroutine of the atomic operation splitters. Jump to LABEL if
+ COND is true. Mark the jump as unlikely to be taken. */
+
+static void
+emit_unlikely_jump (rtx cond, rtx label)
+{
+ rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
+ rtx x;
+
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+}
+
+/* A subroutine of the atomic operation splitters. Emit a load-locked
+ instruction in MODE. */
+
+static void
+emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
+{
+ rtx (*fn) (rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_load_locked_si;
+ else if (mode == DImode)
+ fn = gen_load_locked_di;
+ emit_insn (fn (reg, mem));
+}
+
+/* A subroutine of the atomic operation splitters. Emit a store-conditional
+ instruction in MODE. */
+
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+ rtx (*fn) (rtx, rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_store_conditional_si;
+ else if (mode == DImode)
+ fn = gen_store_conditional_di;
+
+ /* Emit sync before stwcx. to address PPC405 Erratum. */
+ if (PPC405_ERRATUM77)
+ emit_insn (gen_memory_barrier ());
+
+ emit_insn (fn (res, mem, val));
+}
+
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
+ to perform. MEM is the memory on which to operate. VAL is the second
+ operand of the binary operator. BEFORE and AFTER are optional locations to
+ return the value of MEM either before or after the operation. SCRATCH is
+ a scratch register. */
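+/* The emitted sequence is roughly
+ sync
+ 1: lwarx scratch,0,mem
+ <op> scratch,scratch,val
+ stwcx. scratch,0,mem
+ bne- 1b
+ isync
+ with ldarx/stdcx. used instead for DImode. */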
+
+void
+rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
+ rtx before, rtx after, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_label_rtx ();
+ emit_label (label);
+ label = gen_rtx_LABEL_REF (VOIDmode, label);
+
+ if (before == NULL_RTX)
+ before = scratch;
+ emit_load_locked (mode, before, mem);
+
+ if (code == NOT)
+ x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
+ else if (code == AND)
+ x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
+ else
+ x = gen_rtx_fmt_ee (code, mode, before, val);
+
+ if (after != NULL_RTX)
+ emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
+
+/* Expand an atomic compare and swap operation. MEM is the memory on which
+ to operate. OLDVAL is the old value to be compared. NEWVAL is the new
+ value to be stored. SCRATCH is a scratch GPR. */
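+/* The emitted sequence is roughly
+ sync
+ 1: lwarx retval,0,mem
+ cmpw retval,oldval
+ bne- 2f
+ mr scratch,newval
+ stwcx. scratch,0,mem
+ bne- 1b
+ isync
+ 2:
+ with the doubleword forms used for DImode. */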
+
+void
+rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
+ rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (mode, retval, mem);
+
+ x = gen_rtx_COMPARE (CCmode, retval, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ emit_move_insn (scratch, newval);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
+
+/* Expand an atomic test and set operation. MEM is the memory on which
+ to operate. VAL is the value to set. SCRATCH is a scratch GPR. */
+
+void
+rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+
+ label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label, 0));
+
+ emit_load_locked (mode, retval, mem);
+ emit_move_insn (scratch, val);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
+
+void
+rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addrSI, align, wdst, shift, mask;
+ HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+
+ /* Shift amount for subword relative to aligned word. */
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode, XEXP (mem, 0)));
+ shift = gen_reg_rtx (SImode);
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
+
+ /* Shift and mask old value into position within word. */
+ oldval = convert_modes (SImode, mode, oldval, 1);
+ oldval = expand_binop (SImode, and_optab,
+ oldval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (oldval, oldval, shift));
+
+ /* Shift and mask new value into position within word. */
+ newval = convert_modes (SImode, mode, newval, 1);
+ newval = expand_binop (SImode, and_optab,
+ newval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newval, newval, shift));
+
+ /* Mask for insertion. */
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ /* Address of aligned word containing subword. */
+ align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ mem = change_address (mem, SImode, align);
+ set_mem_align (mem, 32);
+ MEM_VOLATILE_P (mem) = 1;
+
+ wdst = gen_reg_rtx (SImode);
+ emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
+ oldval, newval, mem));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
+ rtx oldval, rtx newval, rtx mem,
+ rtx scratch)
+{
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_memory_barrier ());
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (SImode, scratch, mem);
+
+ /* Mask subword within loaded value for comparison with oldval.
+ Use UNSPEC_AND to avoid clobber. */
+ emit_insn (gen_rtx_SET (SImode, dest,
+ gen_rtx_UNSPEC (SImode,
+ gen_rtvec (2, scratch, mask),
+ UNSPEC_AND)));
+
+ x = gen_rtx_COMPARE (CCmode, dest, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ /* Clear subword within loaded value for insertion of new value. */
+ emit_insn (gen_rtx_SET (SImode, scratch,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask), scratch)));
+ emit_insn (gen_iorsi3 (scratch, scratch, newval));
+ emit_store_conditional (SImode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
+
+
+/* Emit instructions to move SRC to DST. Called by splitters for
+ multi-register moves. It will emit at most one instruction for
+ each register that is accessed; that is, it won't emit li/lis pairs
+ (or equivalent for 64-bit code). One of SRC or DST must be a hard
+ register. */
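+/* E.g., a TImode move between general registers on a 32-bit target
+ becomes four SImode moves, emitted in reverse order when the source
+ and destination ranges overlap destructively. */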
+
+void
+rs6000_split_multireg_move (rtx dst, rtx src)
+{
+ /* The register number of the first register being moved. */
+ int reg;
+ /* The mode that is to be moved. */
+ enum machine_mode mode;
+ /* The mode that the move is being done in, and its size. */
+ enum machine_mode reg_mode;
+ int reg_mode_size;
+ /* The number of registers that will be moved. */
+ int nregs;
+
+ reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
+ mode = GET_MODE (dst);
+ nregs = hard_regno_nregs[reg][mode];
+ if (FP_REGNO_P (reg))
+ reg_mode = DFmode;
+ else if (ALTIVEC_REGNO_P (reg))
+ reg_mode = V16QImode;
+ else if (TARGET_E500_DOUBLE && mode == TFmode)
+ reg_mode = DFmode;
+ else
+ reg_mode = word_mode;
+ reg_mode_size = GET_MODE_SIZE (reg_mode);
+
+ gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
+
+ if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
+ {
+ /* Move the register range backwards if we might have destructive
+ overlap. */
+ int i;
+ for (i = nregs - 1; i >= 0; i--)
+ emit_insn (gen_rtx_SET (VOIDmode,
+ simplify_gen_subreg (reg_mode, dst, mode,
+ i * reg_mode_size),
+ simplify_gen_subreg (reg_mode, src, mode,
+ i * reg_mode_size)));
+ }
+ else
+ {
+ int i;
+ int j = -1;
+ bool used_update = false;
+
+ if (MEM_P (src) && INT_REGNO_P (reg))
+ {
+ rtx breg;
+
+ if (GET_CODE (XEXP (src, 0)) == PRE_INC
+ || GET_CODE (XEXP (src, 0)) == PRE_DEC)
+ {
+ rtx delta_rtx;
+ breg = XEXP (XEXP (src, 0), 0);
+ delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (breg, breg, delta_rtx)
+ : gen_adddi3 (breg, breg, delta_rtx));
+ src = replace_equiv_address (src, breg);
+ }
+ else if (! rs6000_offsettable_memref_p (src))
+ {
+ rtx basereg;
+ basereg = gen_rtx_REG (Pmode, reg);
+ emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
+ src = replace_equiv_address (src, basereg);
+ }
+
+ breg = XEXP (src, 0);
+ if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
+ breg = XEXP (breg, 0);
+
+ /* If the base register we are using to address memory is
+ also a destination reg, then change that register last. */
+ if (REG_P (breg)
+ && REGNO (breg) >= REGNO (dst)
+ && REGNO (breg) < REGNO (dst) + nregs)
+ j = REGNO (breg) - REGNO (dst);
+ }
+
+ if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
+ {
+ rtx breg;
+
+ if (GET_CODE (XEXP (dst, 0)) == PRE_INC
+ || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
+ {
+ rtx delta_rtx;
+ breg = XEXP (XEXP (dst, 0), 0);
+ delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
+
+ /* We have to update the breg before doing the store.
+ Use store with update, if available. */
+
+ if (TARGET_UPDATE)
+ {
+ rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
+ emit_insn (TARGET_32BIT
+ ? (TARGET_POWERPC64
+ ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
+ : gen_movsi_update (breg, breg, delta_rtx, nsrc))
+ : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
+ used_update = true;
+ }
+ else
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (breg, breg, delta_rtx)
+ : gen_adddi3 (breg, breg, delta_rtx));
+ dst = replace_equiv_address (dst, breg);
+ }
+ else
+ gcc_assert (rs6000_offsettable_memref_p (dst));
+ }
+
+ for (i = 0; i < nregs; i++)
+ {
+ /* Calculate index to next subword. */
+ ++j;
+ if (j == nregs)
+ j = 0;
+
+ /* If the compiler already emitted the move of the first word by
+ store with update, there is nothing more to do. */
+ if (j == 0 && used_update)
+ continue;
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ simplify_gen_subreg (reg_mode, dst, mode,
+ j * reg_mode_size),
+ simplify_gen_subreg (reg_mode, src, mode,
+ j * reg_mode_size)));
+ }
+ }
+}
+
+
+/* This page contains routines that are used to determine what the
+ function prologue and epilogue code will do and write them out. */
+
+/* Return the first fixed-point register that is required to be
+ saved. 32 if none. */
+
+int
+first_reg_to_save (void)
+{
+ int first_reg;
+
+ /* Find lowest numbered live register. */
+ for (first_reg = 13; first_reg <= 31; first_reg++)
+ if (regs_ever_live[first_reg]
+ && (! call_used_regs[first_reg]
+ || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
+ break;
+
+#if TARGET_MACHO
+ if (flag_pic
+ && current_function_uses_pic_offset_table
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ && cfun->machine->substitute_pic_base_reg == INVALID_REGNUM
+ && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
+ return RS6000_PIC_OFFSET_TABLE_REGNUM;
+#endif
+
+ return first_reg;
+}
+
+/* Similar, for FP regs. */
+
+int
+first_fp_reg_to_save (void)
+{
+ int first_reg;
+
+ /* Find lowest numbered live register. */
+ for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
+ if (regs_ever_live[first_reg])
+ break;
+
+ return first_reg;
+}
+
+/* Similar, for AltiVec regs. */
+
+static int
+first_altivec_reg_to_save (void)
+{
+ int i;
+
+ /* Stack frame remains as is unless we are in AltiVec ABI. */
+ if (! TARGET_ALTIVEC_ABI)
+ return LAST_ALTIVEC_REGNO + 1;
+
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && current_function_calls_eh_return
+ && ! TARGET_ALTIVEC)
+ return FIRST_ALTIVEC_REGNO + 20;
+
+ /* Find lowest numbered live register. */
+ for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (regs_ever_live[i])
+ break;
+
+ return i;
+}
+
+/* Return a 32-bit mask of the AltiVec registers we need to set in
+ VRSAVE. Bit n of the return value is 1 if Vn is live, where the
+ MSB of the 32-bit word is bit 0. */
+
+static unsigned int
+compute_vrsave_mask (void)
+{
+ unsigned int i, mask = 0;
+
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ call-saved altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && current_function_calls_eh_return
+ && ! TARGET_ALTIVEC)
+ mask |= 0xFFF;
+
+ /* First, find out if we use _any_ altivec registers. */
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (regs_ever_live[i])
+ mask |= ALTIVEC_REG_BIT (i);
+
+ if (mask == 0)
+ return mask;
+
+ /* Next, remove the argument registers from the set. These must
+ be in the VRSAVE mask set by the caller, so we don't need to add
+ them in again. More importantly, the mask we compute here is
+ used to generate CLOBBERs in the set_vrsave insn, and we do not
+ wish the argument registers to die. */
+ for (i = cfun->args_info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
+ mask &= ~ALTIVEC_REG_BIT (i);
+
+ /* Similarly, remove the return value from the set. */
+ {
+ bool yes = false;
+ diddle_return_value (is_altivec_return_reg, &yes);
+ if (yes)
+ mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
+ }
+
+ return mask;
+}
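+
+/* Worked example of the mask layout (illustrative), assuming the
+ usual rs6000.h definition
+
+ ALTIVEC_REG_BIT (R) == (0x80000000 >> ((R) - FIRST_ALTIVEC_REGNO))
+
+ If only V20 and V31 survive the pruning above, the result is
+
+ (0x80000000 >> 20) | (0x80000000 >> 31) == 0x00000801
+
+ that is, bits 20 and 31 counting the MSB as bit 0, which matches
+ the hardware numbering of the VRSAVE special purpose register. */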
+
+/* For a very restricted set of circumstances, we can cut down the
+ size of prologues/epilogues by calling our own save/restore-the-world
+ routines. */
+
+static void
+compute_save_world_info (rs6000_stack_t *info_ptr)
+{
+ info_ptr->world_save_p = 1;
+ info_ptr->world_save_p
+ = (WORLD_SAVE_P (info_ptr)
+ && DEFAULT_ABI == ABI_DARWIN
+ && ! (current_function_calls_setjmp && flag_exceptions)
+ && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
+ && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
+ && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
+ && info_ptr->cr_save_p);
+
+ /* This will not work in conjunction with sibcalls. Make sure there
+ are none. (This check is expensive, but seldom executed.) */
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ rtx insn;
+ for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
+ if (GET_CODE (insn) == CALL_INSN
+ && SIBLING_CALL_P (insn))
+ {
+ info_ptr->world_save_p = 0;
+ break;
+ }
+ }
+
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ /* Even if we're not touching VRsave, make sure there's room on the
+ stack for it, if it looks like we're calling SAVE_WORLD, which
+ will attempt to save it. */
+ info_ptr->vrsave_size = 4;
+
+ /* "Save" the VRsave register too if we're saving the world. */
+ if (info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+
+ /* Because the Darwin register save/restore routines only handle
+ F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
+ check. */
+ gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
+ && (info_ptr->first_altivec_reg_save
+ >= FIRST_SAVED_ALTIVEC_REGNO));
+ }
+ return;
+}
+
+
+static void
+is_altivec_return_reg (rtx reg, void *xyes)
+{
+ bool *yes = (bool *) xyes;
+ if (REGNO (reg) == ALTIVEC_ARG_RETURN)
+ *yes = true;
+}
+
+
+/* Calculate the stack information for the current function. This is
+ complicated by having two separate calling sequences, the AIX calling
+ sequence and the V.4 calling sequence.
+
+ AIX (and Darwin/Mac OS X) stack frames look like:
+ 32-bit 64-bit
+ SP----> +---------------------------------------+
+ | back chain to caller | 0 0
+ +---------------------------------------+
+ | saved CR | 4 8 (8-11)
+ +---------------------------------------+
+ | saved LR | 8 16
+ +---------------------------------------+
+ | reserved for compilers | 12 24
+ +---------------------------------------+
+ | reserved for binders | 16 32
+ +---------------------------------------+
+ | saved TOC pointer | 20 40
+ +---------------------------------------+
+ | Parameter save area (P) | 24 48
+ +---------------------------------------+
+ | Alloca space (A) | 24+P etc.
+ +---------------------------------------+
+ | Local variable space (L) | 24+P+A
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 24+P+A+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 24+P+A+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 24+P+A+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
+ +---------------------------------------+
+ | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for AIX configurations is two words (i.e., 8
+ or 16 bytes).
+
+
+ V.4 stack frames look like:
+
+ SP----> +---------------------------------------+
+ | back chain to caller | 0
+ +---------------------------------------+
+ | caller's saved LR | 4
+ +---------------------------------------+
+ | Parameter save area (P) | 8
+ +---------------------------------------+
+ | Alloca space (A) | 8+P
+ +---------------------------------------+
+ | Varargs save area (V) | 8+P+A
+ +---------------------------------------+
+ | Local variable space (L) | 8+P+A+V
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 8+P+A+V+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 8+P+A+V+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
+ +---------------------------------------+
+ | SPE: area for 64-bit GP registers |
+ +---------------------------------------+
+ | SPE alignment padding |
+ +---------------------------------------+
+ | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
+ +---------------------------------------+
+ | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
+ given. (But note below and in sysv4.h that we require only 8 and
+ may round up the size of our stack frame anyways. The historical
+ reason is early versions of powerpc-linux which didn't properly
+ align the stack at program startup. A happy side-effect is that
+ -mno-eabi libraries can be used with -meabi programs.)
+
+ The EABI configuration defaults to the V.4 layout. However,
+ the stack alignment requirements may differ. If -mno-eabi is not
+ given, the required stack alignment is 8 bytes; if -mno-eabi is
+ given, the required alignment is 16 bytes. (But see V.4 comment
+ above.) */
+
+#ifndef ABI_STACK_BOUNDARY
+#define ABI_STACK_BOUNDARY STACK_BOUNDARY
+#endif
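+
+/* A small worked example of the AIX/Darwin layout above (32-bit,
+ illustrative numbers): with P = 32 bytes of parameter area, A = 0,
+ L = 16 and X = 0, the parameter area starts at SP+24, alloca space
+ and locals at SP+56, and the conversion temporary and the AltiVec
+ save area at SP+72; each region begins where the previous running
+ sum ends, exactly as the diagram indicates. */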
+
+static rs6000_stack_t *
+rs6000_stack_info (void)
+{
+ static rs6000_stack_t info;
+ rs6000_stack_t *info_ptr = &info;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ int ehrd_size;
+ int save_align;
+ HOST_WIDE_INT non_fixed_size;
+
+ memset (&info, 0, sizeof (info));
+
+ if (TARGET_SPE)
+ {
+ /* Cache value so we don't rescan instruction chain over and over. */
+ if (cfun->machine->insn_chain_scanned_p == 0)
+ cfun->machine->insn_chain_scanned_p
+ = spe_func_has_64bit_regs_p () + 1;
+ info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
+ }
+
+ /* Select which calling sequence. */
+ info_ptr->abi = DEFAULT_ABI;
+
+ /* Calculate which registers need to be saved & save area size. */
+ info_ptr->first_gp_reg_save = first_reg_to_save ();
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
+ even if it currently looks like we won't. */
+ if (((TARGET_TOC && TARGET_MINIMAL_TOC)
+ || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
+ || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
+ && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
+ info_ptr->gp_size = reg_size * (32 - RS6000_PIC_OFFSET_TABLE_REGNUM);
+ else
+ info_ptr->gp_size = reg_size * (32 - info_ptr->first_gp_reg_save);
+
+ /* For the SPE, we have an additional upper 32 bits on each GPR.
+ Ideally we should save the entire 64 bits only when the upper
+ half is used in SIMD instructions. Since we only record which
+ registers are live (not the size at which they are used), this
+ proves difficult because we'd have to traverse the instruction
+ chain at the right time, taking reload into account. This is a
+ real pain, so we opt to save all GPRs in 64 bits if even one
+ register is used in 64 bits. Otherwise, all the registers in
+ the frame get saved in 32 bits.
+
+ So, when we save all GPRs (except the SP) in 64 bits, the
+ traditional GP save area will be empty. */
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ info_ptr->gp_size = 0;
+
+ info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
+ info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
+
+ info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
+ info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
+ - info_ptr->first_altivec_reg_save);
+
+ /* Does this function call anything? */
+ info_ptr->calls_p = (! current_function_is_leaf
+ || cfun->machine->ra_needs_full_frame);
+
+ /* Determine if we need to save the link register. */
+ if ((DEFAULT_ABI == ABI_AIX
+ && current_function_profile
+ && !TARGET_PROFILE_KERNEL)
+#ifdef TARGET_RELOCATABLE
+ || (TARGET_RELOCATABLE && (get_pool_size () != 0))
+#endif
+ || (info_ptr->first_fp_reg_save != 64
+ && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
+ || info_ptr->first_altivec_reg_save <= LAST_ALTIVEC_REGNO
+ || (DEFAULT_ABI == ABI_V4 && current_function_calls_alloca)
+ || info_ptr->calls_p
+ || rs6000_ra_ever_killed ())
+ {
+ info_ptr->lr_save_p = 1;
+ regs_ever_live[LINK_REGISTER_REGNUM] = 1;
+ }
+
+ /* Determine if we need to save the condition code registers. */
+ if (regs_ever_live[CR2_REGNO]
+ || regs_ever_live[CR3_REGNO]
+ || regs_ever_live[CR4_REGNO])
+ {
+ info_ptr->cr_save_p = 1;
+ if (DEFAULT_ABI == ABI_V4)
+ info_ptr->cr_size = reg_size;
+ }
+
+ /* If the current function calls __builtin_eh_return, then we need
+ to allocate stack space for registers that will hold data for
+ the exception handler. */
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i;
+ for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
+ continue;
+
+ /* SPE saves EH registers in 64-bits. */
+ ehrd_size = i * (TARGET_SPE_ABI
+ && info_ptr->spe_64bit_regs_used != 0
+ ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
+ }
+ else
+ ehrd_size = 0;
+
+ /* Determine various sizes. */
+ info_ptr->reg_size = reg_size;
+ info_ptr->fixed_size = RS6000_SAVE_AREA;
+ info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
+ info_ptr->parm_size = RS6000_ALIGN (current_function_outgoing_args_size,
+ TARGET_ALTIVEC ? 16 : 8);
+ if (FRAME_GROWS_DOWNWARD)
+ info_ptr->vars_size
+ += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT)
+ - (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size);
+
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ info_ptr->spe_gp_size = 8 * (32 - info_ptr->first_gp_reg_save);
+ else
+ info_ptr->spe_gp_size = 0;
+
+ if (TARGET_ALTIVEC_ABI)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+ else
+ info_ptr->vrsave_mask = 0;
+
+ if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
+ info_ptr->vrsave_size = 4;
+ else
+ info_ptr->vrsave_size = 0;
+
+ compute_save_world_info (info_ptr);
+
+ /* Calculate the offsets. */
+ switch (DEFAULT_ABI)
+ {
+ case ABI_NONE:
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ case ABI_DARWIN:
+ info_ptr->fp_save_offset = - info_ptr->fp_size;
+ info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
+
+ if (TARGET_ALTIVEC_ABI)
+ {
+ info_ptr->vrsave_save_offset
+ = info_ptr->gp_save_offset - info_ptr->vrsave_size;
+
+ /* Align stack so vector save area is on a quadword boundary.
+ The padding goes above the vectors. */
+ if (info_ptr->altivec_size != 0)
+ info_ptr->altivec_padding_size
+ = info_ptr->vrsave_save_offset & 0xF;
+ else
+ info_ptr->altivec_padding_size = 0;
+
+ info_ptr->altivec_save_offset
+ = info_ptr->vrsave_save_offset
+ - info_ptr->altivec_padding_size
+ - info_ptr->altivec_size;
+ gcc_assert (info_ptr->altivec_size == 0
+ || info_ptr->altivec_save_offset % 16 == 0);
+
+ /* Adjust for AltiVec case. */
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
+ }
+ else
+ info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
+ info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
+ info_ptr->lr_save_offset = 2*reg_size;
+ break;
+
+ case ABI_V4:
+ info_ptr->fp_save_offset = - info_ptr->fp_size;
+ info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
+ info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
+
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ {
+ /* Align stack so SPE GPR save area is aligned on a
+ double-word boundary. */
+ if (info_ptr->spe_gp_size != 0)
+ info_ptr->spe_padding_size
+ = 8 - (-info_ptr->cr_save_offset % 8);
+ else
+ info_ptr->spe_padding_size = 0;
+
+ info_ptr->spe_gp_save_offset
+ = info_ptr->cr_save_offset
+ - info_ptr->spe_padding_size
+ - info_ptr->spe_gp_size;
+
+ /* Adjust for SPE case. */
+ info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
+ }
+ else if (TARGET_ALTIVEC_ABI)
+ {
+ info_ptr->vrsave_save_offset
+ = info_ptr->cr_save_offset - info_ptr->vrsave_size;
+
+ /* Align stack so vector save area is on a quadword boundary. */
+ if (info_ptr->altivec_size != 0)
+ info_ptr->altivec_padding_size
+ = 16 - (-info_ptr->vrsave_save_offset % 16);
+ else
+ info_ptr->altivec_padding_size = 0;
+
+ info_ptr->altivec_save_offset
+ = info_ptr->vrsave_save_offset
+ - info_ptr->altivec_padding_size
+ - info_ptr->altivec_size;
+
+ /* Adjust for AltiVec case. */
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
+ }
+ else
+ info_ptr->ehrd_offset = info_ptr->cr_save_offset;
+ info_ptr->ehrd_offset -= ehrd_size;
+ info_ptr->lr_save_offset = reg_size;
+ break;
+ }
+
+ save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
+ info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
+ + info_ptr->gp_size
+ + info_ptr->altivec_size
+ + info_ptr->altivec_padding_size
+ + info_ptr->spe_gp_size
+ + info_ptr->spe_padding_size
+ + ehrd_size
+ + info_ptr->cr_size
+ + info_ptr->vrsave_size,
+ save_align);
+
+ non_fixed_size = (info_ptr->vars_size
+ + info_ptr->parm_size
+ + info_ptr->save_size);
+
+ /* APPLE LOCAL begin CW asm blocks */
+ /* If we have an assembly function, maybe use an explicit size. To
+ be consistent with CW behavior (and because it's safer), let
+ RS6000_ALIGN round the explicit size up if necessary. */
+ if (cfun->iasm_asm_function && cfun->iasm_frame_size != -2)
+ {
+ if (cfun->iasm_frame_size == -1)
+ non_fixed_size = 32;
+ else if (cfun->iasm_frame_size < 32)
+ error ("fralloc frame size must be at least 32");
+ else
+ non_fixed_size = cfun->iasm_frame_size;
+ non_fixed_size += 24;
+ info_ptr->total_size = RS6000_ALIGN (non_fixed_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT);
+ /* APPLE LOCAL end CW asm blocks */
+
+ /* Determine if we need to allocate any stack frame:
+
+ For AIX we need to push the stack if a frame pointer is needed
+ (because the stack might be dynamically adjusted), if we are
+ debugging, if we make calls, or if the sum of fp_save, gp_save,
+ and local variables is more than the space needed to save all
+ non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
+ + 18*8 = 288 (GPR13 reserved).
+
+ For V.4 we don't have the stack cushion that AIX uses, but assume
+ that the debugger can handle stackless frames. */
+
+ /* APPLE LOCAL CW asm blocks */
+ if (info_ptr->calls_p || (cfun->iasm_asm_function && cfun->iasm_frame_size != -2))
+ info_ptr->push_p = 1;
+
+ else if (DEFAULT_ABI == ABI_V4)
+ info_ptr->push_p = non_fixed_size != 0;
+
+ else if (frame_pointer_needed)
+ info_ptr->push_p = 1;
+
+ else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
+ info_ptr->push_p = 1;
+
+ else
+ info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
+
+ /* Zero offsets if we're not saving those registers. */
+ if (info_ptr->fp_size == 0)
+ info_ptr->fp_save_offset = 0;
+
+ if (info_ptr->gp_size == 0)
+ info_ptr->gp_save_offset = 0;
+
+ if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
+ info_ptr->altivec_save_offset = 0;
+
+ if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_save_offset = 0;
+
+ if (! TARGET_SPE_ABI
+ || info_ptr->spe_64bit_regs_used == 0
+ || info_ptr->spe_gp_size == 0)
+ info_ptr->spe_gp_save_offset = 0;
+
+ if (! info_ptr->lr_save_p)
+ info_ptr->lr_save_offset = 0;
+
+ if (! info_ptr->cr_save_p)
+ info_ptr->cr_save_offset = 0;
+
+ return info_ptr;
+}
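+
+/* Alignment note (illustrative): RS6000_ALIGN (N, A) rounds N up to
+ a multiple of A -- presumably ((n) + (a) - 1) & ~((a) - 1) -- so,
+ for example, a 220-byte save area on a Darwin/AltiVec target
+ becomes RS6000_ALIGN (220, 16) == 224, and total_size always ends
+ up a multiple of ABI_STACK_BOUNDARY / BITS_PER_UNIT. */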
+
+/* Return true if the current function uses any GPRs in 64-bit SIMD
+ mode. */
+
+static bool
+spe_func_has_64bit_regs_p (void)
+{
+ rtx insns, insn;
+
+ /* Functions that save and restore all the call-saved registers will
+ need to save/restore the registers in 64-bits. */
+ if (current_function_calls_eh_return
+ || current_function_calls_setjmp
+ || current_function_has_nonlocal_goto)
+ return true;
+
+ insns = get_insns ();
+
+ for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
+ {
+ rtx i;
+
+ /* FIXME: This should be implemented with attributes...
+
+ (set_attr "spe64" "true")....then,
+ if (get_spe64(insn)) return true;
+
+ It's the only reliable way to do the stuff below. */
+
+ i = PATTERN (insn);
+ if (GET_CODE (i) == SET)
+ {
+ enum machine_mode mode = GET_MODE (SET_SRC (i));
+
+ if (SPE_VECTOR_MODE (mode))
+ return true;
+ if (TARGET_E500_DOUBLE && mode == DFmode)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void
+debug_stack_info (rs6000_stack_t *info)
+{
+ const char *abi_string;
+
+ if (! info)
+ info = rs6000_stack_info ();
+
+ fprintf (stderr, "\nStack information for function %s:\n",
+ ((current_function_decl && DECL_NAME (current_function_decl))
+ ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
+ : "<unknown>"));
+
+ switch (info->abi)
+ {
+ default: abi_string = "Unknown"; break;
+ case ABI_NONE: abi_string = "NONE"; break;
+ case ABI_AIX: abi_string = "AIX"; break;
+ case ABI_DARWIN: abi_string = "Darwin"; break;
+ case ABI_V4: abi_string = "V.4"; break;
+ }
+
+ fprintf (stderr, "\tABI = %5s\n", abi_string);
+
+ if (TARGET_ALTIVEC_ABI)
+ fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
+
+ if (TARGET_SPE_ABI)
+ fprintf (stderr, "\tSPE ABI extensions enabled.\n");
+
+ if (info->first_gp_reg_save != 32)
+ fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
+
+ if (info->first_fp_reg_save != 64)
+ fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
+
+ if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
+ fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
+ info->first_altivec_reg_save);
+
+ if (info->lr_save_p)
+ fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
+
+ if (info->cr_save_p)
+ fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
+
+ if (info->vrsave_mask)
+ fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
+
+ if (info->push_p)
+ fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
+
+ if (info->calls_p)
+ fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
+
+ if (info->gp_save_offset)
+ fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
+
+ if (info->fp_save_offset)
+ fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
+
+ if (info->altivec_save_offset)
+ fprintf (stderr, "\taltivec_save_offset = %5d\n",
+ info->altivec_save_offset);
+
+ if (info->spe_gp_save_offset)
+ fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
+ info->spe_gp_save_offset);
+
+ if (info->vrsave_save_offset)
+ fprintf (stderr, "\tvrsave_save_offset = %5d\n",
+ info->vrsave_save_offset);
+
+ if (info->lr_save_offset)
+ fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
+
+ if (info->cr_save_offset)
+ fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
+
+ if (info->varargs_save_offset)
+ fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
+
+ if (info->total_size)
+ fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
+ info->total_size);
+
+ if (info->vars_size)
+ fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
+ info->vars_size);
+
+ if (info->parm_size)
+ fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
+
+ if (info->fixed_size)
+ fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
+
+ if (info->gp_size)
+ fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
+
+ if (info->spe_gp_size)
+ fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
+
+ if (info->fp_size)
+ fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
+
+ if (info->altivec_size)
+ fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
+
+ if (info->vrsave_size)
+ fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
+
+ if (info->altivec_padding_size)
+ fprintf (stderr, "\taltivec_padding_size= %5d\n",
+ info->altivec_padding_size);
+
+ if (info->spe_padding_size)
+ fprintf (stderr, "\tspe_padding_size = %5d\n",
+ info->spe_padding_size);
+
+ if (info->cr_size)
+ fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
+
+ if (info->save_size)
+ fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
+
+ if (info->reg_size != 4)
+ fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
+
+ fprintf (stderr, "\n");
+}
+
+rtx
+rs6000_return_addr (int count, rtx frame)
+{
+ /* Currently we don't optimize very well between prologue and body
+ code, and for PIC code the generated code can actually be quite
+ bad, so don't try to be too clever here. */
+ if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
+ {
+ cfun->machine->ra_needs_full_frame = 1;
+
+ return
+ gen_rtx_MEM
+ (Pmode,
+ memory_address
+ (Pmode,
+ plus_constant (copy_to_reg
+ (gen_rtx_MEM (Pmode,
+ memory_address (Pmode, frame))),
+ RETURN_ADDRESS_OFFSET)));
+ }
+
+ cfun->machine->ra_need_lr = 1;
+ return get_hard_reg_initial_val (Pmode, LINK_REGISTER_REGNUM);
+}
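+
+/* Usage note (illustrative): a source-level call such as
+
+ void *pc = __builtin_return_address (1);
+
+ arrives here with COUNT == 1 and takes the full-frame path: the
+ back chain word at 0(r1) is dereferenced, and the saved LR is then
+ loaded from RETURN_ADDRESS_OFFSET bytes into that frame. COUNT == 0
+ in non-PIC AIX code simply recovers the incoming LR value. */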
+
+/* Say whether a function is a candidate for sibcall handling or not.
+ APPLE LOCAL sibling calls
+
+ Also, we can't do it if there are any vector parameters; there's
+ nowhere to put the VRsave code that would make it work; note that
+ functions with vector parameters are required to have a prototype,
+ so the argument type info must be available here. (The tail
+ recursion case can work with vector parameters, but there's no way
+ to distinguish it here.) */
+
+/* APPLE LOCAL begin sibling calls
+ On Darwin only, indirect calls may be sibcalls. This is enabled
+ primarily by target-specific logic in calls.c.
+ APPLE LOCAL end sibling calls */
+static bool
+rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ tree type;
+ /* APPLE LOCAL begin long-branch */
+ /* This is wrong, short functions are ok for sibcall. (mrs) */
+ if (rs6000_default_long_calls)
+ return false;
+ /* APPLE LOCAL end long-branch */
+
+ /* APPLE LOCAL begin indirect sibcalls */
+ /* This goes with a lot of local changes in expand_call. */
+ if (DEFAULT_ABI == ABI_DARWIN && !decl)
+ return true;
+ /* APPLE LOCAL end indirect sibcalls */
+
+ if (decl)
+ {
+ if (TARGET_ALTIVEC_VRSAVE)
+ {
+ for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ type; type = TREE_CHAIN (type))
+ {
+ if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
+ return false;
+ }
+ }
+ if (DEFAULT_ABI == ABI_DARWIN
+ || ((*targetm.binds_local_p) (decl)
+ && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
+ {
+ tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+
+ if (!lookup_attribute ("longcall", attr_list)
+ || lookup_attribute ("shortcall", attr_list))
+ return true;
+ }
+ }
+ return false;
+}
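+
+/* For example (illustrative), with AltiVec and VRsave enabled:
+
+ vector int vcallee (vector int);
+ vector int vcaller (vector int x) { return vcallee (x); }
+
+ the tail call in vcaller is rejected by the type walk above, since
+ the vector parameter would require VRsave bookkeeping, while
+
+ int callee (int);
+ int caller (int x) { return callee (x); }
+
+ remains a sibcall candidate when callee binds locally (or, on
+ Darwin, unconditionally). */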
+
+/* Return NULL if INSN is valid within a low-overhead loop.
+ Otherwise return a string describing why doloop cannot be applied.
+ PowerPC uses the COUNT register for branch-on-table instructions. */
+
+static const char *
+rs6000_invalid_within_doloop (rtx insn)
+{
+ if (CALL_P (insn))
+ return "Function call in the loop.";
+
+ if (JUMP_P (insn)
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC))
+ return "Computed branch in the loop.";
+
+ return NULL;
+}
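+
+/* For example (illustrative), the counted loop
+
+ for (i = 0; i < n; i++)
+ a[i] = f (a[i]);
+
+ cannot become a CTR-based bdnz loop, because the call to f could
+ itself clobber the COUNT register. Likewise a switch lowered to a
+ branch table (ADDR_VEC/ADDR_DIFF_VEC) dispatches through mtctr and
+ bctr, so it is rejected above as a computed branch. */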
+
+static int
+rs6000_ra_ever_killed (void)
+{
+ rtx top;
+ rtx reg;
+ rtx insn;
+
+ if (current_function_is_thunk)
+ return 0;
+
+ /* regs_ever_live has LR marked as used if any sibcalls are present,
+ but this should not force saving and restoring in the
+ pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
+ clobbers LR, so that is inappropriate. */
+
+ /* Also, the prologue can generate a store into LR that
+ doesn't really count, like this:
+
+ move LR->R0
+ bcl to set PIC register
+ move LR->R31
+ move R0->LR
+
+ When we're called from the epilogue, we need to avoid counting
+ this as a store. */
+
+ push_topmost_sequence ();
+ top = get_insns ();
+ pop_topmost_sequence ();
+ reg = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+
+ for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
+ {
+ if (CALL_P (insn))
+ {
+ if (!SIBLING_CALL_P (insn))
+ return 1;
+ }
+ else if (find_regno_note (insn, REG_INC, LINK_REGISTER_REGNUM))
+ return 1;
+ else if (set_of (reg, insn) != NULL_RTX
+ && !prologue_epilogue_contains (insn))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Add a REG_MAYBE_DEAD note to the insn. */
+static void
+rs6000_maybe_dead (rtx insn)
+{
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
+ const0_rtx,
+ REG_NOTES (insn));
+}
+
+/* Emit instructions needed to load the TOC register.
+ This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
+ and there is a constant pool, or for SVR4 -fpic. */
+
+void
+rs6000_emit_load_toc_table (int fromprolog)
+{
+ rtx dest, insn;
+ dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+
+ if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
+ {
+ char buf[30];
+ rtx lab, tmp1, tmp2, got, tempLR;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ if (flag_pic == 2)
+ got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+ else
+ got = rs6000_got_sym ();
+ tmp1 = tmp2 = dest;
+ if (!fromprolog)
+ {
+ tmp1 = gen_reg_rtx (Pmode);
+ tmp2 = gen_reg_rtx (Pmode);
+ }
+ tempLR = (fromprolog
+ ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+ : gen_reg_rtx (Pmode));
+ insn = emit_insn (gen_load_toc_v4_PIC_1 (tempLR, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_move_insn (tmp1, tempLR);
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+ else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ {
+ rtx tempLR = (fromprolog
+ ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+ : gen_reg_rtx (Pmode));
+
+ insn = emit_insn (gen_load_toc_v4_pic_si (tempLR));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_move_insn (dest, tempLR);
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+ else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
+ {
+ char buf[30];
+ rtx tempLR = (fromprolog
+ ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+ : gen_reg_rtx (Pmode));
+ rtx temp0 = (fromprolog
+ ? gen_rtx_REG (Pmode, 0)
+ : gen_reg_rtx (Pmode));
+
+ if (fromprolog)
+ {
+ rtx symF, symL;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
+ symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ rs6000_maybe_dead (emit_insn (gen_load_toc_v4_PIC_1 (tempLR,
+ symF)));
+ rs6000_maybe_dead (emit_move_insn (dest, tempLR));
+ rs6000_maybe_dead (emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest,
+ symL,
+ symF)));
+ }
+ else
+ {
+ rtx tocsym;
+
+ tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+ emit_insn (gen_load_toc_v4_PIC_1b (tempLR, tocsym));
+ emit_move_insn (dest, tempLR);
+ emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
+ }
+ insn = emit_insn (gen_addsi3 (dest, temp0, dest));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+ else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
+ {
+ /* This is for AIX code running in non-PIC ELF32. */
+ char buf[30];
+ rtx realsym;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
+ realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ insn = emit_insn (gen_elf_high (dest, realsym));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ insn = emit_insn (gen_elf_low (dest, dest, realsym));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+ else
+ {
+ gcc_assert (DEFAULT_ABI == ABI_AIX);
+
+ if (TARGET_32BIT)
+ insn = emit_insn (gen_load_toc_aix_si (dest));
+ else
+ insn = emit_insn (gen_load_toc_aix_di (dest));
+ if (fromprolog)
+ rs6000_maybe_dead (insn);
+ }
+}
+
+/* Emit instructions to restore the link register after determining where
+ its value has been stored. */
+
+void
+rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ rtx operands[2];
+
+ operands[0] = source;
+ operands[1] = scratch;
+
+ if (info->lr_save_p)
+ {
+ rtx frame_rtx = stack_pointer_rtx;
+ HOST_WIDE_INT sp_offset = 0;
+ rtx tmp;
+
+ if (frame_pointer_needed
+ || current_function_calls_alloca
+ || info->total_size > 32767)
+ {
+ tmp = gen_frame_mem (Pmode, frame_rtx);
+ emit_move_insn (operands[1], tmp);
+ frame_rtx = operands[1];
+ }
+ else if (info->push_p)
+ sp_offset = info->total_size;
+
+ tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
+ tmp = gen_frame_mem (Pmode, tmp);
+ emit_move_insn (tmp, operands[0]);
+ }
+ else
+ emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM), operands[0]);
+}
+
+static GTY(()) int set = -1;
+
+int
+get_TOC_alias_set (void)
+{
+ if (set == -1)
+ set = new_alias_set ();
+ return set;
+}
+
+/* This returns nonzero if the current function uses the TOC. This is
+ determined by the presence of (use (unspec ... UNSPEC_TOC)), which
+ is generated by the ABI_V4 load_toc_* patterns. */
+#if TARGET_ELF
+static int
+uses_TOC (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ {
+ rtx pat = PATTERN (insn);
+ int i;
+
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx sub = XVECEXP (pat, 0, i);
+ if (GET_CODE (sub) == USE)
+ {
+ sub = XEXP (sub, 0);
+ if (GET_CODE (sub) == UNSPEC
+ && XINT (sub, 1) == UNSPEC_TOC)
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+rtx
+create_TOC_reference (rtx symbol)
+{
+ if (no_new_pseudos)
+ regs_ever_live[TOC_REGISTER] = 1;
+ return gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, TOC_REGISTER),
+ gen_rtx_CONST (Pmode,
+ gen_rtx_MINUS (Pmode, symbol,
+ gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
+}
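+
+/* The reference built above has the shape (illustrative):
+
+ (plus:P (reg:P 2)
+ (const:P (minus:P (symbol_ref "sym")
+ (symbol_ref toc_label_name))))
+
+ i.e. the TOC pointer register (r2 in the usual case) plus the
+ fixed distance from the TOC anchor label to SYM, which later
+ address legitimization can fold into a 16-bit displacement. */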
+
+/* If _Unwind_* has been called from within the same module,
+ the TOC register is not guaranteed to have been saved to 40(1) on
+ function entry. Save it there in that case. */
+
+void
+rs6000_aix_emit_builtin_unwind_init (void)
+{
+ rtx mem;
+ rtx stack_top = gen_reg_rtx (Pmode);
+ rtx opcode_addr = gen_reg_rtx (Pmode);
+ rtx opcode = gen_reg_rtx (SImode);
+ rtx tocompare = gen_reg_rtx (SImode);
+ rtx no_toc_save_needed = gen_label_rtx ();
+
+ mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
+ emit_move_insn (stack_top, mem);
+
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+ emit_move_insn (opcode_addr, mem);
+ emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
+ emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
+ : 0xE8410028, SImode));
+
+ do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
+ SImode, NULL_RTX, NULL_RTX,
+ no_toc_save_needed);
+
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+ emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
+ emit_label (no_toc_save_needed);
+}
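+
+/* The two magic constants compared above decode as follows:
+
+ 0x80410014 == lwz r2,20(r1) (opcode 32, RT=2, RA=1, D=20)
+ 0xE8410028 == ld r2,40(r1) (opcode 58, RT=2, RA=1, DS=40)
+
+ i.e. the instruction at the return address that reloads the TOC
+ register from its ABI save slot; if that reload is absent, the TOC
+ is stored into the slot by hand instead. */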
+
+/* This ties together stack memory (MEM with an alias set of frame_alias_set)
+ and the change to the stack pointer. */
+
+static void
+rs6000_emit_stack_tie (void)
+{
+ rtx mem = gen_frame_mem (BLKmode,
+ gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
+
+ emit_insn (gen_stack_tie (mem));
+}
+
+/* Emit the correct code for allocating stack space, as insns.
+ If COPY_R12, make sure a copy of the old frame is left in r12.
+ The generated code may use hard register 0 as a temporary. */
+
+static void
+rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
+{
+ rtx insn;
+ rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx tmp_reg = gen_rtx_REG (Pmode, 0);
+ rtx todec = gen_int_mode (-size, Pmode);
+
+ if (INTVAL (todec) != -size)
+ {
+ warning (0, "stack frame too large");
+ emit_insn (gen_trap ());
+ return;
+ }
+
+ if (current_function_limit_stack)
+ {
+ if (REG_P (stack_limit_rtx)
+ && REGNO (stack_limit_rtx) > 1
+ && REGNO (stack_limit_rtx) <= 31)
+ {
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (tmp_reg,
+ stack_limit_rtx,
+ GEN_INT (size))
+ : gen_adddi3 (tmp_reg,
+ stack_limit_rtx,
+ GEN_INT (size)));
+
+ emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+ const0_rtx));
+ }
+ else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
+ && TARGET_32BIT
+ && DEFAULT_ABI == ABI_V4)
+ {
+ rtx toload = gen_rtx_CONST (VOIDmode,
+ gen_rtx_PLUS (Pmode,
+ stack_limit_rtx,
+ GEN_INT (size)));
+
+ emit_insn (gen_elf_high (tmp_reg, toload));
+ emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
+ emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+ const0_rtx));
+ }
+ else
+ warning (0, "stack limit expression is not supported");
+ }
+
+ if (copy_r12 || ! TARGET_UPDATE)
+ emit_move_insn (gen_rtx_REG (Pmode, 12), stack_reg);
+
+ if (TARGET_UPDATE)
+ {
+ if (size > 32767)
+ {
+ /* Need a note here so that try_split doesn't get confused. */
+ if (get_last_insn () == NULL_RTX)
+ emit_note (NOTE_INSN_DELETED);
+ insn = emit_move_insn (tmp_reg, todec);
+ try_split (PATTERN (insn), insn, 0);
+ todec = tmp_reg;
+ }
+
+ insn = emit_insn (TARGET_32BIT
+ ? gen_movsi_update (stack_reg, stack_reg,
+ todec, stack_reg)
+ : gen_movdi_di_update (stack_reg, stack_reg,
+ todec, stack_reg));
+ }
+ else
+ {
+ insn = emit_insn (TARGET_32BIT
+ ? gen_addsi3 (stack_reg, stack_reg, todec)
+ : gen_adddi3 (stack_reg, stack_reg, todec));
+ emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
+ gen_rtx_REG (Pmode, 12));
+ }
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) =
+ gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_reg,
+ gen_rtx_PLUS (Pmode, stack_reg,
+ GEN_INT (-size))),
+ REG_NOTES (insn));
+}
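+
+/* For a small frame this typically materializes as a single
+ store-with-update, e.g. (32-bit, 64-byte frame, illustrative):
+
+ stwu r1,-64(r1)
+
+ which decrements the stack pointer and stores the back chain in one
+ instruction. Frames larger than 32767 bytes first load the
+ displacement into r0, because the D-form offset field is only 16
+ bits wide; without TARGET_UPDATE the back chain is written
+ explicitly from the r12 copy. */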
+
+/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
+ with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
+ is not NULL. It would be nice if dwarf2out_frame_debug_expr could
+ deduce these equivalences by itself so it wasn't necessary to hold
+ its hand so much. */
+
+static void
+rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
+ rtx reg2, rtx rreg)
+{
+ rtx real, temp;
+
+ /* copy_rtx will not make unique copies of registers, so we need to
+ ensure we don't have unwanted sharing here. */
+ if (reg == reg2)
+ reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
+
+ if (reg == rreg)
+ reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
+
+ real = copy_rtx (PATTERN (insn));
+
+ if (reg2 != NULL_RTX)
+ real = replace_rtx (real, reg2, rreg);
+
+ real = replace_rtx (real, reg,
+ gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
+ STACK_POINTER_REGNUM),
+ GEN_INT (val)));
+
+ /* We expect that 'real' is either a SET or a PARALLEL containing
+ SETs (and possibly other stuff). In a PARALLEL, all the SETs
+ are important so they all have to be marked RTX_FRAME_RELATED_P. */
+
+ if (GET_CODE (real) == SET)
+ {
+ rtx set = real;
+
+ temp = simplify_rtx (SET_SRC (set));
+ if (temp)
+ SET_SRC (set) = temp;
+ temp = simplify_rtx (SET_DEST (set));
+ if (temp)
+ SET_DEST (set) = temp;
+ if (GET_CODE (SET_DEST (set)) == MEM)
+ {
+ temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+ if (temp)
+ XEXP (SET_DEST (set), 0) = temp;
+ }
+ }
+ else
+ {
+ int i;
+
+ gcc_assert (GET_CODE (real) == PARALLEL);
+ for (i = 0; i < XVECLEN (real, 0); i++)
+ if (GET_CODE (XVECEXP (real, 0, i)) == SET)
+ {
+ rtx set = XVECEXP (real, 0, i);
+
+ temp = simplify_rtx (SET_SRC (set));
+ if (temp)
+ SET_SRC (set) = temp;
+ temp = simplify_rtx (SET_DEST (set));
+ if (temp)
+ SET_DEST (set) = temp;
+ if (GET_CODE (SET_DEST (set)) == MEM)
+ {
+ temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+ if (temp)
+ XEXP (SET_DEST (set), 0) = temp;
+ }
+ RTX_FRAME_RELATED_P (set) = 1;
+ }
+ }
+
+ if (TARGET_SPE)
+ real = spe_synthesize_frame_save (real);
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ real,
+ REG_NOTES (insn));
+}
+
+/* Given an SPE frame note, return a PARALLEL of SETs with the
+ original note, plus a synthetic register save. */
+
+static rtx
+spe_synthesize_frame_save (rtx real)
+{
+ rtx synth, offset, reg, real2;
+
+ if (GET_CODE (real) != SET
+ || GET_MODE (SET_SRC (real)) != V2SImode)
+ return real;
+
+ /* For the SPE, registers saved in 64 bits get a PARALLEL for their
+ frame-related note. The parallel contains a set of the register
+ being saved, and another set to a synthetic register (n+1200).
+ This is so we can differentiate between 64-bit and 32-bit saves.
+ Words cannot describe this nastiness. */
+
+ gcc_assert (GET_CODE (SET_DEST (real)) == MEM
+ && GET_CODE (XEXP (SET_DEST (real), 0)) == PLUS
+ && GET_CODE (SET_SRC (real)) == REG);
+
+ /* Transform:
+ (set (mem (plus (reg x) (const y)))
+ (reg z))
+ into:
+ (set (mem (plus (reg x) (const y+4)))
+ (reg z+1200))
+ */
+
+ real2 = copy_rtx (real);
+ PUT_MODE (SET_DEST (real2), SImode);
+ reg = SET_SRC (real2);
+ real2 = replace_rtx (real2, reg, gen_rtx_REG (SImode, REGNO (reg)));
+ synth = copy_rtx (real2);
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ offset = XEXP (XEXP (SET_DEST (real2), 0), 1);
+ real2 = replace_rtx (real2, offset, GEN_INT (INTVAL (offset) + 4));
+ }
+
+ reg = SET_SRC (synth);
+
+ synth = replace_rtx (synth, reg,
+ gen_rtx_REG (SImode, REGNO (reg) + 1200));
+
+ offset = XEXP (XEXP (SET_DEST (synth), 0), 1);
+ synth = replace_rtx (synth, offset,
+ GEN_INT (INTVAL (offset)
+ + (BYTES_BIG_ENDIAN ? 0 : 4)));
+
+ RTX_FRAME_RELATED_P (synth) = 1;
+ RTX_FRAME_RELATED_P (real2) = 1;
+ if (BYTES_BIG_ENDIAN)
+ real = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, synth, real2));
+ else
+ real = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, real2, synth));
+
+ return real;
+}
+
+/* Returns an insn that has a vrsave set operation with the
+ appropriate CLOBBERs. */
+
+static rtx
+generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
+{
+ int nclobs, i;
+ rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
+ rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+
+ clobs[0]
+ = gen_rtx_SET (VOIDmode,
+ vrsave,
+ gen_rtx_UNSPEC_VOLATILE (SImode,
+ gen_rtvec (2, reg, vrsave),
+ UNSPECV_SET_VRSAVE));
+
+ nclobs = 1;
+
+ /* We need to clobber the registers in the mask so the scheduler
+ does not move sets to VRSAVE before sets of AltiVec registers.
+
+ However, if the function receives nonlocal gotos, reload will set
+ all call saved registers live. We will end up with:
+
+ (set (reg 999) (mem))
+ (parallel [ (set (reg vrsave) (unspec blah))
+ (clobber (reg 999))])
+
+ The clobber will cause the store into reg 999 to be dead, and
+ flow will attempt to delete an epilogue insn. In this case, we
+ need an unspec use/set of the register. */
+
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ if (!epiloguep || call_used_regs [i])
+ clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (V4SImode, i));
+ else
+ {
+ rtx reg = gen_rtx_REG (V4SImode, i);
+
+ clobs[nclobs++]
+ = gen_rtx_SET (VOIDmode,
+ reg,
+ gen_rtx_UNSPEC (V4SImode,
+ gen_rtvec (1, reg), 27));
+ }
+ }
+
+ insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
+
+ for (i = 0; i < nclobs; ++i)
+ XVECEXP (insn, 0, i) = clobs[i];
+
+ return insn;
+}
+
+/* APPLE LOCAL begin special ObjC method use of R12 */
+/* Determine whether a name is an ObjC method. */
+
+static int
+name_encodes_objc_method_p (const char *piclabel_name)
+{
+ return (piclabel_name[0] == '+' || piclabel_name[0] == '-');
+}
+/* APPLE LOCAL end special ObjC method use of R12 */
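+
+/* Darwin's assembler names for ObjC methods look like
+ "+[NSObject alloc]" or "-[Foo bar:]" (illustrative examples), so
+ testing the first character for '+' or '-' suffices; ordinary C
+ symbols never begin with either. */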
+
+/* APPLE LOCAL begin recompute PIC register use */
+/* Sometimes a function has references that require the PIC register,
+ but optimization removes them all. To catch this case
+ recompute current_function_uses_pic_offset_table here.
+ This may allow us to eliminate the prologue and epilogue. */
+
+static int
+recompute_PIC_register_use (void)
+{
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ rtx insn;
+ current_function_uses_pic_offset_table = 0;
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ if (reg_mentioned_p (pic_offset_table_rtx, insn))
+ {
+ current_function_uses_pic_offset_table = 1;
+ break;
+ }
+ pop_topmost_sequence ();
+ }
+ return 0;
+}
+/* APPLE LOCAL end recompute PIC register use */
+
+/* APPLE LOCAL begin volatile pic base reg in leaves */
+/* If this is a leaf function and we used any pic-based references,
+ see if there is an unused volatile reg we can use instead of R31.
+ If so set substitute_pic_base_reg to this reg, set its reg_ever_used
+ bit (to avoid confusing later calls to alloc_volatile_reg), and
+ make a pass through the existing RTL, substituting the new reg for
+ the old one wherever it appears.
+ Logically this is a void function; it is int so it can be used to
+ initialize a dummy variable, thus getting executed ahead of other
+ initializations. Technicolour yawn. */
+
+/* ALLOC_VOLATILE_REG allocates a volatile register AFTER all gcc
+ register allocations have been done; we use it to reserve an
+ unused reg for holding VRsave. Returns -1 in case of failure
+ (all volatile regs are in use). */
+/* Note, this is called from both the prologue and epilogue code,
+ with the assumption that it will return the same result both
+ times! Since the register arrays are not changed in between
+ this is valid, if a bit fragile. */
+/* In future we may also use this to grab an unused volatile reg to
+ hold the PIC base reg in the event that the current function makes
+ no procedure calls; this was done in 2.95. */
+static int
+alloc_volatile_reg (void)
+{
+ if (current_function_is_leaf
+ && reload_completed
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ int r;
+ for (r = 10; r >= 2; --r)
+ if (! fixed_regs[r] && ! regs_ever_live[r])
+ return r;
+ }
+
+ return -1; /* fail */
+}
+
+extern rtx replace_regs (rtx x, rtx *reg_map, unsigned int nregs, int replace_dest);
+
+static int
+try_leaf_pic_optimization (void)
+{
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && current_function_is_leaf
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ int reg = alloc_volatile_reg ();
+ if (reg != -1)
+ {
+ /* Run through the insns, changing references to the original
+ PIC_OFFSET_TABLE_REGNUM to our new one. */
+ rtx insn;
+ const int nregs = PIC_OFFSET_TABLE_REGNUM + 1;
+ rtx *reg_map = (rtx *) xmalloc (nregs * sizeof (rtx));
+ memset (reg_map, 0, nregs * sizeof (rtx));
+ reg_map[PIC_OFFSET_TABLE_REGNUM] = gen_rtx_REG (SImode, reg);
+
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ replace_regs (PATTERN (insn), reg_map, nregs, 1);
+ replace_regs (REG_NOTES (insn), reg_map, nregs, 1);
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (!SIBLING_CALL_P (insn))
+ abort ();
+ }
+ }
+ pop_topmost_sequence ();
+ free (reg_map);
+
+ regs_ever_live[reg] = 1;
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 0;
+ cfun->machine->substitute_pic_base_reg = reg;
+ }
+ }
+ return 0;
+}
+/* APPLE LOCAL end volatile pic base reg in leaves */
+
+/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
+ Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
+
+static void
+emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
+ unsigned int regno, int offset, HOST_WIDE_INT total_size)
+{
+ rtx reg, offset_rtx, insn, mem, addr, int_rtx;
+ rtx replacea, replaceb;
+
+ int_rtx = GEN_INT (offset);
+
+ /* Some cases that need register indexed addressing. */
+ if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode)
+ || (TARGET_SPE_ABI
+ && SPE_VECTOR_MODE (mode)
+ && !SPE_CONST_OFFSET_OK (offset)))
+ {
+ /* Whoever calls us must make sure r11 is available in the
+ flow path of instructions in the prologue. */
+ offset_rtx = gen_rtx_REG (Pmode, 11);
+ emit_move_insn (offset_rtx, int_rtx);
+
+ replacea = offset_rtx;
+ replaceb = int_rtx;
+ }
+ else
+ {
+ offset_rtx = int_rtx;
+ replacea = NULL_RTX;
+ replaceb = NULL_RTX;
+ }
+
+ reg = gen_rtx_REG (mode, regno);
+ addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
+ mem = gen_frame_mem (mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+
+ rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
+}
+
+/* Emit an offset memory reference suitable for a frame store, while
+ converting to a valid addressing mode. */
+
+static rtx
+gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
+{
+ rtx int_rtx, offset_rtx;
+
+ int_rtx = GEN_INT (offset);
+
+ if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode))
+ {
+ offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
+ emit_move_insn (offset_rtx, int_rtx);
+ }
+ else
+ offset_rtx = int_rtx;
+
+ return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
+}
+
+/* Look for user-defined global regs. We should not save and restore
+ these, and cannot use stmw/lmw if any fall within their range. */
+
+static bool
+no_global_regs_above (int first_greg)
+{
+ int i;
+ for (i = 0; i < 32 - first_greg; i++)
+ if (global_regs[first_greg + i])
+ return false;
+ return true;
+}
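+
+/* For example (illustrative), a user-level global register variable
+
+ register int counter asm ("r30");
+
+ sets global_regs[30], so an stmw/lmw pair covering r30 would
+ silently clobber it; the check above forces individual moves in
+ that case, and such a register is never saved or restored at all. */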
+
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+
+/* Emit function prologue as insns. */
+
+void
+rs6000_emit_prologue (void)
+{
+ /* APPLE LOCAL recompute PIC register use */
+ int dummy ATTRIBUTE_UNUSED = recompute_PIC_register_use ();
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ int ignored ATTRIBUTE_UNUSED = try_leaf_pic_optimization ();
+ rs6000_stack_t *info = rs6000_stack_info ();
+ enum machine_mode reg_mode = Pmode;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
+ rtx frame_reg_rtx = sp_reg_rtx;
+ rtx cr_save_rtx = NULL_RTX;
+ rtx insn;
+ int saving_FPRs_inline;
+ int using_store_multiple;
+ HOST_WIDE_INT sp_offset = 0;
+ /* APPLE LOCAL begin callers_lr_already_saved */
+ int callers_lr_already_saved = 0;
+#if TARGET_MACHO
+ int lr_already_set_up_for_pic = 0;
+#endif
+ /* APPLE LOCAL end callers_lr_already_saved */
+ /* APPLE LOCAL special ObjC method use of R12 */
+ int objc_method_using_pic = 0;
+
+ /* APPLE LOCAL begin CW asm block */
+ if (cfun->iasm_asm_function && cfun->iasm_frame_size == -2)
+ return;
+ /* APPLE LOCAL end CW asm block */
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN
+ && current_function_uses_pic_offset_table && flag_pic
+ && current_function_decl
+ && DECL_ASSEMBLER_NAME_SET_P (current_function_decl))
+ {
+ /* At -O0, this will not be set yet, so we won't do this opt. */
+ const char *piclabel_name
+ = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
+
+ if (name_encodes_objc_method_p (piclabel_name)
+ /* If we're saving vector or FP regs via a function call,
+ then don't bother with this ObjC R12 optimization.
+ This test also eliminates world_save. */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save))
+ && (info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save)))
+ {
+ rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+ rtx src = machopic_function_base_sym ();
+ objc_method_using_pic = 1;
+ rs6000_maybe_dead (emit_insn (gen_load_macho_picbase_label (lr,
+ src)));
+ }
+ }
+#endif /* TARGET_MACHO */
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ /* gdb on darwin arranges to forward a function from the old
+ address by modifying the first 5 instructions of the function
+ to branch to the overriding function. This is necessary to
+ permit function pointers that point to the old function to
+ actually forward to the new function. */
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
+
+ using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+ && (!TARGET_SPE_ABI
+ || info->spe_64bit_regs_used == 0)
+ && info->first_gp_reg_save < 31
+ && no_global_regs_above (info->first_gp_reg_save));
+ saving_FPRs_inline = (info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save)
+ || current_function_calls_eh_return
+ || cfun->machine->ra_need_lr);
+
+ /* For V.4, update stack before we do any saving and set back pointer. */
+ if (! WORLD_SAVE_P (info)
+ && info->push_p
+ && (DEFAULT_ABI == ABI_V4
+ || current_function_calls_eh_return))
+ {
+ if (info->total_size < 32767)
+ sp_offset = info->total_size;
+ else
+ frame_reg_rtx = frame_ptr_rtx;
+ rs6000_emit_allocate_stack (info->total_size,
+ (frame_reg_rtx != sp_reg_rtx
+ && (info->cr_save_p
+ || info->lr_save_p
+ || info->first_fp_reg_save < 64
+ || info->first_gp_reg_save < 32
+ )));
+ if (frame_reg_rtx != sp_reg_rtx)
+ rs6000_emit_stack_tie ();
+ }
+
+ /* Handle world saves specially here. */
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j, sz;
+ rtx treg;
+ rtvec p;
+ rtx reg0;
+
+ /* save_world expects lr in r0. */
+ reg0 = gen_rtx_REG (Pmode, 0);
+ if (info->lr_save_p)
+ {
+ insn = emit_move_insn (reg0,
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
+ assumptions about the offsets of various bits of the stack
+ frame. */
+ gcc_assert (info->gp_save_offset == -220
+ && info->fp_save_offset == -144
+ && info->lr_save_offset == 8
+ && info->cr_save_offset == 4
+ && info->push_p
+ && info->lr_save_p
+ && (!current_function_calls_eh_return
+ || info->ehrd_offset == -432)
+ && info->vrsave_save_offset == -224
+ && info->altivec_save_offset == -416);
+
+ treg = gen_rtx_REG (SImode, 11);
+ emit_move_insn (treg, GEN_INT (-info->total_size));
+
+ /* SAVE_WORLD takes the caller's LR in R0 and the frame size
+ in R11. It also clobbers R12, so beware! */
+
+ /* Preserve CR2 for save_world prologues */
+ sz = 5;
+ sz += 32 - info->first_gp_reg_save;
+ sz += 64 - info->first_fp_reg_save;
+ sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
+ p = rtvec_alloc (sz);
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ "*save_world"));
+ /* We do floats first so that the instruction pattern matches
+ properly. */
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8 * i));
+ rtx mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + sp_offset + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* Explain about use of R0. */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
+ }
+ /* Explain what happens to the stack pointer. */
+ {
+ rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
+ }
+
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ treg, GEN_INT (-info->total_size));
+ sp_offset = info->total_size;
+ }
+
+ /* APPLE LOCAL mainline */
+ /* Moved altivec save/restore. */
+
+ /* If we use the link register, get it into r0. */
+ if (!WORLD_SAVE_P (info) && info->lr_save_p)
+ {
+ insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* If we need to save CR, put it into r12. */
+ if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ {
+ rtx set;
+
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+ /* For Darwin, use R2, so we don't clobber the special ObjC
+ method use of R12. R11 has a special meaning for Ada, so we
+ can't use that. */
+ cr_save_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 2 : 12);
+ /* APPLE LOCAL end special ObjC method use of R12 */
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* Now, there's no way that dwarf2out_frame_debug_expr is going
+ to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
+ But that's OK. All we have to do is specify that _one_ condition
+ code register is saved in this stack slot. The thrower's epilogue
+ will then restore all the call-saved registers.
+ We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx,
+ gen_rtx_REG (SImode, CR2_REGNO));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
+ }
+
+ /* Do any required saving of FPRs.  If only one or two need saving,
+ do it ourselves; otherwise, call an out-of-line function.  */
+ if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
+ {
+ int i;
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ if ((regs_ever_live[info->first_fp_reg_save+i]
+ && ! call_used_regs[info->first_fp_reg_save+i]))
+ emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
+ info->first_fp_reg_save + i,
+ info->fp_save_offset + sp_offset + 8 * i,
+ info->total_size);
+ }
+ else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
+ {
+ int i;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+ /* APPLE LOCAL begin reduce code size */
+
+ int gen_following_label = 0;
+ int count = 0;
+
+ if (current_function_uses_pic_offset_table && flag_pic
+#ifdef INSN_SCHEDULING
+ /* Prevent the compiler from crashing
+ while scheduling insns after global_alloc! */
+ && (optimize == 0 || !flag_schedule_insns_after_reload)
+#endif
+ /* If this is the last CALL in the prologue, then we've got our PC.
+ If we're saving AltiVec regs via a function, we're not last.  */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save)))
+ gen_following_label = lr_already_set_up_for_pic = 1;
+ /* APPLE LOCAL end reduce code size */
+
+ /* APPLE LOCAL begin +2 (could be conditionalized) */
+ p = rtvec_alloc (2 + 64 - info->first_fp_reg_save + 2
+ + gen_following_label);
+ /* APPLE LOCAL end +2 (could be conditionalized) */
+
+ /* APPLE LOCAL begin reduce code size */
+ /* 0 -> count++ */
+ RTVEC_ELT (p, count++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+#if TARGET_MACHO
+ /* We have to calculate the offset into saveFP to where we must
+ call (!!).  SAVEFP also saves the caller's LR -- placed into
+ R0 above -- into 8(R1).  SAVEFP/RESTOREFP should never be
+ called to save or restore only F31.  */
+
+ if (info->lr_save_offset != (POINTER_SIZE / 4) || info->first_fp_reg_save == 63)
+ abort ();
+
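+ /* Entry points in saveFP are one instruction (4 bytes) apart, and
+ the entry for f14 (reg 46) is the saveFP label itself; hence the
+ byte offset (first_fp_reg_save - 46) * 4 below.  */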
+ sprintf (rname, "*saveFP%s%.0d ; save f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
+#else
+ /* APPLE LOCAL end reduce code size */
+ sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
+ info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
+ /* APPLE LOCAL reduce code size */
+#endif /* TARGET_MACHO */
+ alloc_rname = ggc_strdup (rname);
+ /* APPLE LOCAL reduce code size */
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ alloc_rname));
+ /* APPLE LOCAL begin reduce code size */
+ if (gen_following_label)
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, const0_rtx);
+ /* APPLE LOCAL end reduce code size */
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8*i));
+ mem = gen_frame_mem (DFmode, addr);
+
+ /* APPLE LOCAL reduce code size */
+ RTVEC_ELT (p, count++) = gen_rtx_SET (VOIDmode, mem, reg);
+ /* APPLE LOCAL begin C++ EH and setjmp (radar 2866661) */
+ }
+#if TARGET_MACHO
+ /* Darwin version of these functions stores R0. */
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
+
+ /* If we saved LR, *tell* people about it! */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ rtx mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of rs6000_sr_alias_set, because of
+ __builtin_return_address. */
+ RTVEC_ELT (p, count++) = gen_rtx_SET (Pmode, mem,
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ }
+#endif
+ /* APPLE LOCAL end C++ EH and setjmp (radar 2866661) */
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ /* APPLE LOCAL callers_lr_already_saved */
+ callers_lr_already_saved = 1;
+ }
+
+ /* Save GPRs. This is done as a PARALLEL if we are using
+ the store-multiple instructions. */
+ if (!WORLD_SAVE_P (info) && using_store_multiple)
+ {
+ rtvec p;
+ int i;
+ p = rtvec_alloc (32 - info->first_gp_reg_save);
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ else if (!WORLD_SAVE_P (info))
+ {
+ int i;
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if ((regs_ever_live[info->first_gp_reg_save + i]
+ && (!call_used_regs[info->first_gp_reg_save + i]
+ || (i + info->first_gp_reg_save
+ == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && TARGET_TOC && TARGET_MINIMAL_TOC)))
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && ((current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ || cfun->machine->ra_needs_full_frame)))))
+ /* APPLE LOCAL end volatile pic base reg in leaves */
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ int offset = info->spe_gp_save_offset + sp_offset + 8 * i;
+ rtx b;
+
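+ /* The SPE doubleword load/store instructions accept only a
+ limited immediate offset (SPE_CONST_OFFSET_OK); for anything
+ larger, form the address in a scratch register.  */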
+ if (!SPE_CONST_OFFSET_OK (offset))
+ {
+ b = gen_rtx_REG (Pmode, FIXED_SCRATCH);
+ emit_move_insn (b, GEN_INT (offset));
+ }
+ else
+ b = GEN_INT (offset);
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
+ mem = gen_frame_mem (V2SImode, addr);
+ insn = emit_move_insn (mem, reg);
+
+ if (GET_CODE (b) == CONST_INT)
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ else
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ b, GEN_INT (offset));
+ }
+ else
+ {
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ }
+ }
+
+ /* ??? There's no need to emit actual instructions here, but it's the
+ easiest way to get the frame unwind information emitted. */
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i, regno;
+
+ /* In the AIX ABI we need to pretend we save r2 here; the slot
+ 5 * reg_size bytes from the frame base is reserved for it.  */
+ if (TARGET_AIX)
+ {
+ rtx addr, reg, mem;
+
+ reg = gen_rtx_REG (reg_mode, 2);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (sp_offset + 5 * reg_size));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ PATTERN (insn) = gen_blockage ();
+ }
+
+ for (i = 0; ; ++i)
+ {
+ regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i,
+ info->total_size);
+ }
+ }
+
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+ if (objc_method_using_pic)
+ rs6000_maybe_dead (
+ emit_move_insn (gen_rtx_REG (Pmode,
+ cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM
+ ? PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
+ gen_rtx_REG (Pmode, 12)));
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
+ /* Save lr if we used it. */
+ /* APPLE LOCAL callers_lr_already_saved */
+ if (!WORLD_SAVE_P (info) && info->lr_save_p && !callers_lr_already_saved)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ rtx reg = gen_rtx_REG (Pmode, 0);
+ rtx mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of frame_alias_set, because of
+ __builtin_return_address. */
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* Save CR if we use any that must be preserved. */
+ if (!WORLD_SAVE_P (info) && info->cr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_frame_mem (SImode, addr);
+ /* See the large comment above about why CR2_REGNO is used. */
+ rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
+
+ /* If r12 was used to hold the original sp, copy cr into r0 now
+ that it's free. */
+ if (REGNO (frame_reg_rtx) == 12)
+ {
+ rtx set;
+
+ cr_save_rtx = gen_rtx_REG (SImode, 0);
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
+
+ }
+ insn = emit_move_insn (mem, cr_save_rtx);
+
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* Update stack and set back pointer unless this is V.4,
+ for which it was done previously. */
+ /* APPLE LOCAL begin mainline */
+ if (!WORLD_SAVE_P (info) && info->push_p
+ && !(DEFAULT_ABI == ABI_V4 || current_function_calls_eh_return))
+ {
+ if (info->total_size < 32767)
+ sp_offset = info->total_size;
+ else
+ frame_reg_rtx = frame_ptr_rtx;
+
+ rs6000_emit_allocate_stack (info->total_size,
+ (frame_reg_rtx != sp_reg_rtx
+ && ((info->altivec_size != 0)
+ || (info->vrsave_mask != 0))));
+
+ if (frame_reg_rtx != sp_reg_rtx)
+ rs6000_emit_stack_tie ();
+ }
+ /* APPLE LOCAL end mainline */
+
+ /* Set frame pointer, if needed. */
+ if (frame_pointer_needed)
+ {
+ insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
+ sp_reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* APPLE LOCAL begin mainline */
+ /* Save AltiVec registers if needed. Save here because the red zone does
+ not include AltiVec registers. */
+ if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ {
+ int i;
+
+ /* There should be an out-of-line version of this for when we
+ are saving lots of vector registers.  */
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx areg, savereg, mem;
+ int offset;
+
+ offset = info->altivec_save_offset + sp_offset
+ + 16 * (i - info->first_altivec_reg_save);
+
+ savereg = gen_rtx_REG (V4SImode, i);
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn (areg, GEN_INT (offset));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ mem = gen_frame_mem (V4SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
+
+ insn = emit_move_insn (mem, savereg);
+
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ areg, GEN_INT (offset));
+ }
+ }
+
+ /* VRSAVE is a bit vector representing which AltiVec registers
+ are used. The OS uses this to determine which vector
+ registers to save on a context switch. We need to save
+ VRSAVE on the stack frame, add whatever AltiVec registers we
+ used in this function, and do the corresponding magic in the
+ epilogue. */
+
+ if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
+ && info->vrsave_mask != 0)
+ {
+ rtx reg, mem, vrsave;
+ int offset;
+
+ /* Get VRSAVE into a GPR.  Note that ABI_V4 might be using r12
+ as frame_reg_rtx and r11 as the static chain pointer for
+ nested functions.  */
+ reg = gen_rtx_REG (SImode, 0);
+ vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+ if (TARGET_MACHO)
+ emit_insn (gen_get_vrsave_internal (reg));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
+
+ if (!WORLD_SAVE_P (info))
+ {
+ /* Save VRSAVE. */
+ offset = info->vrsave_save_offset + sp_offset;
+ /* APPLE LOCAL begin 5774356 */
+ debug_vrsave_offset = offset;
+ debug_sp_offset = sp_offset;
+ /* APPLE LOCAL end 5774356 */
+ mem = gen_frame_mem (SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (offset)));
+ insn = emit_move_insn (mem, reg);
+ }
+
+ /* Include the registers in the mask. */
+ emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
+
+ insn = emit_insn (generate_set_vrsave (reg, info, 0));
+ }
+
+ /* APPLE LOCAL end mainline */
+ /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
+ if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
+ || (DEFAULT_ABI == ABI_V4
+ && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
+ && regs_ever_live[RS6000_PIC_OFFSET_TABLE_REGNUM]))
+ {
+ /* If emit_load_toc_table will use the link register, we need to save
+ it. We use R12 for this purpose because emit_load_toc_table
+ can use register 0. This allows us to use a plain 'blr' to return
+ from the procedure more often. */
+ int save_LR_around_toc_setup = (TARGET_ELF
+ && DEFAULT_ABI != ABI_AIX
+ && flag_pic
+ && ! info->lr_save_p
+ && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
+ if (save_LR_around_toc_setup)
+ {
+ rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+
+ insn = emit_move_insn (frame_ptr_rtx, lr);
+ rs6000_maybe_dead (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ rs6000_emit_load_toc_table (TRUE);
+
+ insn = emit_move_insn (lr, frame_ptr_rtx);
+ rs6000_maybe_dead (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ rs6000_emit_load_toc_table (TRUE);
+ }
+
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN
+ /* APPLE LOCAL special ObjC method use of R12 */
+ && !objc_method_using_pic
+ && flag_pic && current_function_uses_pic_offset_table)
+ {
+ rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+ rtx src = machopic_function_base_sym ();
+
+ /* Save and restore LR locally around this call (in R0). */
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (gen_rtx_REG (Pmode, 0), lr));
+
+ /* APPLE LOCAL begin performance enhancement */
+ if (!lr_already_set_up_for_pic)
+ rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (lr, src)));
+ /* APPLE LOCAL end performance enhancement */
+
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ insn = emit_move_insn (gen_rtx_REG (Pmode,
+ (cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
+ lr);
+ rs6000_maybe_dead (insn);
+ /* APPLE LOCAL end volatile pic base reg in leaves */
+
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (lr, gen_rtx_REG (Pmode, 0)));
+ }
+#endif
+}
+
+/* Write function prologue. */
+
+static void
+rs6000_output_function_prologue (FILE *file,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (TARGET_DEBUG_STACK)
+ debug_stack_info (info);
+
+ /* APPLE LOCAL do not extern fp save/restore */
+#if !TARGET_MACHO
+ /* Write .extern for any function we will call to save and restore
+ fp values. */
+ if (info->first_fp_reg_save < 64
+ && !FP_SAVE_INLINE (info->first_fp_reg_save))
+ fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
+ SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
+ RESTORE_FP_PREFIX, info->first_fp_reg_save - 32,
+ RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL do not extern fp save/restore */
+#endif /* !TARGET_MACHO */
+
+ /* Write .extern for AIX common mode routines, if needed. */
+ if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
+ {
+ fputs ("\t.extern __mulh\n", file);
+ fputs ("\t.extern __mull\n", file);
+ fputs ("\t.extern __divss\n", file);
+ fputs ("\t.extern __divus\n", file);
+ fputs ("\t.extern __quoss\n", file);
+ fputs ("\t.extern __quous\n", file);
+ common_mode_defined = 1;
+ }
+
+ if (! HAVE_prologue)
+ {
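+ /* No RTL prologue pattern is available, so build the prologue
+ insns here and print them as assembly text.  */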
+ start_sequence ();
+
+ /* A NOTE_INSN_DELETED is supposed to be at the start and end of
+ the "toplevel" insn chain. */
+ emit_note (NOTE_INSN_DELETED);
+ rs6000_emit_prologue ();
+ emit_note (NOTE_INSN_DELETED);
+
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
+ if (TARGET_DEBUG_STACK)
+ debug_rtx_list (get_insns (), 100);
+ final (get_insns (), file, FALSE);
+ end_sequence ();
+ }
+
+ rs6000_pic_labelno++;
+}
+
+/* Emit function epilogue as insns.
+
+ At present, dwarf2out_frame_debug_expr doesn't understand
+ register restores, so we don't bother setting RTX_FRAME_RELATED_P
+ anywhere in the epilogue. Most of the insns below would in any case
+ need special notes to explain where r11 is in relation to the stack. */
+
+void
+rs6000_emit_epilogue (int sibcall)
+{
+ rs6000_stack_t *info;
+ int restoring_FPRs_inline;
+ int using_load_multiple;
+ int using_mfcr_multiple;
+ int use_backchain_to_restore_sp;
+ int sp_offset = 0;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
+ rtx frame_reg_rtx = sp_reg_rtx;
+ enum machine_mode reg_mode = Pmode;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ int i;
+
+ /* APPLE LOCAL begin CW asm block */
+ if (cfun->iasm_asm_function && cfun->iasm_frame_size == -2)
+ {
+ rtvec p = rtvec_alloc (2);
+
+ RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ return;
+ }
+ /* APPLE LOCAL end CW asm block */
+
+ info = rs6000_stack_info ();
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
+
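+ /* Load-multiple (lmw) restores a contiguous block of GPRs ending
+ at r31, so it is only usable when every GPR from
+ first_gp_reg_save upward is restored here and none has been
+ made a global register.  */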
+ using_load_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+ && (!TARGET_SPE_ABI
+ || info->spe_64bit_regs_used == 0)
+ && info->first_gp_reg_save < 31
+ && no_global_regs_above (info->first_gp_reg_save));
+ restoring_FPRs_inline = (sibcall
+ || current_function_calls_eh_return
+ || info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save));
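+ /* With a frame pointer, an alloca call, or a frame too large for
+ a 16-bit immediate add, recover the old stack pointer from the
+ backchain word the prologue stored at 0(r1).  */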
+ use_backchain_to_restore_sp = (frame_pointer_needed
+ || current_function_calls_alloca
+ || info->total_size > 32767);
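+ /* On these processors (and when optimizing for size) a single
+ mtcrf restoring several CR fields is preferable to one mtcrf
+ per field.  */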
+ using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
+ || rs6000_cpu == PROCESSOR_PPC603
+ || rs6000_cpu == PROCESSOR_PPC750
+ /* APPLE LOCAL ? */
+ || rs6000_cpu == PROCESSOR_PPC7400
+ || optimize_size);
+
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+
+ /* eh_rest_world_r10 will return to the location saved in the LR
+ stack slot (which is not likely to be our caller).
+ Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
+ rest_world is similar, except any R10 parameter is ignored.
+ The exception-handling stuff that was here in 2.95 is no
+ longer necessary. */
+
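+ /* The 9 + 1 fixed elements are the RETURN, the LR and symbol
+ USEs, the R11 clobber, the CR2 restore, the R0, R12, R7 and R8
+ clobbers and the R10 USE; the remainder is one SET per restored
+ register.  */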
+ p = rtvec_alloc (9
+ + 1
+ + 32 - info->first_gp_reg_save
+ + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ + 63 + 1 - info->first_fp_reg_save);
+
+ strcpy (rname, ((current_function_calls_eh_return) ?
+ "*eh_rest_world_r10" : "*rest_world"));
+ alloc_rname = ggc_strdup (rname);
+
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
+ /* The instruction pattern requires a clobber here;
+ it is shared with the restVEC helper. */
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_fp_reg_save + i <= 63; i++)
+ {
+ rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + 8 * i));
+ rtx mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+
+ return;
+ }
+
+ /* APPLE LOCAL begin mainline */
+ /* Set sp_offset based on the stack push from the prologue. */
+ /* APPLE LOCAL begin 5774356 */
+ if (info->push_p
+ && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN || current_function_calls_eh_return)
+ /* APPLE LOCAL end 5774356 */
+ && info->total_size < 32767)
+ sp_offset = info->total_size;
+
+ /* Restore AltiVec registers if needed. */
+ if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ {
+ int i;
+
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx addr, areg, mem;
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn
+ (areg, GEN_INT (info->altivec_save_offset
+ + sp_offset
+ + 16 * (i - info->first_altivec_reg_save)));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+ mem = gen_frame_mem (V4SImode, addr);
+
+ emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
+ }
+ }
+
+ /* Restore VRSAVE if needed. */
+ if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
+ && info->vrsave_mask != 0)
+ {
+ rtx addr, mem, reg;
+
+ /* APPLE LOCAL begin 5774356 */
+ gcc_assert (debug_sp_offset == sp_offset);
+ gcc_assert (debug_vrsave_offset == (info->vrsave_save_offset + sp_offset));
+ /* APPLE LOCAL end 5774356 */
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->vrsave_save_offset + sp_offset));
+ mem = gen_frame_mem (SImode, addr);
+ reg = gen_rtx_REG (SImode, 12);
+ emit_move_insn (reg, mem);
+
+ emit_insn (generate_set_vrsave (reg, info, 1));
+ }
+
+ sp_offset = 0;
+
+ /* If we have a frame pointer, a call to alloca, or a large stack
+ frame, restore the old stack pointer using the backchain. Otherwise,
+ we know what size to update it with. */
+ if (use_backchain_to_restore_sp)
+ {
+ /* Under V.4, don't reset the stack pointer until after we're done
+ loading the saved registers. */
+ if (DEFAULT_ABI == ABI_V4)
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+
+ emit_move_insn (frame_reg_rtx,
+ gen_rtx_MEM (Pmode, sp_reg_rtx));
+ }
+ else if (info->push_p)
+ {
+ if (DEFAULT_ABI == ABI_V4
+ || current_function_calls_eh_return)
+ sp_offset = info->total_size;
+ else
+ {
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (info->total_size))
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (info->total_size)));
+ }
+ }
+
+ /* APPLE LOCAL end mainline */
+ /* Get the old lr if we saved it. */
+ if (info->lr_save_p)
+ {
+ rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
+ info->lr_save_offset + sp_offset);
+
+ emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
+ }
+
+ /* Get the old cr if we saved it. */
+ if (info->cr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_frame_mem (SImode, addr);
+
+ /* APPLE LOCAL begin use R11 because of ObjC use of R12 in sibcall to CTR */
+ emit_move_insn (gen_rtx_REG (SImode,
+ DEFAULT_ABI == ABI_DARWIN ? 11 : 12), mem);
+ /* APPLE LOCAL end use R11 because of ObjC use of R12 in sibcall to CTR */
+ }
+
+ /* Set LR here to try to overlap restores below. */
+ if (info->lr_save_p)
+ emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
+ gen_rtx_REG (Pmode, 0));
+
+ /* Load exception handler data registers, if needed. */
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i, regno;
+
+ if (TARGET_AIX)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (sp_offset + 5 * reg_size));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
+ }
+
+ for (i = 0; ; ++i)
+ {
+ rtx mem;
+
+ regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i);
+
+ emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
+ }
+ }
+
+ /* Restore GPRs. This is done as a PARALLEL if we are using
+ the load-multiple instructions. */
+ if (using_load_multiple)
+ {
+ rtvec p;
+ p = rtvec_alloc (32 - info->first_gp_reg_save);
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, i) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ mem);
+ }
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ }
+ else
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if ((regs_ever_live[info->first_gp_reg_save + i]
+ && (!call_used_regs[info->first_gp_reg_save + i]
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && TARGET_TOC && TARGET_MINIMAL_TOC)))
+ || (i + info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ /* APPLE LOCAL begin darwin native */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && ((current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ || cfun->machine->ra_needs_full_frame)))))
+
+ /* APPLE LOCAL end darwin native */
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ /* Restore 64-bit quantities for SPE. */
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ int offset = info->spe_gp_save_offset + sp_offset + 8 * i;
+ rtx b;
+
+ if (!SPE_CONST_OFFSET_OK (offset))
+ {
+ b = gen_rtx_REG (Pmode, FIXED_SCRATCH);
+ emit_move_insn (b, GEN_INT (offset));
+ }
+ else
+ b = GEN_INT (offset);
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, b);
+ mem = gen_frame_mem (V2SImode, addr);
+ }
+
+ emit_move_insn (gen_rtx_REG (reg_mode,
+ info->first_gp_reg_save + i), mem);
+ }
+
+ /* Restore fpr's if we need to do it without calling a function. */
+ if (restoring_FPRs_inline)
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ if ((regs_ever_live[info->first_fp_reg_save+i]
+ && ! call_used_regs[info->first_fp_reg_save+i]))
+ {
+ rtx addr, mem;
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset
+ + 8 * i));
+ mem = gen_frame_mem (DFmode, addr);
+
+ emit_move_insn (gen_rtx_REG (DFmode,
+ info->first_fp_reg_save + i),
+ mem);
+ }
+
+ /* If we saved the CR, restore it here -- just the fields that
+ were actually used.  */
+ if (info->cr_save_p)
+ {
+ /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */
+ /* APPLE LOCAL silly name retained to minimize deviation from FSF */
+ rtx r12_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 11 : 12);
+ int count = 0;
+
+ if (using_mfcr_multiple)
+ {
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+ count++;
+ gcc_assert (count);
+ }
+
+ if (using_mfcr_multiple && count > 1)
+ {
+ rtvec p;
+ int ndx;
+
+ p = rtvec_alloc (count);
+
+ ndx = 0;
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+ {
+ rtvec r = rtvec_alloc (2);
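+ /* The UNSPEC operands are the GPR holding the saved CR and the
+ mtcrf field mask; CR0 is the most significant of the eight
+ 4-bit fields, hence 1 << (7 - i).  */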
+ RTVEC_ELT (r, 0) = r12_rtx;
+ RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
+ RTVEC_ELT (p, ndx) =
+ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+ gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
+ ndx++;
+ }
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ gcc_assert (ndx == count);
+ }
+ else
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+ {
+ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
+ CR0_REGNO+i),
+ r12_rtx));
+ }
+ }
+
+ /* If this is V.4, unwind the stack pointer after all of the loads
+ have been done. */
+ if (frame_reg_rtx != sp_reg_rtx)
+ {
+ /* This blockage is needed so that sched doesn't decide to move
+ the sp change before the register restores. */
+ rs6000_emit_stack_tie ();
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
+ }
+ else if (sp_offset != 0)
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset))
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (sp_offset)));
+
+ if (current_function_calls_eh_return)
+ {
+ rtx sa = EH_RETURN_STACKADJ_RTX;
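+ /* The eh_return handler supplies an extra stack adjustment in
+ EH_RETURN_STACKADJ_RTX; apply it on top of the normal frame pop.  */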
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, sa)
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, sa));
+ }
+
+ if (!sibcall)
+ {
+ rtvec p;
+ if (! restoring_FPRs_inline)
+ p = rtvec_alloc (3 + 64 - info->first_fp_reg_save);
+ else
+ p = rtvec_alloc (2);
+
+ RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+
+ /* If we have to restore more than two FP registers, branch to the
+ restore function. It will return to our caller. */
+ if (! restoring_FPRs_inline)
+ {
+ int i;
+ char rname[30];
+ const char *alloc_rname;
+
+ /* APPLE LOCAL begin Reduce code size / improve performance */
+#if TARGET_MACHO
+ /* We have to calculate the offset into RESTFP to where we
+ must call (!!).  RESTFP also restores the caller's LR from
+ 8(R1).  RESTFP should *never* be called to restore only F31.  */
+
+ if (info->lr_save_offset != (POINTER_SIZE / 4) || info->first_fp_reg_save == 63)
+ abort ();
+
+ sprintf (rname, "*restFP%s%.0d ; restore f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
+#else
+ /* APPLE LOCAL end Reduce code size / improve performance */
+ sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
+ info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL Reduce code size / improve performance */
+#endif /* TARGET_MACHO */
+ alloc_rname = ggc_strdup (rname);
+ RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ alloc_rname));
+
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx addr, mem;
+ addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
+ GEN_INT (info->fp_save_offset + 8*i));
+ mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, i+3) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
+ mem);
+ }
+ }
+
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ }
+}
+
+/* Write function epilogue. */
+
+static void
+rs6000_output_function_epilogue (FILE *file,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ if (! HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ /* If the last insn was a BARRIER, we don't have to write anything except
+ the trace table. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn == 0 || GET_CODE (insn) != BARRIER)
+ {
+ /* This is slightly ugly, but at least we don't have two
+ copies of the epilogue-emitting code. */
+ start_sequence ();
+
+ /* A NOTE_INSN_DELETED is supposed to be at the start
+ and end of the "toplevel" insn chain. */
+ emit_note (NOTE_INSN_DELETED);
+ rs6000_emit_epilogue (FALSE);
+ emit_note (NOTE_INSN_DELETED);
+
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
+ if (TARGET_DEBUG_STACK)
+ debug_rtx_list (get_insns (), 100);
+ final (get_insns (), file, FALSE);
+ end_sequence ();
+ }
+ }
+
+#if TARGET_MACHO
+ macho_branch_islands ();
+ /* Mach-O doesn't support labels at the end of objects, so if
+ it looks like we might want one, insert a NOP. */
+ {
+ rtx insn = get_last_insn ();
+ while (insn
+ && NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
+ insn = PREV_INSN (insn);
+ if (insn
+ && (LABEL_P (insn)
+ || (NOTE_P (insn)
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
+ fputs ("\tnop\n", file);
+ }
+#endif
+
+ /* Output a traceback table here. See /usr/include/sys/debug.h for info
+ on its format.
+
+ We don't output a traceback table if -finhibit-size-directive was
+ used. The documentation for -finhibit-size-directive reads
+ ``don't output a @code{.size} assembler directive, or anything
+ else that would cause trouble if the function is split in the
+ middle, and the two halves are placed at locations far apart in
+ memory.'' The traceback table has this property, since it
+ includes the offset from the start of the function to the
+ traceback table itself.
+
+ System V.4 PowerPC (and the embedded ABI derived from it) uses a
+ different traceback table.  */
+ if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
+ && rs6000_traceback != traceback_none && !current_function_is_thunk)
+ {
+ const char *fname = NULL;
+ const char *language_string = lang_hooks.name;
+ int fixed_parms = 0, float_parms = 0, parm_info = 0;
+ int i;
+ int optional_tbtab;
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (rs6000_traceback == traceback_full)
+ optional_tbtab = 1;
+ else if (rs6000_traceback == traceback_part)
+ optional_tbtab = 0;
+ else
+ optional_tbtab = !optimize_size && !TARGET_ELF;
+
+ if (optional_tbtab)
+ {
+ fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ while (*fname == '.') /* V.4 encodes . in the name */
+ fname++;
+
+ /* Need label immediately before tbtab, so we can compute
+ its offset from the function start. */
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+ ASM_OUTPUT_LABEL (file, fname);
+ }
+
+ /* The .tbtab pseudo-op can only be used for the first eight
+ expressions, since it can't handle the possibly variable
+ length fields that follow.  However, if you omit the optional
+ fields, the assembler outputs zeros for all optional fields
+ anyway, giving each variable length field its minimum length
+ (as defined in sys/debug.h).  Thus we cannot use the .tbtab
+ pseudo-op at all.  */
+
+ /* An all-zero word flags the start of the tbtab, for debuggers
+ that have to find it by searching forward from the entry
+ point or from the current pc. */
+ fputs ("\t.long 0\n", file);
+
+ /* Tbtab format type. Use format type 0. */
+ fputs ("\t.byte 0,", file);
+
+ /* Language type. Unfortunately, there does not seem to be any
+ official way to discover the language being compiled, so we
+ use language_string.
+ C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
+ Java is 13. Objective-C is 14. Objective-C++ isn't assigned
+ a number, so for now use 9. */
+ if (! strcmp (language_string, "GNU C"))
+ i = 0;
+ else if (! strcmp (language_string, "GNU F77")
+ || ! strcmp (language_string, "GNU F95"))
+ i = 1;
+ else if (! strcmp (language_string, "GNU Pascal"))
+ i = 2;
+ else if (! strcmp (language_string, "GNU Ada"))
+ i = 3;
+ else if (! strcmp (language_string, "GNU C++")
+ || ! strcmp (language_string, "GNU Objective-C++"))
+ i = 9;
+ else if (! strcmp (language_string, "GNU Java"))
+ i = 13;
+ else if (! strcmp (language_string, "GNU Objective-C"))
+ i = 14;
+ else
+ gcc_unreachable ();
+ fprintf (file, "%d,", i);
+
+ /* 8 single bit fields: global linkage (not set for C extern linkage,
+ apparently a PL/I convention?), out-of-line epilogue/prologue, offset
+ from start of procedure stored in tbtab, internal function, function
+ has controlled storage, function has no toc, function uses fp,
+ function logs/aborts fp operations. */
+ /* Assume that fp operations are used if any fp reg must be saved. */
+ fprintf (file, "%d,",
+ (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
+
+ /* 6 bitfields: function is interrupt handler, name present in
+ proc table, function calls alloca, on condition directives
+ (controls stack walks, 3 bits), saves condition reg, saves
+ link reg. */
+ /* The `function calls alloca' bit seems to be set whenever reg 31 is
+ set up as a frame pointer, even when there is no alloca call. */
+ fprintf (file, "%d,",
+ ((optional_tbtab << 6)
+ | ((optional_tbtab & frame_pointer_needed) << 5)
+ | (info->cr_save_p << 1)
+ | (info->lr_save_p)));
+
+ /* 3 bitfields: saves backchain, fixup code, number of fpr saved
+ (6 bits). */
+ fprintf (file, "%d,",
+ (info->push_p << 7) | (64 - info->first_fp_reg_save));
+
+ /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
+ fprintf (file, "%d,", (32 - first_reg_to_save ()));
+
+ if (optional_tbtab)
+ {
+ /* Compute the parameter info from the function decl argument
+ list. */
+ tree decl;
+ int next_parm_info_bit = 31;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ {
+ rtx parameter = DECL_INCOMING_RTL (decl);
+ enum machine_mode mode = GET_MODE (parameter);
+
+ if (GET_CODE (parameter) == REG)
+ {
+ if (SCALAR_FLOAT_MODE_P (mode))
+ {
+ int bits;
+
+ float_parms++;
+
+ switch (mode)
+ {
+ case SFmode:
+ bits = 0x2;
+ break;
+
+ case DFmode:
+ case TFmode:
+ bits = 0x3;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If only one bit will fit, don't or in this entry. */
+ if (next_parm_info_bit > 0)
+ parm_info |= (bits << (next_parm_info_bit - 1));
+ next_parm_info_bit -= 2;
+ }
+ else
+ {
+ fixed_parms += ((GET_MODE_SIZE (mode)
+ + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD);
+ next_parm_info_bit -= 1;
+ }
+ }
+ }
+ }
+
+ /* Number of fixed point parameters. */
+ /* This is actually the number of words of fixed point parameters;
+ thus an 8-byte struct counts as 2, and the maximum value is 8.  */
+ fprintf (file, "%d,", fixed_parms);
+
+ /* 2 bitfields: number of floating point parameters (7 bits), parameters
+ all on stack. */
+ /* This is actually the number of FP registers that hold parameters,
+ and thus the maximum value is 13.  */
+ /* Set parameters on stack bit if parameters are not in their original
+ registers, regardless of whether they are on the stack? Xlc
+ seems to set the bit when not optimizing. */
+ fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
+
+ if (! optional_tbtab)
+ return;
+
+ /* Optional fields follow. Some are variable length. */
+
+ /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
+ 11 double float. */
+ /* There is an entry for each parameter in a register, in the order that
+ they occur in the parameter list. Any intervening arguments on the
+ stack are ignored. If the list overflows a long (max possible length
+ 34 bits) then completely leave off all elements that don't fit. */
+ /* Only emit this long if there was at least one parameter. */
+ if (fixed_parms || float_parms)
+ fprintf (file, "\t.long %d\n", parm_info);
+
+ /* Offset from start of code to tb table. */
+ fputs ("\t.long ", file);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+ putc ('-', file);
+ rs6000_output_function_entry (file, fname);
+ putc ('\n', file);
+
+ /* Interrupt handler mask. */
+ /* Omit this long, since we never set the interrupt handler bit
+ above. */
+
+ /* Number of CTL (controlled storage) anchors. */
+ /* Omit this long, since the has_ctl bit is never set above. */
+
+ /* Displacement into stack of each CTL anchor. */
+ /* Omit this list of longs, because there are no CTL anchors. */
+
+ /* Length of function name. */
+ if (*fname == '*')
+ ++fname;
+ fprintf (file, "\t.short %d\n", (int) strlen (fname));
+
+ /* Function name. */
+ assemble_string (fname, strlen (fname));
+
+ /* Register for alloca automatic storage; this is always reg 31.
+ Only emit this if the alloca bit was set above. */
+ if (frame_pointer_needed)
+ fputs ("\t.byte 31\n", file);
+
+ fputs ("\t.align 2\n", file);
+ }
+}
+
+/* A C compound statement that outputs the assembler code for a thunk
+ function, used to implement C++ virtual function calls with
+ multiple inheritance. The thunk acts as a wrapper around a virtual
+ function, adjusting the implicit object parameter before handing
+ control off to the real function.
+
+ First, emit code to add the integer DELTA to the location that
+ contains the incoming first argument. Assume that this argument
+ contains a pointer, and is the one used to pass the `this' pointer
+ in C++. This is the incoming argument *before* the function
+ prologue, e.g. `%o0' on a sparc. The addition must preserve the
+ values of all other incoming arguments.
+
+ After the addition, emit code to jump to FUNCTION, which is a
+ `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
+ not touch the return address. Hence returning from FUNCTION will
+ return to whoever called the current `thunk'.
+
+ The effect must be as if FUNCTION had been called directly with the
+ adjusted first argument. This macro is responsible for emitting
+ all of the code for a thunk function; output_function_prologue()
+ and output_function_epilogue() are not invoked.
+
+ The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
+ been extracted from it.) It might possibly be useful on some
+ targets, but probably not.
+
+ If you do not define this macro, the target-independent code in the
+ C++ frontend will generate a less efficient heavyweight thunk that
+ calls FUNCTION instead of jumping to it. The generic approach does
+ not support varargs. */
+
+static void
+rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ rtx this, insn, funexp;
+ /* APPLE LOCAL begin 4299630 */
+ bool is_longcall_p = false;  /* Initialized in case the heuristics
+ below do not apply.  */
+ rtx symbol_ref;
+ /* APPLE LOCAL end 4299630 */
+
+ reload_completed = 1;
+ epilogue_completed = 1;
+ no_new_pseudos = 1;
+ reset_block_changes ();
+
+ /* Mark the end of the (empty) prologue. */
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ /* Find the "this" pointer. If the function returns a structure,
+ the structure return pointer is in r3. */
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this = gen_rtx_REG (Pmode, 4);
+ else
+ this = gen_rtx_REG (Pmode, 3);
+
+ /* Apply the constant offset, if required. */
+ if (delta)
+ {
+ rtx delta_rtx = GEN_INT (delta);
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (this, this, delta_rtx)
+ : gen_adddi3 (this, this, delta_rtx));
+ }
+
+ /* Apply the offset from the vtable, if required. */
+ if (vcall_offset)
+ {
+ rtx vcall_offset_rtx = GEN_INT (vcall_offset);
+ rtx tmp = gen_rtx_REG (Pmode, 12);
+
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
+ if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
+ {
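+ /* The vcall offset does not fit in a signed 16-bit displacement,
+ so add it to the vtable pointer explicitly before loading.  */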
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
+ : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
+ }
+ else
+ {
+ rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
+
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
+ }
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (this, this, tmp)
+ : gen_adddi3 (this, this, tmp));
+ }
+
+ /* Generate a tail call to the target function. */
+ if (!TREE_USED (function))
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ }
+ funexp = XEXP (DECL_RTL (function), 0);
+ /* APPLE LOCAL 4299630 */
+ symbol_ref = funexp;
+ funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ funexp = machopic_indirect_call_target (funexp);
+#endif
+
+ /* APPLE LOCAL begin 4299630 */
+ if (DEFAULT_ABI == ABI_DARWIN
+ || (*targetm.binds_local_p) (function))
+ {
+ tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (function));
+ if (lookup_attribute ("shortcall", attr_list))
+ is_longcall_p = FALSE;
+ else if (lookup_attribute ("longcall", attr_list))
+ is_longcall_p = TRUE;
+ else
+ is_longcall_p = (rs6000_default_long_calls);
+ }
+ if (!is_longcall_p)
+ {
+ /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
+ generate sibcall RTL explicitly. */
+ insn = emit_call_insn (
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (4,
+ gen_rtx_CALL (VOIDmode,
+ funexp, const0_rtx),
+ gen_rtx_USE (VOIDmode, const0_rtx),
+ gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (SImode,
+ LINK_REGISTER_REGNUM)),
+ gen_rtx_RETURN (VOIDmode))));
+ SIBLING_CALL_P (insn) = 1;
+ }
+ else
+ {
+ /* APPLE LOCAL begin 4380289 */
+ tree label_decl;
+ int line_number = 0;
+ /* APPLE LOCAL end 4380289 */
+ /* APPLE LOCAL begin 3910248, 3915171 */
+ for (insn = get_last_insn ();
+ insn && (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) < 0);
+ insn = PREV_INSN (insn))
+ ;
+ /* APPLE LOCAL end 3910248, 3915171 */
+ if (insn)
+ line_number = NOTE_LINE_NUMBER (insn);
+ /* APPLE LOCAL begin 4380289 */
+ /* This JMP is in a coalesced section, and Mach-O forbids
+ directly referencing anything else in a coalesced section; if
+ our target gets coalesced away, the linker (static or
+ dynamic) won't know where to send our JMP.  Ergo, force a
+ stub.  */
+ label_decl = add_compiler_branch_island (function, line_number);
+ /* Emit "jmp <function>, L42", and define L42 as a branch island. */
+ insn = emit_jump_insn (gen_longjump (label_rtx (label_decl),
+ XEXP (DECL_RTL (function), 0)));
+ /* APPLE LOCAL end 4380289 */
+ }
+ /* APPLE LOCAL end 4299630 */
+ emit_barrier ();
+
+ /* Run just enough of rest_of_compilation to get the insns emitted.
+ There's not really enough bulk here to make other passes such as
+ instruction scheduling worthwhile.  Note that use_thunk calls
+ assemble_start_function and assemble_end_function.  */
+ insn = get_insns ();
+ insn_locators_initialize ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+
+ reload_completed = 0;
+ epilogue_completed = 0;
+ no_new_pseudos = 0;
+}
+
+/* A quick summary of the various types of 'constant-pool tables'
+ under PowerPC:
+
+ Target Flags Name One table per
+ AIX (none) AIX TOC object file
+ AIX -mfull-toc AIX TOC object file
+ AIX -mminimal-toc AIX minimal TOC translation unit
+ SVR4/EABI (none) SVR4 SDATA object file
+ SVR4/EABI -fpic SVR4 pic object file
+ SVR4/EABI -fPIC SVR4 PIC translation unit
+ SVR4/EABI -mrelocatable EABI TOC function
+ SVR4/EABI -maix AIX TOC object file
+ SVR4/EABI -maix -mminimal-toc
+ AIX minimal TOC translation unit
+
+ Name Reg. Set by entries contains:
+ made by addrs? fp? sum?
+
+ AIX TOC 2 crt0 as Y option option
+ AIX minimal TOC 30 prolog gcc Y Y option
+ SVR4 SDATA 13 crt0 gcc N Y N
+ SVR4 pic 30 prolog ld Y not yet N
+ SVR4 PIC 30 prolog gcc Y option option
+ EABI TOC 30 prolog gcc Y option option
+
+*/
+
+/* Hash functions for the hash table. */
+
+static unsigned
+rs6000_hash_constant (rtx k)
+{
+ enum rtx_code code = GET_CODE (k);
+ enum machine_mode mode = GET_MODE (k);
+ unsigned result = (code << 3) ^ mode;
+ const char *format;
+ int flen, fidx;
+
+ format = GET_RTX_FORMAT (code);
+ flen = strlen (format);
+ fidx = 0;
+
+ switch (code)
+ {
+ case LABEL_REF:
+ return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
+
+ case CONST_DOUBLE:
+ if (mode != VOIDmode)
+ return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
+ flen = 2;
+ break;
+
+ case CODE_LABEL:
+ fidx = 3;
+ break;
+
+ default:
+ break;
+ }
+
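+ /* Fold the remaining operands into the hash according to their
+ RTL format letters (613 and 1231 are arbitrary multipliers).  */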
+ for (; fidx < flen; fidx++)
+ switch (format[fidx])
+ {
+ case 's':
+ {
+ unsigned i, len;
+ const char *str = XSTR (k, fidx);
+ len = strlen (str);
+ result = result * 613 + len;
+ for (i = 0; i < len; i++)
+ result = result * 613 + (unsigned) str[i];
+ break;
+ }
+ case 'u':
+ case 'e':
+ result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
+ break;
+ case 'i':
+ case 'n':
+ result = result * 613 + (unsigned) XINT (k, fidx);
+ break;
+ case 'w':
+ if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
+ result = result * 613 + (unsigned) XWINT (k, fidx);
+ else
+ {
+ size_t i;
+ for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
+ result = result * 613 + (unsigned) (XWINT (k, fidx)
+ >> CHAR_BIT * i);
+ }
+ break;
+ case '0':
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return result;
+}
+
+static unsigned
+toc_hash_function (const void *hash_entry)
+{
+ const struct toc_hash_struct *thc =
+ (const struct toc_hash_struct *) hash_entry;
+ return rs6000_hash_constant (thc->key) ^ thc->key_mode;
+}
+
+/* Compare H1 and H2 for equivalence. */
+
+static int
+toc_hash_eq (const void *h1, const void *h2)
+{
+ rtx r1 = ((const struct toc_hash_struct *) h1)->key;
+ rtx r2 = ((const struct toc_hash_struct *) h2)->key;
+
+ if (((const struct toc_hash_struct *) h1)->key_mode
+ != ((const struct toc_hash_struct *) h2)->key_mode)
+ return 0;
+
+ return rtx_equal_p (r1, r2);
+}
+
+/* These are the names given by the C++ front-end to vtables and
+ vtable-like objects.  Ideally, this logic should not be here;
+ instead, there should be some programmatic way of inquiring
+ whether or not an object is a vtable.  */
+
+#define VTABLE_NAME_P(NAME) \
+ (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
+ || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
+ || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
+ || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
+ || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
+
+void
+rs6000_output_symbol_ref (FILE *file, rtx x)
+{
+ /* Currently C++ toc references to vtables can be emitted before it
+ is decided whether the vtable is public or private.  If this is
+ the case, then the linker will eventually complain that there is
+ a reference to an unknown section.  Thus, for vtables only,
+ we emit the TOC reference against the symbol rather than the
+ section.  */
+ const char *name = XSTR (x, 0);
+
+ if (VTABLE_NAME_P (name))
+ {
+ RS6000_OUTPUT_BASENAME (file, name);
+ }
+ else
+ assemble_name (file, name);
+}
+
+/* Output a TOC entry. We derive the entry name from what is being
+ written. */
+
+void
+output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
+{
+ char buf[256];
+ const char *name = buf;
+ const char *real_name;
+ rtx base = x;
+ HOST_WIDE_INT offset = 0;
+
+ gcc_assert (!TARGET_NO_TOC);
+
+ /* When the linker won't eliminate them, don't output duplicate
+ TOC entries (this happens on AIX if there is any kind of TOC,
+ and on SVR4 under -fPIC or -mrelocatable). Don't do this for
+ CODE_LABELs. */
+ if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
+ {
+ struct toc_hash_struct *h;
+ void * * found;
+
+ /* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
+ time because GGC is not initialized at that point. */
+ if (toc_hash_table == NULL)
+ toc_hash_table = htab_create_ggc (1021, toc_hash_function,
+ toc_hash_eq, NULL);
+
+ h = ggc_alloc (sizeof (*h));
+ h->key = x;
+ h->key_mode = mode;
+ h->labelno = labelno;
+
+ found = htab_find_slot (toc_hash_table, h, 1);
+ if (*found == NULL)
+ *found = h;
+ else /* This is indeed a duplicate.
+ Set this label equal to that label. */
+ {
+ fputs ("\t.set ", file);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
+ fprintf (file, "%d,", labelno);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
+ fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
+ found)->labelno));
+ return;
+ }
+ }
+
+ /* If we're going to put a double constant in the TOC, make sure it's
+ aligned properly when strict alignment is on. */
+ if (GET_CODE (x) == CONST_DOUBLE
+ && STRICT_ALIGNMENT
+ && GET_MODE_BITSIZE (mode) >= 64
+ && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
+ ASM_OUTPUT_ALIGN (file, 3);
+
+ (*targetm.asm_out.internal_label) (file, "LC", labelno);
+
+ /* Handle FP constants specially. Note that if we have a minimal
+ TOC, things we put here aren't actually in the TOC, so we can allow
+ FP constants. */
+ if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long k[4];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long k[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FD_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ fprintf (file, "0x%lx%08lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FD_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long l;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
+ fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
+ fprintf (file, "0x%lx\n", l & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
+ {
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
+
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ low = CONST_DOUBLE_LOW (x);
+ high = CONST_DOUBLE_HIGH (x);
+ }
+ else
+#if HOST_BITS_PER_WIDE_INT == 32
+ {
+ low = INTVAL (x);
+ high = (low & 0x80000000) ? ~0 : 0;
+ }
+#else
+ {
+ low = INTVAL (x) & 0xffffffff;
+ high = (HOST_WIDE_INT) INTVAL (x) >> 32;
+ }
+#endif
+
+ /* TOC entries are always Pmode-sized, and since this is a
+ big-endian machine, smaller integer constants placed in the
+ TOC have to be padded.  (This is still a win over putting
+ the constants in a separate constant pool, because then
+ we'd have to have both a TOC entry _and_ the actual constant.)
+
+ For a 32-bit target, CONST_INT values are loaded and shifted
+ entirely within `low' and can be stored in one TOC entry.  */
+
+ /* It would be easy to make this work, but it doesn't now. */
+ gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
+
+ if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
+ {
+#if HOST_BITS_PER_WIDE_INT == 32
+ lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
+ POINTER_SIZE, &low, &high, 0);
+#else
+ low |= high << 32;
+ low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
+ high = (HOST_WIDE_INT) low >> 32;
+ low &= 0xffffffff;
+#endif
+ }
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc ID_%lx_%lx[TC],",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ fprintf (file, "0x%lx%08lx\n",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc ID_%lx_%lx[TC],",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx\n",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
+ fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
+ }
+ return;
+ }
+ }
+
+ if (GET_CODE (x) == CONST)
+ {
+ gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);
+
+ base = XEXP (XEXP (x, 0), 0);
+ offset = INTVAL (XEXP (XEXP (x, 0), 1));
+ }
+
+ switch (GET_CODE (base))
+ {
+ case SYMBOL_REF:
+ name = XSTR (base, 0);
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L",
+ CODE_LABEL_NUMBER (XEXP (base, 0)));
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ real_name = (*targetm.strip_name_encoding) (name);
+ if (TARGET_MINIMAL_TOC)
+ fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
+ else
+ {
+ fprintf (file, "\t.tc %s", real_name);
+
+ if (offset < 0)
+ fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
+ else if (offset)
+ fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
+
+ fputs ("[TC],", file);
+ }
+
+ /* Currently C++ TOC references to vtables can be emitted before it
+ is decided whether the vtable is public or private. If this is
+ the case, then the linker will eventually complain that there is
+ a TOC reference to an unknown section. Thus, for vtables only,
+ we emit the TOC reference to reference the symbol and not the
+ section. */
+ if (VTABLE_NAME_P (name))
+ {
+ RS6000_OUTPUT_BASENAME (file, name);
+ if (offset < 0)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
+ else if (offset > 0)
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
+ }
+ else
+ output_addr_const (file, x);
+ putc ('\n', file);
+}
+
+/* Output an assembler pseudo-op to write an ASCII string of N characters
+ starting at P to FILE.
+
+ On the RS/6000, we have to do this using the .byte operation and
+ write out special characters outside the quoted string.
+ Also, the assembler is broken; very long strings are truncated,
+ so we must artificially break them up early. */
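+
+/* Worked example (an illustrative sketch, not in the original
+ sources): the four input bytes 'H' 'i' '"' '\n' come out as
+
+ .byte "Hi"""
+ .byte 10
+
+ printable runs are quoted (with '"' doubled inside the string) and
+ all other bytes are written as decimal values. */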
+
+void
+output_ascii (FILE *file, const char *p, int n)
+{
+ char c;
+ int i, count_string;
+ const char *for_string = "\t.byte \"";
+ const char *for_decimal = "\t.byte ";
+ const char *to_close = NULL;
+
+ count_string = 0;
+ for (i = 0; i < n; i++)
+ {
+ c = *p++;
+ if (c >= ' ' && c < 0177)
+ {
+ if (for_string)
+ fputs (for_string, file);
+ putc (c, file);
+
+ /* Write two quotes to get one. */
+ if (c == '"')
+ {
+ putc (c, file);
+ ++count_string;
+ }
+
+ for_string = NULL;
+ for_decimal = "\"\n\t.byte ";
+ to_close = "\"\n";
+ ++count_string;
+
+ if (count_string >= 512)
+ {
+ fputs (to_close, file);
+
+ for_string = "\t.byte \"";
+ for_decimal = "\t.byte ";
+ to_close = NULL;
+ count_string = 0;
+ }
+ }
+ else
+ {
+ if (for_decimal)
+ fputs (for_decimal, file);
+ fprintf (file, "%d", c);
+
+ for_string = "\n\t.byte \"";
+ for_decimal = ", ";
+ to_close = "\n";
+ count_string = 0;
+ }
+ }
+
+ /* Now close the string if we have written one. Then end the line. */
+ if (to_close)
+ fputs (to_close, file);
+}
+
+/* Generate a unique section name for FILENAME for a section type
+ represented by SECTION_DESC. Output goes into BUF.
+
+ SECTION_DESC can be any string, as long as it is different for each
+ possible section type.
+
+ We name the section in the same manner as xlc. The name begins with an
+ underscore followed by the filename (after stripping any leading directory
+ names) with the last period replaced by the string SECTION_DESC. If
+ FILENAME does not contain a period, SECTION_DESC is appended to the end of
+ the name. */
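+
+/* Example (illustrative only): FILENAME "dir/foo.c" with
+ SECTION_DESC "_bss" yields "_foo_bss", while FILENAME "bar"
+ (no period) yields "_bar_bss". */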
+
+void
+rs6000_gen_section_name (char **buf, const char *filename,
+ const char *section_desc)
+{
+ const char *q, *after_last_slash, *last_period = 0;
+ char *p;
+ int len;
+
+ after_last_slash = filename;
+ for (q = filename; *q; q++)
+ {
+ if (*q == '/')
+ after_last_slash = q + 1;
+ else if (*q == '.')
+ last_period = q;
+ }
+
+ len = strlen (after_last_slash) + strlen (section_desc) + 2;
+ *buf = (char *) xmalloc (len);
+
+ p = *buf;
+ *p++ = '_';
+
+ for (q = after_last_slash; *q; q++)
+ {
+ if (q == last_period)
+ {
+ strcpy (p, section_desc);
+ p += strlen (section_desc);
+ break;
+ }
+
+ else if (ISALNUM (*q))
+ *p++ = *q;
+ }
+
+ if (last_period == 0)
+ strcpy (p, section_desc);
+ else
+ *p = '\0';
+}
+
+/* Emit profile function. */
+
+void
+output_profile_hook (int labelno ATTRIBUTE_UNUSED)
+{
+ /* Non-standard profiling for kernels, which just saves LR then calls
+ _mcount without worrying about arg saves. The idea is to change
+ the function prologue as little as possible as it isn't easy to
+ account for arg save/restore code added just for _mcount. */
+ if (TARGET_PROFILE_KERNEL)
+ return;
+
+ if (DEFAULT_ABI == ABI_AIX)
+ {
+#ifndef NO_PROFILE_COUNTERS
+# define NO_PROFILE_COUNTERS 0
+#endif
+ if (NO_PROFILE_COUNTERS)
+ emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
+ else
+ {
+ char buf[30];
+ const char *label_name;
+ rtx fun;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
+ fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
+
+ emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 1,
+ fun, Pmode);
+ }
+ }
+ else if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ const char *mcount_name = RS6000_MCOUNT;
+ int caller_addr_regno = LINK_REGISTER_REGNUM;
+
+ /* Be conservative and always set this, at least for now. */
+ current_function_uses_pic_offset_table = 1;
+
+#if TARGET_MACHO
+ /* For PIC code, set up a stub and collect the caller's address
+ from r0, which is where the prologue puts it. */
+ if (MACHOPIC_INDIRECT
+ && current_function_uses_pic_offset_table)
+ caller_addr_regno = 0;
+#endif
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
+ 0, VOIDmode, 1,
+ gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
+ }
+}
+
+/* Write function profiler code. */
+
+void
+output_function_profiler (FILE *file, int labelno)
+{
+ char buf[100];
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_V4:
+ if (!TARGET_32BIT)
+ {
+ warning (0, "no profiling of 64-bit code for this ABI");
+ return;
+ }
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ fprintf (file, "\tmflr %s\n", reg_names[0]);
+ if (NO_PROFILE_COUNTERS)
+ {
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ }
+ else if (TARGET_SECURE_PLT && flag_pic)
+ {
+ asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
+ asm_fprintf (file, "\t{cau|addis} %s,%s,",
+ reg_names[12], reg_names[12]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
+ }
+ else if (flag_pic == 1)
+ {
+ fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
+ asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "@got(%s)\n", reg_names[12]);
+ }
+ else if (flag_pic > 1)
+ {
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ /* Now, we need to get the address of the label. */
+ fputs ("\tbcl 20,31,1f\n\t.long ", file);
+ assemble_name (file, buf);
+ fputs ("-.\n1:", file);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
+ asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
+ reg_names[0], reg_names[11]);
+ asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
+ reg_names[0], reg_names[0], reg_names[11]);
+ }
+ else
+ {
+ asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
+ assemble_name (file, buf);
+ fputs ("@ha\n", file);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "@l(%s)\n", reg_names[12]);
+ }
+
+ /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
+ fprintf (file, "\tbl %s%s\n",
+ RS6000_MCOUNT, flag_pic ? "@plt" : "");
+ break;
+
+ case ABI_AIX:
+ case ABI_DARWIN:
+ if (!TARGET_PROFILE_KERNEL)
+ {
+ /* Don't do anything, done in output_profile_hook (). */
+ }
+ else
+ {
+ gcc_assert (!TARGET_32BIT);
+
+ asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
+ asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
+
+ if (cfun->static_chain_decl != NULL)
+ {
+ asm_fprintf (file, "\tstd %s,24(%s)\n",
+ reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
+ fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
+ asm_fprintf (file, "\tld %s,24(%s)\n",
+ reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
+ }
+ else
+ fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
+ }
+ break;
+ }
+}
+
+
+/* Power4 load update and store update instructions are cracked into a
+ load or store and an integer insn which are executed in the same cycle.
+ Branches have their own dispatch slot which does not count against the
+ GCC issue rate, but it changes the program flow so there are no other
+ instructions to issue in this cycle. */
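+
+/* E.g. (illustrative): with MORE == 4 issue slots left, a cracked
+ insn leaves 2, while a microcoded insn ends the group entirely
+ (rs6000_variable_issue returns 0). */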
+
+static int
+rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ rtx insn, int more)
+{
+ if (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return more;
+
+ if (rs6000_sched_groups)
+ {
+ if (is_microcoded_insn (insn))
+ return 0;
+ else if (is_cracked_insn (insn))
+ return more > 2 ? more - 2 : 0;
+ }
+
+ return more - 1;
+}
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ the dependency of INSN on DEP_INSN described by LINK. COST is the
+ current cost. */
+
+static int
+rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ if (! recog_memoized (insn))
+ return 0;
+
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ /* Data dependency; DEP_INSN writes a register that INSN reads
+ some cycles later. */
+
+ /* Separate a load from a narrower, dependent store. */
+ if (rs6000_sched_groups
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
+ && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
+ && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
+ > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
+ return cost + 14;
+
+ switch (get_attr_type (insn))
+ {
+ case TYPE_JMPREG:
+ /* Tell the first scheduling pass about the latency between
+ a mtctr and bctr (and mtlr and br/blr). The first
+ scheduling pass will not know about this latency since
+ the mtctr instruction, which has the latency associated
+ to it, will be generated by reload. */
+ return TARGET_POWER ? 5 : 4;
+ case TYPE_BRANCH:
+ /* Leave some extra cycles between a compare and its
+ dependent branch, to inhibit expensive mispredicts. */
+ if ((rs6000_cpu_attr == CPU_PPC603
+ || rs6000_cpu_attr == CPU_PPC604
+ || rs6000_cpu_attr == CPU_PPC604E
+ || rs6000_cpu_attr == CPU_PPC620
+ || rs6000_cpu_attr == CPU_PPC630
+ || rs6000_cpu_attr == CPU_PPC750
+ || rs6000_cpu_attr == CPU_PPC7400
+ || rs6000_cpu_attr == CPU_PPC7450
+ || rs6000_cpu_attr == CPU_POWER4
+ || rs6000_cpu_attr == CPU_POWER5)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0)
+ && (get_attr_type (dep_insn) == TYPE_CMP
+ || get_attr_type (dep_insn) == TYPE_COMPARE
+ || get_attr_type (dep_insn) == TYPE_DELAYED_COMPARE
+ || get_attr_type (dep_insn) == TYPE_IMUL_COMPARE
+ || get_attr_type (dep_insn) == TYPE_LMUL_COMPARE
+ || get_attr_type (dep_insn) == TYPE_FPCOMPARE
+ || get_attr_type (dep_insn) == TYPE_CR_LOGICAL
+ || get_attr_type (dep_insn) == TYPE_DELAYED_CR))
+ return cost + 2;
+ default:
+ break;
+ }
+ /* Fall out to return default cost. */
+ }
+
+ return cost;
+}
+
+/* Return true if INSN is microcoded.
+ Return false otherwise. */
+
+static bool
+is_microcoded_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LOAD_EXT_U
+ || type == TYPE_LOAD_EXT_UX
+ || type == TYPE_LOAD_UX
+ || type == TYPE_STORE_UX
+ || type == TYPE_MFCR)
+ return true;
+ }
+
+ return false;
+}
+
+/* The function returns a nonzero value if INSN can be scheduled only
+ as the first insn in a dispatch group ("dispatch-slot restricted").
+ In this case, the returned value indicates how many dispatch slots
+ the insn occupies (at the beginning of the group).
+ Return 0 otherwise. */
+
+static int
+is_dispatch_slot_restricted (rtx insn)
+{
+ enum attr_type type;
+
+ if (!rs6000_sched_groups)
+ return 0;
+
+ if (!insn
+ || GET_CODE (insn) == NOTE
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return 0;
+
+ type = get_attr_type (insn);
+
+ switch (type)
+ {
+ case TYPE_MFCR:
+ case TYPE_MFCRF:
+ case TYPE_MTCR:
+ case TYPE_DELAYED_CR:
+ case TYPE_CR_LOGICAL:
+ case TYPE_MTJMPR:
+ case TYPE_MFJMPR:
+ return 1;
+ case TYPE_IDIV:
+ case TYPE_LDIV:
+ return 2;
+ case TYPE_LOAD_L:
+ case TYPE_STORE_C:
+ case TYPE_ISYNC:
+ case TYPE_SYNC:
+ return 4;
+ default:
+ if (rs6000_cpu == PROCESSOR_POWER5
+ && is_cracked_insn (insn))
+ return 2;
+ return 0;
+ }
+}
+
+/* The function returns true if INSN is cracked into 2 instructions
+ by the processor (and therefore occupies 2 issue slots). */
+
+static bool
+is_cracked_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LOAD_U || type == TYPE_STORE_U
+ || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
+ || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
+ || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
+ || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
+ || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
+ || type == TYPE_IDIV || type == TYPE_LDIV
+ || type == TYPE_INSERT_WORD)
+ return true;
+ }
+
+ return false;
+}
+
+/* The function returns true if INSN can be issued only from
+ the branch slot. */
+
+static bool
+is_branch_slot_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_BRANCH || type == TYPE_JMPREG)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
+/* A C statement (sans semicolon) to update the integer scheduling
+ priority INSN_PRIORITY (INSN). Increase the priority to execute
+ INSN earlier; reduce the priority to execute INSN later. Do not
+ define this macro if you do not need to adjust the scheduling
+ priorities of insns. */
+
+static int
+rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
+{
+ /* On machines (like the 750) which have asymmetric integer units,
+ where one integer unit can do multiply and divides and the other
+ can't, lower the priority of multiply/divide instructions relative
+ to other integer operations. */
+
+#if 0
+ if (! INSN_P (insn))
+ return priority;
+
+ if (GET_CODE (PATTERN (insn)) == USE)
+ return priority;
+
+ switch (rs6000_cpu_attr) {
+ case CPU_PPC750:
+ switch (get_attr_type (insn))
+ {
+ default:
+ break;
+
+ case TYPE_IMUL:
+ case TYPE_IDIV:
+ fprintf (stderr, "priority was %#x (%d) before adjustment\n",
+ priority, priority);
+ if (priority >= 0 && priority < 0x01000000)
+ priority >>= 3;
+ break;
+ }
+ }
+#endif
+
+ if (is_dispatch_slot_restricted (insn)
+ && reload_completed
+ && current_sched_info->sched_max_insns_priority
+ && rs6000_sched_restricted_insns_priority)
+ {
+
+ /* Prioritize insns that can be dispatched only in the first
+ dispatch slot. */
+ if (rs6000_sched_restricted_insns_priority == 1)
+ /* Attach highest priority to insn. This means that in
+ haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
+ precede 'priority' (critical path) considerations. */
+ return current_sched_info->sched_max_insns_priority;
+ else if (rs6000_sched_restricted_insns_priority == 2)
+ /* Increase priority of insn by a minimal amount. This means that in
+ haifa-sched.c:ready_sort(), only 'priority' (critical path)
+ considerations precede dispatch-slot restriction considerations. */
+ return (priority + 1);
+ }
+
+ return priority;
+}
+
+/* Return how many instructions the machine can issue per cycle. */
+
+static int
+rs6000_issue_rate (void)
+{
+ /* Use issue rate of 1 for first scheduling pass to decrease degradation. */
+ if (!reload_completed)
+ return 1;
+
+ switch (rs6000_cpu_attr) {
+ case CPU_RIOS1: /* ? */
+ case CPU_RS64A:
+ case CPU_PPC601: /* ? */
+ case CPU_PPC7450:
+ return 3;
+ case CPU_PPC440:
+ case CPU_PPC603:
+ case CPU_PPC750:
+ case CPU_PPC7400:
+ case CPU_PPC8540:
+ return 2;
+ case CPU_RIOS2:
+ case CPU_PPC604:
+ case CPU_PPC604E:
+ case CPU_PPC620:
+ case CPU_PPC630:
+ return 4;
+ case CPU_POWER4:
+ case CPU_POWER5:
+ return 5;
+ default:
+ return 1;
+ }
+}
+
+/* Return how many instructions to look ahead for better insn
+ scheduling. */
+
+static int
+rs6000_use_sched_lookahead (void)
+{
+ if (rs6000_cpu_attr == CPU_PPC8540)
+ return 4;
+ return 0;
+}
+
+/* Determine if PAT refers to memory. */
+
+static bool
+is_mem_ref (rtx pat)
+{
+ const char * fmt;
+ int i, j;
+ bool ret = false;
+
+ if (GET_CODE (pat) == MEM)
+ return true;
+
+ /* Recursively process the pattern. */
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
+ {
+ if (fmt[i] == 'e')
+ ret |= is_mem_ref (XEXP (pat, i));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ ret |= is_mem_ref (XVECEXP (pat, i, j));
+ }
+
+ return ret;
+}
+
+/* Determine if PAT is a PATTERN of a load insn. */
+
+static bool
+is_load_insn1 (rtx pat)
+{
+ if (!pat)
+ return false;
+
+ if (GET_CODE (pat) == SET)
+ return is_mem_ref (SET_SRC (pat));
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (is_load_insn1 (XVECEXP (pat, 0, i)))
+ return true;
+ }
+
+ return false;
+}
+
+/* Determine if INSN loads from memory. */
+
+static bool
+is_load_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn))
+ return false;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ return false;
+
+ return is_load_insn1 (PATTERN (insn));
+}
+
+/* Determine if PAT is a PATTERN of a store insn. */
+
+static bool
+is_store_insn1 (rtx pat)
+{
+ if (!pat)
+ return false;
+
+ if (GET_CODE (pat) == SET)
+ return is_mem_ref (SET_DEST (pat));
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (is_store_insn1 (XVECEXP (pat, 0, i)))
+ return true;
+ }
+
+ return false;
+}
+
+/* Determine if INSN stores to memory. */
+
+static bool
+is_store_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn))
+ return false;
+
+ return is_store_insn1 (PATTERN (insn));
+}
+
+/* Returns whether the dependence between INSN and NEXT is considered
+ costly by the given target. */
+
+static bool
+rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost,
+ int distance)
+{
+ /* If the flag is not enabled, no dependence is considered costly;
+ allow all dependent insns in the same group.
+ This is the most aggressive option. */
+ if (rs6000_sched_costly_dep == no_dep_costly)
+ return false;
+
+ /* If the flag is set to 1, a dependence is always considered costly;
+ do not allow dependent instructions in the same group.
+ This is the most conservative option. */
+ if (rs6000_sched_costly_dep == all_deps_costly)
+ return true;
+
+ if (rs6000_sched_costly_dep == store_to_load_dep_costly
+ && is_load_insn (next)
+ && is_store_insn (insn))
+ /* Prevent load after store in the same group. */
+ return true;
+
+ if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
+ && is_load_insn (next)
+ && is_store_insn (insn)
+ && (!link || (int) REG_NOTE_KIND (link) == 0))
+ /* Prevent load after store in the same group if it is a true
+ dependence. */
+ /* APPLE LOCAL begin nop on true-dependence. */
+ {
+ if (GET_CODE (PATTERN (next)) == SET && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx load_mem = SET_SRC (PATTERN (next));
+ rtx sto_mem = SET_DEST (PATTERN (insn));
+ if (GET_CODE (load_mem) == ZERO_EXTEND
+ || GET_CODE (load_mem) == SIGN_EXTEND)
+ load_mem = XEXP (load_mem, 0);
+ if (GET_CODE (sto_mem) == ZERO_EXTEND
+ || GET_CODE (sto_mem) == SIGN_EXTEND)
+ sto_mem = XEXP (sto_mem, 0);
+ if (GET_CODE (load_mem) == MEM && GET_CODE (sto_mem) == MEM)
+ /* Only consider those true-dependence cases where the memory
+ conflict can be determined. Exclude cases where the true
+ dependency was assumed only because a memory conflict could
+ not be determined from the aliasing info. */
+ return must_true_dependence (load_mem, sto_mem);
+ }
+ return true;
+ }
+ /* APPLE LOCAL end nop on true-dependence. */
+
+ /* The flag is set to X; dependences with latency >= X are considered costly,
+ and will not be scheduled in the same group. */
+ if (rs6000_sched_costly_dep <= max_dep_latency
+ && ((cost - distance) >= (int)rs6000_sched_costly_dep))
+ return true;
+
+ return false;
+}
+
+/* Return the next insn after INSN that is found before TAIL is reached,
+ skipping any "non-active" insns - insns that will not actually occupy
+ an issue slot. Return NULL_RTX if such an insn is not found. */
+
+static rtx
+get_next_active_insn (rtx insn, rtx tail)
+{
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
+
+ while (1)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
+
+ if (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && INSN_CODE (insn) != CODE_FOR_stack_tie))
+ break;
+ }
+ return insn;
+}
+
+/* Return whether the presence of INSN causes a dispatch group termination
+ of group WHICH_GROUP.
+
+ If WHICH_GROUP == current_group, this function will return true if INSN
+ causes the termination of the current group (i.e., the dispatch group to
+ which INSN belongs). This means that INSN will be the last insn in the
+ group it belongs to.
+
+ If WHICH_GROUP == previous_group, this function will return true if INSN
+ causes the termination of the previous group (i.e., the dispatch group that
+ precedes the group to which INSN belongs). This means that INSN will be
+ the first insn in the group it belongs to. */
+
+static bool
+insn_terminates_group_p (rtx insn, enum group_termination which_group)
+{
+ enum attr_type type;
+
+ if (! insn)
+ return false;
+
+ type = get_attr_type (insn);
+
+ if (is_microcoded_insn (insn))
+ return true;
+
+ if (which_group == current_group)
+ {
+ if (is_branch_slot_insn (insn))
+ return true;
+ return false;
+ }
+ else if (which_group == previous_group)
+ {
+ if (is_dispatch_slot_restricted (insn))
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
+/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
+ dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
+
+static bool
+is_costly_group (rtx *group_insns, rtx next_insn)
+{
+ int i;
+ rtx link;
+ int cost;
+ int issue_rate = rs6000_issue_rate ();
+
+ for (i = 0; i < issue_rate; i++)
+ {
+ rtx insn = group_insns[i];
+ if (!insn)
+ continue;
+ for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
+ {
+ rtx next = XEXP (link, 0);
+ if (next == next_insn)
+ {
+ cost = insn_cost (insn, link, next_insn);
+ if (rs6000_is_costly_dependence (insn, next_insn, link, cost, 0))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Utility function used by redefine_groups.
+ Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
+ in the same dispatch group. If so, insert nops before NEXT_INSN, in order
+ to keep it "far" (in a separate group) from GROUP_INSNS, following
+ one of the following schemes, depending on the value of the flag
+ -minsert-sched-nops = X:
+ (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
+ in order to force NEXT_INSN into a separate group.
+ (2) X < sched_finish_regroup_exact: insert exactly X nops.
+ GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
+ insertion (has a group just ended, how many vacant issue slots remain in the
+ last group, and how many dispatch groups were encountered so far). */
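+
+/* Worked example (a sketch, not from the original comment): with
+ issue_rate == 5 and can_issue_more == 3, scheme (1) emits
+ 'can_issue_more - 1' == 2 nops when NEXT_INSN is not a branch;
+ only the branch slot then remains vacant, so NEXT_INSN is forced
+ into a new group. */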
+
+static int
+force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
+ rtx next_insn, bool *group_end, int can_issue_more,
+ int *group_count)
+{
+ rtx nop;
+ bool force;
+ int issue_rate = rs6000_issue_rate ();
+ bool end = *group_end;
+ int i;
+
+ if (next_insn == NULL_RTX)
+ return can_issue_more;
+
+ if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
+ return can_issue_more;
+
+ force = is_costly_group (group_insns, next_insn);
+ if (!force)
+ return can_issue_more;
+
+ if (sched_verbose > 6)
+ fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
+ *group_count ,can_issue_more);
+
+ if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
+ {
+ if (*group_end)
+ can_issue_more = 0;
+
+ /* Since only a branch can be issued in the last issue_slot, it is
+ sufficient to insert 'can_issue_more - 1' nops if next_insn is not
+ a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
+ in this case the last nop will start a new group and the branch
+ will be forced to the new group. */
+ if (can_issue_more && !is_branch_slot_insn (next_insn))
+ can_issue_more--;
+
+ while (can_issue_more > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
+
+ *group_end = true;
+ return 0;
+ }
+
+ if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
+ {
+ int n_nops = rs6000_sched_insert_nops;
+
+ /* Nops can't be issued from the branch slot, so the effective
+ issue_rate for nops is 'issue_rate - 1'. */
+ if (can_issue_more == 0)
+ can_issue_more = issue_rate;
+ can_issue_more--;
+ if (can_issue_more == 0)
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+
+ while (n_nops > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ if (can_issue_more == issue_rate - 1) /* new group begins */
+ end = false;
+ can_issue_more--;
+ if (can_issue_more == 0)
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+ n_nops--;
+ }
+
+ /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
+ can_issue_more++;
+
+ /* Is next_insn going to start a new group? */
+ *group_end
+ = (end
+ || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
+ || (can_issue_more <= 2 && is_cracked_insn (next_insn))
+ || (can_issue_more < issue_rate &&
+ insn_terminates_group_p (next_insn, previous_group)));
+ if (*group_end && end)
+ (*group_count)--;
+
+ if (sched_verbose > 6)
+ fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
+ *group_count, can_issue_more);
+ return can_issue_more;
+ }
+
+ return can_issue_more;
+}
+
+/* This function tries to synch the dispatch groups that the compiler "sees"
+ with the dispatch groups that the processor dispatcher is expected to
+ form in practice. It tries to achieve this synchronization by forcing the
+ estimated processor grouping on the compiler (as opposed to the function
+ 'pad_groups' which tries to force the scheduler's grouping on the processor).
+
+ The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
+ examines the (estimated) dispatch groups that will be formed by the processor
+ dispatcher. It marks these group boundaries to reflect the estimated
+ processor grouping, overriding the grouping that the scheduler had marked.
+ Depending on the value of the flag '-minsert-sched-nops' this function can
+ force certain insns into separate groups or force a certain distance between
+ them by inserting nops, for example, if there exists a "costly dependence"
+ between the insns.
+
+ The function estimates the group boundaries that the processor will form as
+ follows: It keeps track of how many vacant issue slots are available after
+ each insn. A subsequent insn will start a new group if one of the following
+ 4 cases applies:
+ - no more vacant issue slots remain in the current dispatch group.
+ - only the last issue slot, which is the branch slot, is vacant, but the next
+ insn is not a branch.
+ - only the last 2 or fewer issue slots, including the branch slot, are vacant,
+ which means that a cracked insn (which occupies two issue slots) can't be
+ issued in this group.
+ - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
+ start a new group. */
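+
+/* Worked example (illustrative): with issue_rate == 5, after a
+ cracked insn (2 slots) and two ordinary insns, can_issue_more == 1;
+ if the next insn is not a branch, the second rule above makes it
+ start a new group. */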
+
+static int
+redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+{
+ rtx insn, next_insn;
+ int issue_rate;
+ int can_issue_more;
+ int slot, i;
+ bool group_end;
+ int group_count = 0;
+ rtx *group_insns;
+
+ /* Initialize. */
+ issue_rate = rs6000_issue_rate ();
+ group_insns = alloca (issue_rate * sizeof (rtx));
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ can_issue_more = issue_rate;
+ slot = 0;
+ insn = get_next_active_insn (prev_head_insn, tail);
+ group_end = false;
+
+ while (insn != NULL_RTX)
+ {
+ slot = (issue_rate - can_issue_more);
+ group_insns[slot] = insn;
+ can_issue_more =
+ rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
+ if (insn_terminates_group_p (insn, current_group))
+ can_issue_more = 0;
+
+ next_insn = get_next_active_insn (insn, tail);
+ if (next_insn == NULL_RTX)
+ return group_count + 1;
+
+ /* Is next_insn going to start a new group? */
+ group_end
+ = (can_issue_more == 0
+ || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
+ || (can_issue_more <= 2 && is_cracked_insn (next_insn))
+ || (can_issue_more < issue_rate &&
+ insn_terminates_group_p (next_insn, previous_group)));
+
+ can_issue_more = force_new_group (sched_verbose, dump, group_insns,
+ next_insn, &group_end, can_issue_more,
+ &group_count);
+
+ if (group_end)
+ {
+ group_count++;
+ can_issue_more = 0;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+
+ if (GET_MODE (next_insn) == TImode && can_issue_more)
+ PUT_MODE (next_insn, VOIDmode);
+ else if (!can_issue_more && GET_MODE (next_insn) != TImode)
+ PUT_MODE (next_insn, TImode);
+
+ insn = next_insn;
+ if (can_issue_more == 0)
+ can_issue_more = issue_rate;
+ } /* while */
+
+ return group_count;
+}
+
+/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
+ dispatch group boundaries that the scheduler had marked. Pad with nops
+ any dispatch groups which have vacant issue slots, in order to force the
+ scheduler's grouping on the processor dispatcher. The function
+ returns the number of dispatch groups found. */
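+
+/* Sketch (illustrative): if the scheduler marked a boundary after 3
+ of 5 slots were used and the next insn is not a branch, one nop is
+ emitted; only the branch slot then remains vacant, so the hardware
+ dispatcher must end the group at the same point. */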
+
+static int
+pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+{
+ rtx insn, next_insn;
+ rtx nop;
+ int issue_rate;
+ int can_issue_more;
+ int group_end;
+ int group_count = 0;
+
+ /* Initialize issue_rate. */
+ issue_rate = rs6000_issue_rate ();
+ can_issue_more = issue_rate;
+
+ insn = get_next_active_insn (prev_head_insn, tail);
+ next_insn = get_next_active_insn (insn, tail);
+
+ while (insn != NULL_RTX)
+ {
+ can_issue_more =
+ rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
+
+ group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
+
+ if (next_insn == NULL_RTX)
+ break;
+
+ if (group_end)
+ {
+ /* If the scheduler had marked group termination at this location
+ (between insn and next_insn), and neither insn nor next_insn will
+ force group termination, pad the group with nops to force group
+ termination. */
+ if (can_issue_more
+ && (rs6000_sched_insert_nops == sched_finish_pad_groups)
+ && !insn_terminates_group_p (insn, current_group)
+ && !insn_terminates_group_p (next_insn, previous_group))
+ {
+ if (!is_branch_slot_insn (next_insn))
+ can_issue_more--;
+
+ while (can_issue_more)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
+ }
+
+ can_issue_more = issue_rate;
+ group_count++;
+ }
+
+ insn = next_insn;
+ next_insn = get_next_active_insn (insn, tail);
+ }
+
+ return group_count;
+}
+
+/* The following function is called at the end of scheduling BB.
+ After reload, it inserts nops to enforce insn group bundling. */
+
+static void
+rs6000_sched_finish (FILE *dump, int sched_verbose)
+{
+ int n_groups;
+
+ if (sched_verbose)
+ fprintf (dump, "=== Finishing schedule.\n");
+
+ if (reload_completed && rs6000_sched_groups)
+ {
+ if (rs6000_sched_insert_nops == sched_finish_none)
+ return;
+
+ if (rs6000_sched_insert_nops == sched_finish_pad_groups)
+ n_groups = pad_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
+ else
+ n_groups = redefine_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
+
+ if (sched_verbose >= 6)
+ {
+ fprintf (dump, "ngroups = %d\n", n_groups);
+ print_rtl (dump, current_sched_info->prev_head);
+ fprintf (dump, "Done finish_sched\n");
+ }
+ }
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+int
+rs6000_trampoline_size (void)
+{
+ int ret = 0;
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ ret = (TARGET_32BIT) ? 12 : 24;
+ break;
+
+ case ABI_DARWIN:
+ case ABI_V4:
+ ret = (TARGET_32BIT) ? 40 : 48;
+ break;
+ }
+
+ return ret;
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+void
+rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
+{
+ int regsize = (TARGET_32BIT) ? 4 : 8;
+ rtx ctx_reg = force_reg (Pmode, cxt);
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+/* Macros to shorten the code expansions below. */
+#define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
+#define MEM_PLUS(addr,offset) \
+ gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))
+
+ /* Under AIX, just build the 3-word function descriptor. */
+ case ABI_AIX:
+ {
+ rtx fn_reg = gen_reg_rtx (Pmode);
+ rtx toc_reg = gen_reg_rtx (Pmode);
+ emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
+ emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
+ emit_move_insn (MEM_DEREF (addr), fn_reg);
+ emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
+ emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
+ }
+ break;
+
+ /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
+ case ABI_DARWIN:
+ case ABI_V4:
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
+ FALSE, VOIDmode, 4,
+ addr, Pmode,
+ GEN_INT (rs6000_trampoline_size ()), SImode,
+ fnaddr, Pmode,
+ ctx_reg, Pmode);
+ break;
+ }
+
+ return;
+}
+
+
+/* Table of valid machine attributes. */
+
+const struct attribute_spec rs6000_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
+ { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
+ { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
+ { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+ { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE,
+#endif
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+/* Handle the "altivec" attribute. The attribute may have
+ arguments as follows:
+
+ __attribute__((altivec(vector__)))
+ __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
+ __attribute__((altivec(bool__))) (always followed by 'unsigned')
+
+ and may appear more than once (e.g., 'vector bool char') in a
+ given declaration. */
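+
+/* For instance (an illustrative sketch): "vector unsigned short"
+ reaches this handler roughly as
+ __attribute__((altivec(vector__))) unsigned short, and the
+ 'v'/HImode case below maps it to unsigned_V8HI_type_node. */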
+
+static tree
+rs6000_handle_altivec_attribute (tree *node,
+ tree name ATTRIBUTE_UNUSED,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree type = *node, result = NULL_TREE;
+ enum machine_mode mode;
+ int unsigned_p;
+ char altivec_type
+ = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
+ ? *IDENTIFIER_POINTER (TREE_VALUE (args))
+ : '?');
+
+ while (POINTER_TYPE_P (type)
+ || TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ mode = TYPE_MODE (type);
+
+ /* Check for invalid AltiVec type qualifiers. */
+ if (type == long_unsigned_type_node || type == long_integer_type_node)
+ {
+ if (TARGET_64BIT)
+ error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
+ else if (rs6000_warn_altivec_long)
+ warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
+ }
+ else if (type == long_long_unsigned_type_node
+ || type == long_long_integer_type_node)
+ error ("use of %<long long%> in AltiVec types is invalid");
+ else if (type == double_type_node)
+ error ("use of %<double%> in AltiVec types is invalid");
+ else if (type == long_double_type_node)
+ error ("use of %<long double%> in AltiVec types is invalid");
+ else if (type == boolean_type_node)
+ error ("use of boolean types in AltiVec types is invalid");
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ error ("use of %<complex%> in AltiVec types is invalid");
+ else if (DECIMAL_FLOAT_MODE_P (mode))
+ error ("use of decimal floating point types in AltiVec types is invalid");
+
+ switch (altivec_type)
+ {
+ /* APPLE LOCAL begin AltiVec */
+ case 'e':
+ /* Return the constituent element type. */
+ result = (ALTIVEC_VECTOR_MODE (mode) ? TREE_TYPE (type) : type);
+ break;
+ /* APPLE LOCAL end AltiVec */
+
+ case 'v':
+ unsigned_p = TYPE_UNSIGNED (type);
+ switch (mode)
+ {
+ case SImode:
+ result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
+ break;
+ case HImode:
+ result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
+ break;
+ case QImode:
+ result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ break;
+ case SFmode: result = V4SF_type_node; break;
+ /* If the user says 'vector int bool', we may be handed the 'bool'
+ attribute _before_ the 'vector' attribute, and so select the
+ proper type in the 'b' case below. */
+ case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
+ result = type;
+ default: break;
+ }
+ break;
+ case 'b':
+ switch (mode)
+ {
+ case SImode: case V4SImode: result = bool_V4SI_type_node; break;
+ case HImode: case V8HImode: result = bool_V8HI_type_node; break;
+ case QImode: case V16QImode: result = bool_V16QI_type_node;
+ default: break;
+ }
+ break;
+ case 'p':
+ switch (mode)
+ {
+ case V8HImode: result = pixel_V8HI_type_node;
+ default: break;
+ }
+ default: break;
+ }
+
+ /* APPLE LOCAL begin AltiVec */
+ /* Propagate qualifiers attached to the element type
+ onto the vector type. */
+ if (result && result != type && TYPE_QUALS (type))
+ result = build_qualified_type (result, TYPE_QUALS (type));
+ /* APPLE LOCAL end AltiVec */
+
+ *no_add_attrs = true; /* No need to hang on to the attribute. */
+
+ if (result)
+ *node = reconstruct_complex_type (*node, result);
+
+ return NULL_TREE;
+}
+
+/* AltiVec defines four built-in scalar types that serve as vector
+ elements; we must teach the compiler how to mangle them. */
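+
+/* Example (illustrative): a parameter of type __pixel mangles as
+ "u7__pixel", so a function f (__pixel) becomes _Z1fu7__pixel under
+ the usual Itanium C++ ABI rules. */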
+
+static const char *
+/* APPLE LOCAL mangle_type 7105099 */
+rs6000_mangle_type (tree type)
+{
+ /* APPLE LOCAL begin mangle_type 7105099 */
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
+ && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
+ return NULL;
+
+ /* APPLE LOCAL end mangle_type 7105099 */
+ if (type == bool_char_type_node) return "U6__boolc";
+ if (type == bool_short_type_node) return "U6__bools";
+ if (type == pixel_type_node) return "u7__pixel";
+ if (type == bool_int_type_node) return "U6__booli";
+
+ /* Mangle IBM extended float long double as `g' (__float128) on
+ powerpc*-linux where long-double-64 previously was the default. */
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_ELF
+ && TARGET_LONG_DOUBLE_128
+ && !TARGET_IEEEQUAD)
+ return "g";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
+
+/* Handle a "longcall" or "shortcall" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+rs6000_handle_longcall_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE
+ && TREE_CODE (*node) != FIELD_DECL
+ && TREE_CODE (*node) != TYPE_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+ /* APPLE LOCAL begin longcall */
+ else if (TARGET_64BIT && TARGET_MACHO)
+ *no_add_attrs = true;
+ /* APPLE LOCAL end longcall */
+
+ return NULL_TREE;
+}
+
+/* Set longcall attributes on all functions declared when
+ rs6000_default_long_calls is true. */
+static void
+rs6000_set_default_type_attributes (tree type)
+{
+ if (rs6000_default_long_calls
+ && (TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE))
+ TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
+ NULL_TREE,
+ TYPE_ATTRIBUTES (type));
+
+#if TARGET_MACHO
+ darwin_set_default_type_attributes (type);
+#endif
+}
+
+/* Return a reference suitable for calling a function with the
+ longcall attribute. */
+
+rtx
+rs6000_longcall_ref (rtx call_ref)
+{
+ const char *call_name;
+ tree node;
+
+ if (GET_CODE (call_ref) != SYMBOL_REF)
+ return call_ref;
+
+ /* System V adds '.' to the internal name, so skip any leading dots. */
+ call_name = XSTR (call_ref, 0);
+ if (*call_name == '.')
+ {
+ while (*call_name == '.')
+ call_name++;
+
+ node = get_identifier (call_name);
+ call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
+ }
+
+ return force_reg (Pmode, call_ref);
+}
+
+#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
+#define TARGET_USE_MS_BITFIELD_LAYOUT 0
+#endif
+
+/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+rs6000_handle_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored", IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ else if ((is_attribute_p ("ms_struct", name)
+ && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
+ || ((is_attribute_p ("gcc_struct", name)
+ && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
+ {
+ warning (OPT_Wattributes, "%qs incompatible attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static bool
+rs6000_ms_bitfield_layout_p (tree record_type)
+{
+ return (TARGET_USE_MS_BITFIELD_LAYOUT &&
+ !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
+ || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
+}
+
+#ifdef USING_ELFOS_H
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_AIX
+ && TARGET_MINIMAL_TOC
+ && !TARGET_RELOCATABLE)
+ {
+ if (!toc_initialized)
+ {
+ toc_initialized = 1;
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
+ fprintf (asm_out_file, "\t.tc ");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, "\n");
+
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ }
+ else
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ }
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ else
+ {
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ if (!toc_initialized)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ toc_initialized = 1;
+ }
+ }
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_elf_asm_init_sections (void)
+{
+ toc_section
+ = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
+
+ sdata2_section
+ = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+ SDATA2_SECTION_ASM_OP);
+}
+
+/* Implement TARGET_SELECT_RTX_SECTION. */
+
+static section *
+rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ return toc_section;
+ else
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+/* For a SYMBOL_REF, set generic flags and then perform some
+ target-specific processing.
+
+ When the AIX ABI is requested on a non-AIX system, replace the
+ function name with the real name (with a leading .) rather than the
+ function descriptor name. This saves a lot of overriding code to
+ read the prefixes. */
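+
+/* E.g. (an illustrative sketch): when DEFAULT_ABI == ABI_AIX on a
+ non-AIX ELF system, the SYMBOL_REF string "foo" is rewritten below
+ as ".foo". */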
+
+static void
+rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
+{
+ default_encode_section_info (decl, rtl, first);
+
+ if (first
+ && TREE_CODE (decl) == FUNCTION_DECL
+ && !TARGET_AIX
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ rtx sym_ref = XEXP (rtl, 0);
+ size_t len = strlen (XSTR (sym_ref, 0));
+ char *str = alloca (len + 2);
+ str[0] = '.';
+ memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
+ XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
+ }
+}
+
+bool
+rs6000_elf_in_small_data_p (tree decl)
+{
+ if (rs6000_sdata == SDATA_NONE)
+ return false;
+
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (decl) == STRING_CST)
+ return false;
+
+ /* Functions are never in the small data area. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return false;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (strcmp (section, ".sdata") == 0
+ || strcmp (section, ".sdata2") == 0
+ || strcmp (section, ".sbss") == 0
+ || strcmp (section, ".sbss2") == 0
+ || strcmp (section, ".PPC.EMB.sdata0") == 0
+ || strcmp (section, ".PPC.EMB.sbss0") == 0)
+ return true;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
+
+ if (size > 0
+ && (unsigned HOST_WIDE_INT) size <= g_switch_value
+ /* If it's not public, and we're not going to reference it there,
+ there's no need to put it in the small data section. */
+ && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* USING_ELFOS_H */
+
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
+
+static bool
+rs6000_use_blocks_for_constant_p (enum machine_mode mode, rtx x)
+{
+ return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
+}
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG.
+
+ r0 is special and we must not select it as an address
+ register by this routine since our caller will try to
+ increment the returned register via an "la" instruction. */
+
+rtx
+find_addr_reg (rtx addr)
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) != 0)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG
+ && REGNO (XEXP (addr, 1)) != 0)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ gcc_unreachable ();
+ }
+ gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
+ return addr;
+}
+
+void
+rs6000_fatal_bad_address (rtx op)
+{
+ fatal_insn ("bad address", op);
+}
+
+#if TARGET_MACHO
+
+/* APPLE LOCAL mlongcall long names 4271187 */
+static GTY (()) tree branch_island_list = 0;
+
+/* APPLE LOCAL begin 4380289 */
+/* Remember to generate a branch island for far calls to the given
+ function. Force the creation of a Mach-O stub. */
+
+static tree
+add_compiler_branch_island (tree function_name, int line_number)
+{
+ tree branch_island;
+ tree label_decl = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE);
+
+ branch_island = build_tree_list (function_name, label_decl);
+ TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
+ TREE_CHAIN (branch_island) = branch_island_list;
+ branch_island_list = branch_island;
+ return label_decl;
+}
+/* APPLE LOCAL end 4380289 */
+
+#define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND) TREE_VALUE (BRANCH_ISLAND)
+#define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND) TREE_PURPOSE (BRANCH_ISLAND)
+#define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND) \
+ TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
+
+/* Generate far-jump branch islands for everything on the
+ branch_island_list. Invoked immediately after the last instruction
+ of the epilogue has been emitted; the branch-islands must be
+ appended to, and contiguous with, the function body. Mach-O stubs
+ are generated in machopic_output_stub(). */
+
+static void
+macho_branch_islands (void)
+{
+ /* APPLE LOCAL begin 4380289 */
+ tree branch_island;
+
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ {
+ rtx operands[2];
+ rtx decl_rtl;
+
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+ decl_rtl = DECL_RTL (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
+ operands[0] = (in_section == text_section)
+ ? machopic_indirect_call_target (decl_rtl)
+ : machopic_force_indirect_call_target (decl_rtl);
+ operands[1] = label_rtx (BRANCH_ISLAND_LABEL_NAME (branch_island));
+ if (flag_pic)
+ {
+ output_asm_insn ("\n%1:\n\tmflr r0\n"
+ "\tbcl 20,31,%1_pic\n"
+ "%1_pic:\n"
+ "\tmflr r12\n"
+ "\taddis r12,r12,ha16(%0 - %1_pic)\n"
+ "\tmtlr r0\n"
+ "\taddi r12,r12,lo16(%0 - %1_pic)\n"
+ "\tmtctr r12\n"
+ "\tbctr",
+ operands);
+ }
+ else
+ {
+ output_asm_insn ("\n%1:\n"
+ "\tlis r12,hi16(%0)\n"
+ "\tori r12,r12,lo16(%0)\n"
+ "\tmtctr r12\n"
+ "\tbctr",
+ operands);
+ }
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+ }
+ /* APPLE LOCAL end 4380289 */
+
+ branch_island_list = 0;
+}
+
+/* NO_PREVIOUS_DEF checks the linked list to see whether the function
+ name is already there. */
+
+static int
+no_previous_def (tree function_name)
+{
+ tree branch_island;
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
+ return 0;
+ return 1;
+}
+
+/* GET_PREV_LABEL gets the label name from the previous definition of
+ the function. */
+
+static tree
+get_prev_label (tree function_name)
+{
+ tree branch_island;
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
+ return BRANCH_ISLAND_LABEL_NAME (branch_island);
+ return 0;
+}
+
+/* APPLE LOCAL begin axe stubs 5571540 */
+/* DARWIN_LINKER_GENERATES_ISLANDS and DARWIN_GENERATE_ISLANDS moved up */
+/* APPLE LOCAL end axe stubs 5571540 */
+
+/* INSN is either a function call or a millicode call. It may have an
+ unconditional jump in its delay slot.
+
+ CALL_DEST is the routine we are calling. */
+
+char *
+output_call (rtx insn, rtx *operands, int dest_operand_number,
+ int cookie_operand_number)
+{
+ static char buf[256];
+ if (DARWIN_GENERATE_ISLANDS
+ && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
+ && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
+ {
+ tree labelname;
+ /* APPLE LOCAL begin 4380289 */
+ tree funname = SYMBOL_REF_DECL (operands[dest_operand_number]);
+
+ if (!funname)
+ {
+ funname = build_decl_stat (FUNCTION_DECL,
+ get_identifier (XSTR (operands[dest_operand_number], 0)),
+ void_type_node);
+ set_decl_rtl (funname, operands[dest_operand_number]);
+ }
+
+ {
+ int line_number = 0;
+
+ /* APPLE LOCAL begin 3910248, 3915171 */
+ for (;
+ insn && (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) < 0);
+ insn = PREV_INSN (insn))
+ ;
+ /* APPLE LOCAL end 3910248, 3915171 */
+ if (insn)
+ line_number = NOTE_LINE_NUMBER (insn);
+
+ labelname = no_previous_def (funname)
+ ? add_compiler_branch_island (funname, line_number)
+ : get_prev_label (funname);
+ }
+
+ /* If we're generating a long call from the text section, we
+ can use the usual rules for Mach-O indirection. If we're
+ in a coalesced text section, we must always refer to a
+ Mach-O stub; if we refer directly to our callee, and our
+ callee is also in a coalesced section, and is coalesced
+ away, the linkers (static and dynamic) won't know where
+ to send us. Ergo, when we're in a coalesced section, we
+ must always use a stub for all callees. */
+
+ operands[dest_operand_number] = (in_section == text_section)
+ ? machopic_indirect_call_target (operands[dest_operand_number])
+ : machopic_force_indirect_call_target (operands[dest_operand_number]);
+ operands[1+dest_operand_number] = label_rtx (labelname);
+
+ /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
+ instruction will reach 'foo', otherwise link as 'bl L42'".
+ "L42" should be a 'branch island', that will do a far jump to
+ 'foo'. Branch islands are generated in
+ macho_branch_islands(). */
+ sprintf (buf, "jbsr %%z%d,%%l%d",
+ dest_operand_number, 1+dest_operand_number);
+ /* APPLE LOCAL end 4380289 */
+ }
+ else
+ sprintf (buf, "bl %%z%d", dest_operand_number);
+ return buf;
+}
+
+/* Generate PIC and indirect symbol stubs. */
+
+void
+machopic_output_stub (FILE *file, const char *symb, const char *stub)
+{
+ unsigned int length;
+ char *symbol_name, *lazy_ptr_name;
+ char *local_label_0;
+ static int label = 0;
+
+ /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
+ symb = (*targetm.strip_name_encoding) (symb);
+
+ length = strlen (symb);
+ symbol_name = alloca (length + 32);
+ GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
+
+ lazy_ptr_name = alloca (length + 32);
+ GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
+
+ if (flag_pic == 2)
+ switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
+ else
+ switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
+
+ if (flag_pic == 2)
+ {
+ fprintf (file, "\t.align 5\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ label++;
+ local_label_0 = alloca (sizeof ("\"L00000000000$spb\""));
+ sprintf (local_label_0, "\"L%011d$spb\"", label);
+
+ fprintf (file, "\tmflr r0\n");
+ fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
+ fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
+ fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
+ lazy_ptr_name, local_label_0);
+ fprintf (file, "\tmtlr r0\n");
+ fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
+ lazy_ptr_name, local_label_0);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
+ else
+ {
+ fprintf (file, "\t.align 4\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
+ fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
+ lazy_ptr_name);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
+
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
+ fprintf (file, "%s:\n", lazy_ptr_name);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+ fprintf (file, "%sdyld_stub_binding_helper\n",
+ (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
+}
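+
+/* An illustrative sketch (label names are schematic, not the exact
+   strings produced by GEN_SYMBOL_NAME_FOR_SYMBOL): for a callee _bar
+   in non-PIC code, the second branch above emits, in the stub section:
+
+	.align 4
+   L_bar$stub:
+	.indirect_symbol _bar
+	lis r11,ha16(L_bar$lazy_ptr)
+	lwzu r12,lo16(L_bar$lazy_ptr)(r11)
+	mtctr r12
+	bctr
+
+   and in the lazy-pointer section:
+
+   L_bar$lazy_ptr:
+	.indirect_symbol _bar
+	.long dyld_stub_binding_helper
+
+   The lazy pointer initially holds dyld_stub_binding_helper, which
+   binds _bar on first call and rewrites the pointer to its address. */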
+
+/* Legitimize PIC addresses. If the address is already
+ position-independent, we return ORIG. Newly generated
+ position-independent addresses go into a reg. This is REG if
+ nonzero; otherwise we allocate register(s) as necessary. */
+
+#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
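+
+/* A quick check of SMALL_INT: UINTVAL (X) + 0x8000 maps the signed
+   range onto [0, 0xffff], so the macro accepts exactly the 16-bit
+   signed offsets -32768 .. 32767 that a D-form displacement field
+   can encode. */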
+
+rtx
+rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
+ rtx reg)
+{
+ rtx base, offset;
+
+ if (reg == NULL && ! reload_in_progress && ! reload_completed)
+ reg = gen_reg_rtx (Pmode);
+
+ if (GET_CODE (orig) == CONST)
+ {
+ rtx reg_temp;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
+
+ /* Use a different reg for the intermediate value, as
+ it will be marked UNCHANGING. */
+ reg_temp = no_new_pseudos ? reg : gen_reg_rtx (Pmode);
+ base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
+ Pmode, reg_temp);
+ offset =
+ rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
+ Pmode, reg);
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if (SMALL_INT (offset))
+ return plus_constant (base, INTVAL (offset));
+ else if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ {
+ rtx mem = force_const_mem (Pmode, orig);
+ return machopic_legitimize_pic_address (mem, Pmode, reg);
+ }
+ }
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ /* Fall back on generic machopic code. */
+ return machopic_legitimize_pic_address (orig, mode, reg);
+}
+
+/* Output a .machine directive for the Darwin assembler, and call
+ the generic start_file routine. */
+
+static void
+rs6000_darwin_file_start (void)
+{
+ static const struct
+ {
+ const char *arg;
+ const char *name;
+ int if_set;
+ } mapping[] = {
+ { "ppc64", "ppc64", MASK_64BIT },
+ { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
+ { "power4", "ppc970", 0 },
+ { "G5", "ppc970", 0 },
+ { "7450", "ppc7450", 0 },
+ /* APPLE LOCAL radar 4161346 */
+ { "ppc", "ppc", MASK_PIM_ALTIVEC },
+ { "7400", "ppc7400", MASK_ALTIVEC },
+ { "G4", "ppc7400", 0 },
+ { "750", "ppc750", 0 },
+ { "740", "ppc750", 0 },
+ { "G3", "ppc750", 0 },
+ { "604e", "ppc604e", 0 },
+ { "604", "ppc604", 0 },
+ { "603e", "ppc603", 0 },
+ { "603", "ppc603", 0 },
+ { "601", "ppc601", 0 },
+ { NULL, "ppc", 0 } };
+ const char *cpu_id = "";
+ size_t i;
+
+ rs6000_file_start ();
+ darwin_file_start ();
+
+ /* Determine the argument to -mcpu=. Default to G3 if not specified. */
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ if (rs6000_select[i].set_arch_p && rs6000_select[i].string
+ && rs6000_select[i].string[0] != '\0')
+ cpu_id = rs6000_select[i].string;
+
+ /* Look through the mapping array. Pick the first name that either
+ matches the argument, has a bit set in IF_SET that is also set
+ in the target flags, or has a NULL name. */
+
+ i = 0;
+ while (mapping[i].arg != NULL
+ && strcmp (mapping[i].arg, cpu_id) != 0
+ && (mapping[i].if_set & target_flags) == 0)
+ i++;
+
+ fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
+}
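+
+/* For example (illustrative): "-mcpu=G4" turns on MASK_ALTIVEC, so the
+   scan stops at the "7400" row and the file begins with
+   ".machine ppc7400"; with no -mcpu and default flags the scan falls
+   through to the final row and emits ".machine ppc". */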
+
+#endif /* TARGET_MACHO */
+
+#if TARGET_ELF
+static int
+rs6000_elf_reloc_rw_mask (void)
+{
+ if (flag_pic)
+ return 3;
+ else if (DEFAULT_ABI == ABI_AIX)
+ return 2;
+ else
+ return 0;
+}
+
+/* Record an element in the table of global constructors. SYMBOL is
+ a SYMBOL_REF of the function to be called; PRIORITY is a number
+ between 0 and MAX_INIT_PRIORITY.
+
+ This differs from default_named_section_asm_out_constructor in
+ that we have special handling for -mrelocatable. */
+
+static void
+rs6000_elf_asm_out_constructor (rtx symbol, int priority)
+{
+ const char *section = ".ctors";
+ char buf[16];
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ sprintf (buf, ".ctors.%.5u",
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
+ section = buf;
+ }
+
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
+ assemble_align (POINTER_SIZE);
+
+ if (TARGET_RELOCATABLE)
+ {
+ fputs ("\t.long (", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputs (")@fixup\n", asm_out_file);
+ }
+ else
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
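+
+/* Worked example of the inversion above: with MAX_INIT_PRIORITY of
+   65535, a constructor of priority 100 lands in ".ctors.65435".
+   A numerically lower priority thus sorts later in the section, and
+   the right-to-left execution order reaches it first. */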
+
+static void
+rs6000_elf_asm_out_destructor (rtx symbol, int priority)
+{
+ const char *section = ".dtors";
+ char buf[16];
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ sprintf (buf, ".dtors.%.5u",
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
+ section = buf;
+ }
+
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
+ assemble_align (POINTER_SIZE);
+
+ if (TARGET_RELOCATABLE)
+ {
+ fputs ("\t.long (", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputs (")@fixup\n", asm_out_file);
+ }
+ else
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+void
+rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
+{
+ if (TARGET_64BIT)
+ {
+ fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
+ ASM_OUTPUT_LABEL (file, name);
+ fputs (DOUBLE_INT_ASM_OP, file);
+ rs6000_output_function_entry (file, name);
+ fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
+ if (DOT_SYMBOLS)
+ {
+ fputs ("\t.size\t", file);
+ assemble_name (file, name);
+ fputs (",24\n\t.type\t.", file);
+ assemble_name (file, name);
+ fputs (",@function\n", file);
+ if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
+ {
+ fputs ("\t.globl\t.", file);
+ assemble_name (file, name);
+ putc ('\n', file);
+ }
+ }
+ else
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
+ rs6000_output_function_entry (file, name);
+ fputs (":\n", file);
+ return;
+ }
+
+ if (TARGET_RELOCATABLE
+ && !TARGET_SECURE_PLT
+ && (get_pool_size () != 0 || current_function_profile)
+ && uses_TOC ())
+ {
+ char buf[256];
+
+ (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
+ fprintf (file, "\t.long ");
+ assemble_name (file, buf);
+ putc ('-', file);
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ assemble_name (file, buf);
+ putc ('\n', file);
+ }
+
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
+
+ if (DEFAULT_ABI == ABI_AIX)
+ {
+ const char *desc_name, *orig_name;
+
+ orig_name = (*targetm.strip_name_encoding) (name);
+ desc_name = orig_name;
+ while (*desc_name == '.')
+ desc_name++;
+
+ if (TREE_PUBLIC (decl))
+ fprintf (file, "\t.globl %s\n", desc_name);
+
+ fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ fprintf (file, "%s:\n", desc_name);
+ fprintf (file, "\t.long %s\n", orig_name);
+ fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
+ if (DEFAULT_ABI == ABI_AIX)
+ fputs ("\t.long 0\n", file);
+ fprintf (file, "\t.previous\n");
+ }
+ ASM_OUTPUT_LABEL (file, name);
+}
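+
+/* Illustrative sketch of the 64-bit output for a public, non-weak
+   function "foo" with DOT_SYMBOLS, assuming DOUBLE_INT_ASM_OP is
+   "\t.quad\t" as on powerpc64-linux:
+
+	.section ".opd","aw"
+	.align 3
+   foo:
+	.quad .foo,.TOC.@tocbase,0
+	.previous
+	.size foo,24
+	.type .foo,@function
+	.globl .foo
+   .foo:
+
+   i.e. "foo" names the function descriptor in .opd and ".foo" names
+   the code entry point. */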
+
+static void
+rs6000_elf_end_indicate_exec_stack (void)
+{
+ if (TARGET_32BIT)
+ file_end_indicate_exec_stack ();
+}
+#endif
+
+#if TARGET_XCOFF
+static void
+rs6000_xcoff_asm_output_anchor (rtx symbol)
+{
+ char buffer[100];
+
+ sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
+ SYMBOL_REF_BLOCK_OFFSET (symbol));
+ ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
+}
+
+static void
+rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
+{
+ fputs (GLOBAL_ASM_OP, stream);
+ RS6000_OUTPUT_BASENAME (stream, name);
+ putc ('\n', stream);
+}
+
+/* A get_unnamed_section callback, used for read-only sections.
+ DIRECTIVE points to the section string variable. */
+
+static void
+rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RO],3\n",
+ *(const char *const *) directive);
+}
+
+/* Likewise for read-write sections. */
+
+static void
+rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RW],3\n",
+ *(const char *const *) directive);
+}
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (TARGET_MINIMAL_TOC)
+ {
+ /* toc_section is always selected at least once from
+ rs6000_xcoff_file_start, so this is guaranteed to
+ always be defined once and only once in each file. */
+ if (!toc_initialized)
+ {
+ fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
+ fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
+ toc_initialized = 1;
+ }
+ fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
+ (TARGET_32BIT ? "" : ",3"));
+ }
+ else
+ fputs ("\t.toc\n", asm_out_file);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_xcoff_asm_init_sections (void)
+{
+ read_only_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_read_only_section_name);
+
+ private_data_section
+ = get_unnamed_section (SECTION_WRITE,
+ rs6000_xcoff_output_readwrite_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ read_only_private_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ toc_section
+ = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
+
+ readonly_data_section = read_only_data_section;
+ exception_section = data_section;
+}
+
+static int
+rs6000_xcoff_reloc_rw_mask (void)
+{
+ return 3;
+}
+
+static void
+rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ int smclass;
+ static const char * const suffix[3] = { "PR", "RO", "RW" };
+
+ if (flags & SECTION_CODE)
+ smclass = 0;
+ else if (flags & SECTION_WRITE)
+ smclass = 2;
+ else
+ smclass = 1;
+
+ fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
+ (flags & SECTION_CODE) ? "." : "",
+ name, suffix[smclass], flags & SECTION_ENTSIZE);
+}
+
+static section *
+rs6000_xcoff_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ if (decl_readonly_section (decl, reloc))
+ {
+ if (TREE_PUBLIC (decl))
+ return read_only_data_section;
+ else
+ return read_only_private_data_section;
+ }
+ else
+ {
+ if (TREE_PUBLIC (decl))
+ return data_section;
+ else
+ return private_data_section;
+ }
+}
+
+static void
+rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
+{
+ const char *name;
+
+ /* Use select_section for private and uninitialized data. */
+ if (!TREE_PUBLIC (decl)
+ || DECL_COMMON (decl)
+ || DECL_INITIAL (decl) == NULL_TREE
+ || DECL_INITIAL (decl) == error_mark_node
+ || (flag_zero_initialized_in_bss
+ && initializer_zerop (DECL_INITIAL (decl))))
+ return;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = (*targetm.strip_name_encoding) (name);
+ DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
+}
+
+/* Select section for constant in constant pool.
+
+ On RS/6000, all constants are in the private read-only data area.
+ However, if this is being placed in the TOC it must be output as a
+ toc entry. */
+
+static section *
+rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ return toc_section;
+ else
+ return read_only_private_data_section;
+}
+
+/* Remove any trailing [DS] or the like from the symbol name. */
+
+static const char *
+rs6000_xcoff_strip_name_encoding (const char *name)
+{
+ size_t len;
+ if (*name == '*')
+ name++;
+ len = strlen (name);
+ if (name[len - 1] == ']')
+ return ggc_alloc_string (name, len - 4);
+ else
+ return name;
+}
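+
+/* E.g. "foo[DS]" comes back as "foo": a trailing ']' drops the last
+   four characters, which relies on XCOFF storage-mapping classes
+   always being two letters. */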
+
+/* Section attributes. AIX is always PIC. */
+
+static unsigned int
+rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int align;
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ /* Align to at least UNIT size. */
+ if (flags & SECTION_CODE)
+ align = MIN_UNITS_PER_WORD;
+ else
+ /* Increase alignment of large objects if not already stricter. */
+ align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
+ int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
+ ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
+
+ return flags | (exact_log2 (align) & SECTION_ENTSIZE);
+}
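+
+/* The exact_log2 trick above reuses the SECTION_ENTSIZE bits to carry
+   the csect alignment, e.g. a 16-byte-aligned section stores
+   exact_log2 (16) == 4, which rs6000_xcoff_asm_named_section then
+   prints as the trailing ",4" operand of ".csect". */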
+
+/* Output at beginning of assembler file.
+
+ Initialize the section names for the RS/6000 at this point.
+
+ Specify the filename, including the full path, to the assembler.
+
+ We want to go into the TOC section so at least one .toc will be emitted.
+ Also, in order to output proper .bs/.es pairs, we need at least one static
+ [RW] section emitted.
+
+ Finally, declare mcount when profiling to make the assembler happy. */
+
+static void
+rs6000_xcoff_file_start (void)
+{
+ rs6000_gen_section_name (&xcoff_bss_section_name,
+ main_input_filename, ".bss_");
+ rs6000_gen_section_name (&xcoff_private_data_section_name,
+ main_input_filename, ".rw_");
+ rs6000_gen_section_name (&xcoff_read_only_section_name,
+ main_input_filename, ".ro_");
+
+ fputs ("\t.file\t", asm_out_file);
+ output_quoted_string (asm_out_file, main_input_filename);
+ fputc ('\n', asm_out_file);
+ if (write_symbols != NO_DEBUG)
+ switch_to_section (private_data_section);
+ switch_to_section (text_section);
+ if (profile_flag)
+ fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
+ rs6000_file_start ();
+}
+
+/* Output at end of assembler file.
+ On the RS/6000, referencing data should automatically pull in text. */
+
+static void
+rs6000_xcoff_file_end (void)
+{
+ switch_to_section (text_section);
+ fputs ("_section_.text:\n", asm_out_file);
+ switch_to_section (data_section);
+ fputs (TARGET_32BIT
+ ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
+ asm_out_file);
+}
+#endif /* TARGET_XCOFF */
+
+/* APPLE LOCAL begin pragma reverse_bitfields */
+#if TARGET_MACHO
+/* Pragma reverse_bitfields. For compatibility with CW.
+ This feature is not well defined by CW, and results in
+ code that does not work in some cases! Bug compatibility
+ is the requirement, however. */
+
+static bool
+rs6000_reverse_bitfields_p (tree record_type ATTRIBUTE_UNUSED)
+{
+ return darwin_reverse_bitfields;
+}
+#endif
+/* APPLE LOCAL end pragma reverse_bitfields */
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+rs6000_rtx_costs (rtx x, int code, int outer_code, int *total)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ switch (code)
+ {
+ /* On the RS/6000, if it is valid in the insn, it is free. */
+ case CONST_INT:
+ if (((outer_code == SET
+ || outer_code == PLUS
+ || outer_code == MINUS)
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_L (x)))
+ || (outer_code == AND
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))
+ || mask_operand (x, mode)
+ || (mode == DImode
+ && mask64_operand (x, DImode))))
+ || ((outer_code == IOR || outer_code == XOR)
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || outer_code == ASHIFT
+ || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT
+ || outer_code == ROTATE
+ || outer_code == ROTATERT
+ || outer_code == ZERO_EXTRACT
+ || (outer_code == MULT
+ && satisfies_constraint_I (x))
+ || ((outer_code == DIV || outer_code == UDIV
+ || outer_code == MOD || outer_code == UMOD)
+ && exact_log2 (INTVAL (x)) >= 0)
+ || (outer_code == COMPARE
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)))
+ || (outer_code == EQ
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || (outer_code == GTU
+ && satisfies_constraint_I (x))
+ || (outer_code == LTU
+ && satisfies_constraint_P (x)))
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == PLUS
+ && reg_or_add_cint_operand (x, VOIDmode))
+ || (outer_code == MINUS
+ && reg_or_sub_cint_operand (x, VOIDmode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && (INTVAL (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST_DOUBLE:
+ if (mode == DImode && code == CONST_DOUBLE)
+ {
+ if ((outer_code == IOR || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0
+ && (CONST_DOUBLE_LOW (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == AND && and64_2_operand (x, DImode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ }
+ /* FALLTHRU */
+
+ case CONST:
+ case HIGH:
+ case SYMBOL_REF:
+ case MEM:
+ /* When optimizing for size, MEM should be slightly more expensive
+ than generating the address, e.g., (plus (reg) (const)).
+ L1 cache latency is about two instructions. */
+ *total = optimize_size ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
+ return true;
+
+ case LABEL_REF:
+ *total = 0;
+ return true;
+
+ case PLUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = rs6000_cost->dmul - rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case MINUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = 0;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && satisfies_constraint_I (XEXP (x, 1)))
+ {
+ if (INTVAL (XEXP (x, 1)) >= -256
+ && INTVAL (XEXP (x, 1)) <= 255)
+ *total = rs6000_cost->mulsi_const9;
+ else
+ *total = rs6000_cost->mulsi_const;
+ }
+ /* FMA accounted in outer PLUS/MINUS. */
+ else if ((mode == DFmode || mode == SFmode)
+ && (outer_code == PLUS || outer_code == MINUS))
+ *total = 0;
+ else if (mode == DFmode)
+ *total = rs6000_cost->dmul;
+ else if (mode == SFmode)
+ *total = rs6000_cost->fp;
+ else if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return false;
+
+ case DIV:
+ case MOD:
+ if (FLOAT_MODE_P (mode))
+ {
+ *total = mode == DFmode ? rs6000_cost->ddiv
+ : rs6000_cost->sdiv;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case UDIV:
+ case UMOD:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ {
+ if (code == DIV || code == MOD)
+ /* Shift, addze */
+ *total = COSTS_N_INSNS (2);
+ else
+ /* Shift */
+ *total = COSTS_N_INSNS (1);
+ }
+ else
+ {
+ if (GET_MODE (XEXP (x, 1)) == DImode)
+ *total = rs6000_cost->divdi;
+ else
+ *total = rs6000_cost->divsi;
+ }
+ /* Add in shift and subtract for MOD. */
+ if (code == MOD || code == UMOD)
+ *total += COSTS_N_INSNS (2);
+ return false;
+
+ case FFS:
+ *total = COSTS_N_INSNS (4);
+ return false;
+
+ case NOT:
+ if (outer_code == AND || outer_code == IOR || outer_code == XOR)
+ {
+ *total = 0;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case AND:
+ case IOR:
+ case XOR:
+ case ZERO_EXTRACT:
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* Handle mul_highpart. */
+ if (outer_code == TRUNCATE
+ && GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return true;
+ }
+ else if (outer_code == AND)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case COMPARE:
+ case NEG:
+ case ABS:
+ if (!FLOAT_MODE_P (mode))
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
+
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case UNSIGNED_FIX:
+ case FLOAT_TRUNCATE:
+ *total = rs6000_cost->fp;
+ return false;
+
+ case FLOAT_EXTEND:
+ if (mode == DFmode)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ return false;
+
+ case UNSPEC:
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_FRSP:
+ *total = rs6000_cost->fp;
+ return true;
+
+ default:
+ break;
+ }
+ break;
+
+ case CALL:
+ case IF_THEN_ELSE:
+ if (optimize_size)
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ else if (FLOAT_MODE_P (mode)
+ && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ *total = rs6000_cost->fp;
+ return false;
+ }
+ break;
+
+ case EQ:
+ case GTU:
+ case LTU:
+ /* Carry bit requires mode == Pmode.
+ NEG or PLUS already counted so only add one. */
+ if (mode == Pmode
+ && (outer_code == NEG || outer_code == PLUS))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ if (outer_code == SET)
+ {
+ if (XEXP (x, 1) == const0_rtx)
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ else if (mode == Pmode)
+ {
+ *total = COSTS_N_INSNS (3);
+ return false;
+ }
+ }
+ /* FALLTHRU */
+
+ case GT:
+ case LT:
+ case UNORDERED:
+ if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ /* CC COMPARE. */
+ if (outer_code == COMPARE)
+ {
+ *total = 0;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
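+
+/* For instance, costing (plus (reg) (const_int 12)): the CONST_INT
+   satisfies constraint "I" (signed 16-bit immediate), so the first
+   arm above reports a total cost of 0 -- the constant rides in the
+   addi for free -- while a 32-bit constant such as 100000 typically
+   falls through to the COSTS_N_INSNS (1) case via
+   reg_or_add_cint_operand. */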
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2. */
+
+int
+rs6000_register_move_cost (enum machine_mode mode,
+ enum reg_class from, enum reg_class to)
+{
+ /* Moves from/to GENERAL_REGS. */
+ if (reg_classes_intersect_p (to, GENERAL_REGS)
+ || reg_classes_intersect_p (from, GENERAL_REGS))
+ {
+ if (! reg_classes_intersect_p (to, GENERAL_REGS))
+ from = to;
+
+ if (from == FLOAT_REGS || from == ALTIVEC_REGS)
+ return (rs6000_memory_move_cost (mode, from, 0)
+ + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
+
+ /* It's more expensive to move CR_REGS than CR0_REGS because of the
+ shift. */
+ else if (from == CR_REGS)
+ return 4;
+
+ else
+ /* A move will cost one instruction per GPR moved. */
+ return 2 * hard_regno_nregs[0][mode];
+ }
+
+ /* Moving between two similar registers is just one instruction. */
+ else if (reg_classes_intersect_p (to, from))
+ return mode == TFmode ? 4 : 2;
+
+ /* Everything else has to go through GENERAL_REGS. */
+ else
+ return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ + rs6000_register_move_cost (mode, from, GENERAL_REGS));
+}
+
+/* A C expression returning the cost of moving data of MODE from a register to
+ or from memory. */
+
+int
+rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
+ int in ATTRIBUTE_UNUSED)
+{
+ if (reg_classes_intersect_p (class, GENERAL_REGS))
+ return 4 * hard_regno_nregs[0][mode];
+ else if (reg_classes_intersect_p (class, FLOAT_REGS))
+ return 4 * hard_regno_nregs[32][mode];
+ else if (reg_classes_intersect_p (class, ALTIVEC_REGS))
+ return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
+ else
+ return 4 + rs6000_register_move_cost (mode, class, GENERAL_REGS);
+}
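+
+/* Rough reading of the numbers: on a 32-bit target a DFmode value
+   occupies two GPRs, so moving it between GENERAL_REGS and memory
+   costs 4 * 2 = 8, while the same value lives in a single FPR
+   (hard_regno_nregs[32][DFmode] == 1) for a cost of 4. */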
+
+/* Newton-Raphson approximation of single-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivsf (rtx res, rtx n, rtx d)
+{
+ rtx x0, e0, e1, y1, u0, v0, one;
+
+ x0 = gen_reg_rtx (SFmode);
+ e0 = gen_reg_rtx (SFmode);
+ e1 = gen_reg_rtx (SFmode);
+ y1 = gen_reg_rtx (SFmode);
+ u0 = gen_reg_rtx (SFmode);
+ v0 = gen_reg_rtx (SFmode);
+ one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (SFmode, one,
+ gen_rtx_MULT (SFmode, d, x0))));
+ /* e1 = e0 + e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e0, e0), e0)));
+ /* y1 = x0 + e1 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e1, x0), x0)));
+ /* u0 = n * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (SFmode, n, y1)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (SFmode, n,
+ gen_rtx_MULT (SFmode, d, u0))));
+ /* res = u0 + v0 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, res,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, v0, y1), u0)));
+}
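+
+/* Why this converges: writing e = 1 - d*x0, the exact reciprocal is
+   1/d = x0 * (1 + e + e^2 + ...).  Above, e1 = e + e^2 and
+   y1 = x0 * (1 + e + e^2), truncating the series after e^2, and the
+   residual step res = u0 + (n - d*u0) * y1 squares the remaining
+   error to O(e^6).  With fres accurate to roughly 2^-8 this covers
+   the 24-bit SFmode significand, under the no-trap, finite-argument
+   assumptions stated above. */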
+
+/* Newton-Raphson approximation of double-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivdf (rtx res, rtx n, rtx d)
+{
+ rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
+
+ x0 = gen_reg_rtx (DFmode);
+ e0 = gen_reg_rtx (DFmode);
+ e1 = gen_reg_rtx (DFmode);
+ e2 = gen_reg_rtx (DFmode);
+ y1 = gen_reg_rtx (DFmode);
+ y2 = gen_reg_rtx (DFmode);
+ y3 = gen_reg_rtx (DFmode);
+ u0 = gen_reg_rtx (DFmode);
+ v0 = gen_reg_rtx (DFmode);
+ one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (DFmode, one,
+ gen_rtx_MULT (DFmode, d, x0))));
+ /* y1 = x0 + e0 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e0, x0), x0)));
+ /* e1 = e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_MULT (DFmode, e0, e0)));
+ /* y2 = y1 + e1 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, y2,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e1, y1), y1)));
+ /* e2 = e1 * e1 */
+ emit_insn (gen_rtx_SET (VOIDmode, e2,
+ gen_rtx_MULT (DFmode, e1, e1)));
+ /* y3 = y2 + e2 * y2 */
+ emit_insn (gen_rtx_SET (VOIDmode, y3,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e2, y2), y2)));
+ /* u0 = n * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (DFmode, n, y3)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (DFmode, n,
+ gen_rtx_MULT (DFmode, d, u0))));
+ /* res = u0 + v0 * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, res,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, v0, y3), u0)));
+}
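+
+/* Same derivation, carried further: with e = 1 - d*x0 the chain above
+   computes y3 = x0 * (1 + e)(1 + e^2)(1 + e^4)
+              = x0 * (1 + e + ... + e^7),
+   each step doubling the number of correct series terms, and the
+   final residual step pushes the relative error to O(e^16) -- ample
+   for the 53-bit DFmode significand when the initial estimate is
+   good to about 2^-8. */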
+
+/* Return an RTX representing where to find the function value of a
+ function returning MODE. */
+static rtx
+rs6000_complex_function_value (enum machine_mode mode)
+{
+ unsigned int regno;
+ rtx r1, r2;
+ enum machine_mode inner = GET_MODE_INNER (mode);
+ unsigned int inner_bytes = GET_MODE_SIZE (inner);
+
+ if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else
+ {
+ regno = GP_ARG_RETURN;
+
+ /* 32-bit is OK since it'll go in r3/r4. */
+ if (TARGET_32BIT && inner_bytes >= 4)
+ return gen_rtx_REG (mode, regno);
+ }
+
+ if (inner_bytes >= 8)
+ return gen_rtx_REG (mode, regno);
+
+ r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
+ const0_rtx);
+ r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
+ GEN_INT (inner_bytes));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
+}
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+
+ On the SPE, both FPs and vectors are returned in r3.
+
+ On RS/6000 an integer value is in r3 and a floating-point value is in
+ fp1, unless -msoft-float. */
+
+rtx
+rs6000_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+ unsigned int regno;
+
+ /* Special handling for structs in darwin64. */
+ if (rs6000_darwin64_abi
+ && TYPE_MODE (valtype) == BLKmode
+ && TREE_CODE (valtype) == RECORD_TYPE
+ && int_size_in_bytes (valtype) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed as
+ an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
+ if (valret)
+ return valret;
+ /* Otherwise fall through to standard ABI rules. */
+ }
+
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
+ {
+ /* A long long return value must be split under -mpowerpc64 with
+ the 32-bit ABI. */
+ return gen_rtx_PARALLEL (DImode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4))));
+ }
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
+ {
+ return gen_rtx_PARALLEL (DCmode,
+ gen_rtvec (4,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 2),
+ GEN_INT (8)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 3),
+ GEN_INT (12))));
+ }
+
+ mode = TYPE_MODE (valtype);
+ if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ || POINTER_TYPE_P (valtype))
+ mode = TARGET_32BIT ? SImode : DImode;
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else if (TREE_CODE (valtype) == COMPLEX_TYPE
+ && targetm.calls.split_complex_arg)
+ return rs6000_complex_function_value (mode);
+ else if (TREE_CODE (valtype) == VECTOR_TYPE
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (mode))
+ regno = ALTIVEC_ARG_RETURN;
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
+ else
+ regno = GP_ARG_RETURN;
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+rtx
+rs6000_libcall_value (enum machine_mode mode)
+{
+ unsigned int regno;
+
+ if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
+ {
+ /* A long long return value must be split under -mpowerpc64 with
+ the 32-bit ABI. */
+ return gen_rtx_PARALLEL (DImode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4))));
+ }
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ regno = GP_ARG_RETURN;
+ else if (SCALAR_FLOAT_MODE_P (mode)
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else if (ALTIVEC_VECTOR_MODE (mode)
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
+ regno = ALTIVEC_ARG_RETURN;
+ else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
+ return rs6000_complex_function_value (mode);
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
+ else
+ regno = GP_ARG_RETURN;
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Define the offset between two registers, FROM to be eliminated and its
+ replacement TO, at the start of a routine. */
+HOST_WIDE_INT
+rs6000_initial_elimination_offset (int from, int to)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ HOST_WIDE_INT offset;
+
+ if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ offset = info->push_p ? 0 : -info->total_size;
+ else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ {
+ offset = info->push_p ? 0 : -info->total_size;
+ if (FRAME_GROWS_DOWNWARD)
+ offset += info->fixed_size + info->vars_size + info->parm_size;
+ }
+ else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ offset = FRAME_GROWS_DOWNWARD
+ ? info->fixed_size + info->vars_size + info->parm_size
+ : 0;
+ else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ offset = info->total_size;
+ else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ offset = info->push_p ? info->total_size : 0;
+ else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
+ offset = 0;
+ else
+ gcc_unreachable ();
+
+ return offset;
+}
+
+/* Return true if TYPE is a SPE or AltiVec opaque type. */
+
+static bool
+rs6000_is_opaque_type (tree type)
+{
+ return (type == opaque_V2SI_type_node
+ || type == opaque_V2SF_type_node
+ || type == opaque_p_V2SI_type_node
+ || type == opaque_V4SI_type_node);
+}
+
+static rtx
+rs6000_dwarf_register_span (rtx reg)
+{
+ unsigned regno;
+
+ if (TARGET_SPE
+ && (SPE_VECTOR_MODE (GET_MODE (reg))
+ || (TARGET_E500_DOUBLE && GET_MODE (reg) == DFmode)))
+ ;
+ else
+ return NULL_RTX;
+
+ regno = REGNO (reg);
+
+ /* The duality of the SPE register size wreaks all kinds of havoc.
+ This is a way of distinguishing r0 in 32-bits from r0 in
+ 64-bits. */
+ return
+ gen_rtx_PARALLEL (VOIDmode,
+ BYTES_BIG_ENDIAN
+ ? gen_rtvec (2,
+ gen_rtx_REG (SImode, regno + 1200),
+ gen_rtx_REG (SImode, regno))
+ : gen_rtvec (2,
+ gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, regno + 1200)));
+}
+
+/* Map internal gcc register numbers to DWARF2 register numbers. */
+
+unsigned int
+rs6000_dbx_register_number (unsigned int regno)
+{
+ if (regno <= 63 || write_symbols != DWARF2_DEBUG)
+ return regno;
+ if (regno == MQ_REGNO)
+ return 100;
+ if (regno == LINK_REGISTER_REGNUM)
+ return 108;
+ if (regno == COUNT_REGISTER_REGNUM)
+ return 109;
+ if (CR_REGNO_P (regno))
+ return regno - CR0_REGNO + 86;
+ if (regno == XER_REGNO)
+ return 101;
+ if (ALTIVEC_REGNO_P (regno))
+ return regno - FIRST_ALTIVEC_REGNO + 1124;
+ if (regno == VRSAVE_REGNO)
+ return 356;
+ if (regno == VSCR_REGNO)
+ return 67;
+ if (regno == SPE_ACC_REGNO)
+ return 99;
+ if (regno == SPEFSCR_REGNO)
+ return 612;
+ /* SPE high reg number. We get these values of regno from
+ rs6000_dwarf_register_span. */
+ gcc_assert (regno >= 1200 && regno < 1232);
+ return regno;
+}
+
+/* APPLE LOCAL begin CW asm blocks */
+/* Translate some register names seen in CW asm into GCC standard
+ forms. */
+
+const char *
+rs6000_iasm_register_name (const char *regname, char *buf)
+{
+ /* SP is a valid reg name, but asm doesn't like it yet, so translate. */
+ if (strcmp (regname, "sp") == 0)
+ return "r1";
+ if (decode_reg_name (regname) >= 0)
+ return regname;
+ /* Change "gpr0" to "r0". */
+ if (regname[0] == 'g'
+ && regname[1] == 'p'
+ && decode_reg_name (regname + 2) >= 0)
+ return regname + 2;
+ /* Change "fp0" to "f0". */
+ if (regname[0] == 'f' && regname[1] == 'p')
+ {
+ buf[0] = 'f';
+ strcpy (buf + 1, regname + 2);
+ if (decode_reg_name (buf) >= 0)
+ return buf;
+ }
+ if (regname[0] == 's'
+ && regname[1] == 'p'
+ && regname[2] == 'r')
+ /* Temp hack, return it as a number. */
+ return regname + 3;
+ if (strcmp (regname, "RTOC") == 0)
+ return "r2";
+ return NULL;
+}
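+
+/* A few translations the function above performs: "sp" -> "r1",
+   "gpr7" -> "r7", "fp2" -> "f2", "RTOC" -> "r2", and "spr268" -> "268"
+   (the temporary hack of passing SPR numbers through as bare
+   numbers).  Names decode_reg_name already accepts are returned
+   unchanged. */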
+
+extern bool iasm_memory_clobber (const char *);
+/* Return true iff the opcode wants memory to be stable. We arrange
+ for a memory clobber in these instances. */
+bool
+iasm_memory_clobber (const char *ARG_UNUSED (opcode))
+{
+ return strncmp (opcode, "st", 2) == 0
+ || (strncmp (opcode, "l", 1) == 0 && (strcmp (opcode, "la") != 0
+ && strcmp (opcode, "li") != 0
+ && strcmp (opcode, "lis") != 0))
+ || strcmp (opcode, "sc") == 0
+ || strncmp (opcode, "td", 2) == 0
+ || strcmp (opcode, "trap") == 0
+ || strncmp (opcode, "tw", 2) == 0;
+}
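+
+/* So "stw", "lwz" and "sc", for example, are treated as needing
+   stable memory, while the immediate-only forms "la", "li" and "lis"
+   are exempted even though they start with 'l'. */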
+/* APPLE LOCAL end CW asm blocks */
+
+/* target hook eh_return_filter_mode */
+static enum machine_mode
+rs6000_eh_return_filter_mode (void)
+{
+ return TARGET_32BIT ? SImode : word_mode;
+}
+
+/* Target hook for scalar_mode_supported_p. */
+static bool
+rs6000_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Target hook for vector_mode_supported_p. */
+static bool
+rs6000_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return true;
+ else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
+ return true;
+ else
+ return false;
+}
+
+/* Target hook for invalid_arg_for_unprototyped_fn. */
+static const char *
+invalid_arg_for_unprototyped_fn (tree typelist, tree funcdecl, tree val)
+{
+ return (!rs6000_darwin64_abi
+ && typelist == 0
+ && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
+ && (funcdecl == NULL_TREE
+ || (TREE_CODE (funcdecl) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ ? N_("AltiVec argument passed to unprototyped function")
+ : NULL;
+}
+
+/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
+ setup by using __stack_chk_fail_local hidden function instead of
+ calling __stack_chk_fail directly. Otherwise it is better to call
+ __stack_chk_fail directly. */
+
+static tree
+rs6000_stack_protect_fail (void)
+{
+ return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
+ ? default_hidden_stack_protect_fail ()
+ : default_external_stack_protect_fail ();
+}
+
+/* APPLE LOCAL begin 3399553 */
+/* Calculate the value of FLT_ROUNDS into DEST.
+
+ The rounding mode is in bits 30:31 of FPSCR, and has the following
+ settings:
+ 00 Round to nearest
+ 01 Round to 0
+ 10 Round to +inf
+ 11 Round to -inf
+
+ FLT_ROUNDS, on the other hand, expects the following:
+ -1 Undefined
+ 0 Round to 0
+ 1 Round to nearest
+ 2 Round to +inf
+ 3 Round to -inf
+
+ To perform the conversion, we do:
+ ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
+*/
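+/* Checking that expression against the four FPSCR settings:
+     RN=00: 0 ^ ((~0 & 3) >> 1) = 0 ^ 1 = 1   (to nearest)
+     RN=01: 1 ^ ((~1 & 3) >> 1) = 1 ^ 1 = 0   (toward 0)
+     RN=10: 2 ^ ((~2 & 3) >> 1) = 2 ^ 0 = 2   (toward +inf)
+     RN=11: 3 ^ ((~3 & 3) >> 1) = 3 ^ 0 = 3   (toward -inf)  */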
+extern void
+rs6000_expand_flt_rounds (rtx dest)
+{
+ if (TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ rtx mem = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ rtx temp_fp = gen_reg_rtx (DFmode);
+ rtx temp_int = gen_reg_rtx (SImode);
+
+ /* Step #1: Read FPSCR. Unfortunately, this can only be done into
+ bits 32:63 of an FP reg. */
+ emit_insn (gen_mffs (temp_fp));
+
+ /* Step #2: Copy onto a stack temp. */
+ emit_move_insn (mem, temp_fp);
+
+ /* Step #3: Copy into an integer register. */
+ emit_move_insn (dest, adjust_address (mem, SImode,
+ WORDS_BIG_ENDIAN ? 4 : 0));
+
+ /* Step #4: Perform conversion described above. */
+ emit_insn (gen_one_cmplsi2 (temp_int, dest));
+ emit_insn (gen_andsi3 (dest, dest, GEN_INT (0x3)));
+ emit_insn (gen_andsi3 (temp_int, temp_int, GEN_INT (0x3)));
+ emit_insn (gen_lshrsi3 (temp_int, temp_int, const1_rtx));
+ emit_insn (gen_xorsi3 (dest, dest, temp_int));
+ }
+ else
+ {
+ /* Default: return 1 (round to nearest). */
+ emit_move_insn (dest, const1_rtx);
+ }
+}
+/* APPLE LOCAL end 3399553 */
+
+#include "gt-rs6000.h"
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.h
new file mode 100644
index 000000000..9d88f2f3f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.h
@@ -0,0 +1,3520 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* Note that some other tm.h files include this one and then override
+ many of the definitions. */
+
+/* Definitions for the object file format. These are set at
+ compile-time. */
+
+#define OBJECT_XCOFF 1
+#define OBJECT_ELF 2
+#define OBJECT_PEF 3
+#define OBJECT_MACHO 4
+
+#define TARGET_ELF (TARGET_OBJECT_FORMAT == OBJECT_ELF)
+#define TARGET_XCOFF (TARGET_OBJECT_FORMAT == OBJECT_XCOFF)
+#define TARGET_MACOS (TARGET_OBJECT_FORMAT == OBJECT_PEF)
+#define TARGET_MACHO (TARGET_OBJECT_FORMAT == OBJECT_MACHO)
+
+#ifndef TARGET_AIX
+#define TARGET_AIX 0
+#endif
+
+/* Control whether function entry points use a "dot" symbol when
+ ABI_AIX. */
+#define DOT_SYMBOLS 1
+
+/* Default string to use for cpu if not specified. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT ((char *)0)
+#endif
+
+/* If configured for PPC405, support PPC405CR Erratum77. */
+#ifdef CONFIG_PPC405CR
+#define PPC405_ERRATUM77 (rs6000_cpu == PROCESSOR_PPC405)
+#else
+#define PPC405_ERRATUM77 0
+#endif
+
+/* Common ASM definitions used by ASM_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#define ASM_CPU_SPEC \
+"%{!mcpu*: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwrx} \
+ %{mpowerpc64*: -mppc64} \
+ %{!mpowerpc64*: %{mpowerpc*: -mppc}} \
+ %{mno-power: %{!mpowerpc*: -mcom}} \
+ %{!mno-power: %{!mpower*: %(asm_default)}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwrx} \
+%{mcpu=power3: -mppc64} \
+%{mcpu=power4: -mpower4} \
+%{mcpu=power5: -mpower4} \
+%{mcpu=power5+: -mpower4} \
+%{mcpu=power6: -mpower4 -maltivec} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwrx} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc64} \
+%{mcpu=401: -mppc} \
+%{mcpu=403: -m403} \
+%{mcpu=405: -m405} \
+%{mcpu=405fp: -m405} \
+%{mcpu=440: -m440} \
+%{mcpu=440fp: -m440} \
+%{mcpu=505: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -mppc} \
+%{mcpu=603e: -mppc} \
+%{mcpu=ec603e: -mppc} \
+%{mcpu=604: -mppc} \
+%{mcpu=604e: -mppc} \
+%{mcpu=620: -mppc64} \
+%{mcpu=630: -mppc64} \
+%{mcpu=740: -mppc} \
+%{mcpu=750: -mppc} \
+%{mcpu=G3: -mppc} \
+%{mcpu=7400: -mppc -maltivec} \
+%{mcpu=7450: -mppc -maltivec} \
+%{mcpu=G4: -mppc -maltivec} \
+%{mcpu=801: -mppc} \
+%{mcpu=821: -mppc} \
+%{mcpu=823: -mppc} \
+%{mcpu=860: -mppc} \
+%{mcpu=970: -mpower4 -maltivec} \
+%{mcpu=G5: -mpower4 -maltivec} \
+%{mcpu=8540: -me500} \
+%{maltivec: -maltivec} \
+-many"
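+
+/* Reading the spec above: "-mcpu=7400" hands the assembler
+   "-mppc -maltivec -many", and plain "-mpowerpc" with no -mcpu yields
+   "-mppc -many".  The unconditional trailing -many keeps the
+   assembler permissive about mnemonics outside the selected subset. */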
+
+#define CPP_DEFAULT_SPEC ""
+
+#define ASM_DEFAULT_SPEC ""
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that is used by the GCC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define SUBTARGET_EXTRA_SPECS
+
+#define EXTRA_SPECS \
+ { "cpp_default", CPP_DEFAULT_SPEC }, \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "asm_default", ASM_DEFAULT_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+/* Architecture type. */
+
+/* Define TARGET_MFCRF to 0 if the target assembler does not support
+ the optional field operand for mfcr. */
+
+#ifndef HAVE_AS_MFCRF
+#undef TARGET_MFCRF
+#define TARGET_MFCRF 0
+#endif
+
+/* Define TARGET_POPCNTB to 0 if the target assembler does not support
+ the popcount byte instruction. */
+
+#ifndef HAVE_AS_POPCNTB
+#undef TARGET_POPCNTB
+#define TARGET_POPCNTB 0
+#endif
+
+/* Define TARGET_FPRND to 0 if the target assembler does not support
+ the fp rounding instructions. */
+
+#ifndef HAVE_AS_FPRND
+#undef TARGET_FPRND
+#define TARGET_FPRND 0
+#endif
+
+#ifndef TARGET_SECURE_PLT
+#define TARGET_SECURE_PLT 0
+#endif
+
+#define TARGET_32BIT (! TARGET_64BIT)
+
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+/* Return 1 for a symbol ref for a thread-local storage symbol. */
+#define RS6000_SYMBOL_REF_TLS_P(RTX) \
+ (GET_CODE (RTX) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (RTX) != 0)
+
+#ifdef IN_LIBGCC2
+/* For libgcc2 we make sure this is a compile-time constant. */
+#if defined (__64BIT__) || defined (__powerpc64__) || defined (__ppc64__)
+#undef TARGET_POWERPC64
+#define TARGET_POWERPC64 1
+#else
+#undef TARGET_POWERPC64
+#define TARGET_POWERPC64 0
+#endif
+#else
+ /* The option machinery will define this. */
+#endif
+
+#define TARGET_DEFAULT (MASK_POWER | MASK_MULTIPLE | MASK_STRING)
+
+/* Processor type. Order must match cpu attribute in MD file. */
+enum processor_type
+ {
+ PROCESSOR_RIOS1,
+ PROCESSOR_RIOS2,
+ PROCESSOR_RS64A,
+ PROCESSOR_MPCCORE,
+ PROCESSOR_PPC403,
+ PROCESSOR_PPC405,
+ PROCESSOR_PPC440,
+ PROCESSOR_PPC601,
+ PROCESSOR_PPC603,
+ PROCESSOR_PPC604,
+ PROCESSOR_PPC604e,
+ PROCESSOR_PPC620,
+ PROCESSOR_PPC630,
+ PROCESSOR_PPC750,
+ PROCESSOR_PPC7400,
+ PROCESSOR_PPC7450,
+ PROCESSOR_PPC8540,
+ PROCESSOR_POWER4,
+ PROCESSOR_POWER5
+};
+
+extern enum processor_type rs6000_cpu;
+
+/* Recast the processor type to the cpu attribute. */
+#define rs6000_cpu_attr ((enum attr_cpu)rs6000_cpu)
+
+/* Define generic processor types based upon current deployment. */
+#define PROCESSOR_COMMON PROCESSOR_PPC601
+#define PROCESSOR_POWER PROCESSOR_RIOS1
+#define PROCESSOR_POWERPC PROCESSOR_PPC604
+#define PROCESSOR_POWERPC64 PROCESSOR_RS64A
+
+/* Define the default processor. This is overridden by other tm.h files. */
+#define PROCESSOR_DEFAULT PROCESSOR_RIOS1
+#define PROCESSOR_DEFAULT64 PROCESSOR_RS64A
+
+/* Specify the dialect of assembler to use. New mnemonics are dialect one
+ and the old mnemonics are dialect zero. */
+#define ASSEMBLER_DIALECT (TARGET_NEW_MNEMONICS ? 1 : 0)
+
+/* Types of costly dependences. */
+enum rs6000_dependence_cost
+ {
+ max_dep_latency = 1000,
+ no_dep_costly,
+ all_deps_costly,
+ true_store_to_load_dep_costly,
+ store_to_load_dep_costly
+ };
+
+/* Types of nop insertion schemes in sched target hook sched_finish. */
+enum rs6000_nop_insertion
+ {
+ sched_finish_regroup_exact = 1000,
+ sched_finish_pad_groups,
+ sched_finish_none
+ };
+
+/* Dispatch group termination caused by an insn. */
+enum group_termination
+ {
+ current_group,
+ previous_group
+ };
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-cpu is ignored if -mcpu is specified.
+ --with-tune is ignored if -mtune is specified.
+ --with-float is ignored if -mhard-float or -msoft-float are
+ specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }
+
+/* rs6000_select[0] is reserved for the default cpu defined via --with-cpu */
+struct rs6000_cpu_select
+{
+ const char *string;
+ const char *name;
+ int set_tune_p;
+ int set_arch_p;
+};
+
+extern struct rs6000_cpu_select rs6000_select[];
+
+/* Debug support */
+extern const char *rs6000_debug_name; /* Name for -mdebug-xxxx option */
+extern int rs6000_debug_stack; /* debug stack applications */
+extern int rs6000_debug_arg; /* debug argument handling */
+
+#define TARGET_DEBUG_STACK rs6000_debug_stack
+#define TARGET_DEBUG_ARG rs6000_debug_arg
+
+extern const char *rs6000_traceback_name; /* Type of traceback table. */
+
+/* These are separate from target_flags because we've run out of bits
+ there. */
+extern int rs6000_long_double_type_size;
+extern int rs6000_ieeequad;
+extern int rs6000_altivec_abi;
+extern int rs6000_spe_abi;
+extern int rs6000_float_gprs;
+extern int rs6000_alignment_flags;
+extern const char *rs6000_sched_insert_nops_str;
+extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
+
+/* Alignment options for fields in structures for sub-targets following
+ AIX-like ABI.
+ ALIGN_POWER word-aligns FP doubles (default AIX ABI).
+ ALIGN_NATURAL doubleword-aligns FP doubles (align to object size).
+
+ Override the macro definitions when compiling libobjc to avoid undefined
+ reference to rs6000_alignment_flags due to library's use of GCC alignment
+ macros which use the macros below. */
+
+#ifndef IN_TARGET_LIBS
+#define MASK_ALIGN_POWER 0x00000000
+#define MASK_ALIGN_NATURAL 0x00000001
+#define TARGET_ALIGN_NATURAL (rs6000_alignment_flags & MASK_ALIGN_NATURAL)
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+#define OPTION_MASK_ALIGN_MAC68K 0x00000002
+#define OPTION_ALIGN_MAC68K (rs6000_alignment_flags & OPTION_MASK_ALIGN_MAC68K)
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
+#else
+#define TARGET_ALIGN_NATURAL 0
+#endif
+
+#define TARGET_LONG_DOUBLE_128 (rs6000_long_double_type_size == 128)
+#define TARGET_IEEEQUAD rs6000_ieeequad
+#define TARGET_ALTIVEC_ABI rs6000_altivec_abi
+
+#define TARGET_SPE_ABI 0
+#define TARGET_SPE 0
+#define TARGET_E500 0
+#define TARGET_ISEL 0
+#define TARGET_FPRS 1
+#define TARGET_E500_SINGLE 0
+#define TARGET_E500_DOUBLE 0
+
+/* E500 processors only support plain "sync", not lwsync. */
+#define TARGET_NO_LWSYNC TARGET_E500
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Do not use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for.
+
+ On the RS/6000 this is used to define the target cpu type. */
+
+#define OVERRIDE_OPTIONS rs6000_override_options (TARGET_CPU_DEFAULT)
+
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) optimization_options(LEVEL,SIZE)
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* Target pragma. */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_pragma (0, "longcall", rs6000_pragma_longcall); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
+} while (0)
+
+/* Target #defines. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ rs6000_cpu_cpp_builtins (pfile)
+
+/* This is used by rs6000_cpu_cpp_builtins to indicate the byte order
+ we're compiling for. Some configurations may need to override it. */
+#define RS6000_CPU_CPP_ENDIAN_BUILTINS() \
+ do \
+ { \
+ if (BYTES_BIG_ENDIAN) \
+ { \
+ builtin_define ("__BIG_ENDIAN__"); \
+ builtin_define ("_BIG_ENDIAN"); \
+ builtin_assert ("machine=bigendian"); \
+ } \
+ else \
+ { \
+ builtin_define ("__LITTLE_ENDIAN__"); \
+ builtin_define ("_LITTLE_ENDIAN"); \
+ builtin_assert ("machine=littleendian"); \
+ } \
+ } \
+ while (0)
+
+/* Target machine storage layout. */
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ (MODE) = TARGET_32BIT ? SImode : DImode;
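+
+/* E.g. a QImode or HImode scalar is held in SImode on a 32-bit target
+   (DImode on 64-bit), so byte and halfword values occupy full
+   registers and are only narrowed at explicit stores. */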
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+/* That is true on RS/6000. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is true on RS/6000. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+
+ For RS/6000 we can decide arbitrarily since there are no machine
+ instructions for them. Might as well be consistent with bits and bytes. */
+#define WORDS_BIG_ENDIAN 1
+
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (! TARGET_POWERPC64 ? 4 : 8)
+#ifdef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#else
+#define MIN_UNITS_PER_WORD 4
+#endif
+#define UNITS_PER_FP_WORD 8
+#define UNITS_PER_ALTIVEC_WORD 16
+#define UNITS_PER_SPE_WORD 8
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+#define PTRDIFF_TYPE "int"
+
+/* Type used for size_t, as a string used in a declaration. */
+#define SIZE_TYPE "long unsigned int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#define WCHAR_TYPE "short unsigned int"
+
+/* Width of wchar_t in bits. */
+#define WCHAR_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `short' on the
+ target machine. If you don't define this, the default is half a
+ word. (If this would be less than one storage unit, it is
+ rounded up to one unit.) */
+#define SHORT_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `int' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define INT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `long' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define LONG_TYPE_SIZE (TARGET_32BIT ? 32 : 64)
+
+/* A C expression for the size in bits of the type `long long' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `float' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define FLOAT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `double' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define DOUBLE_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `long double' on
+ the target machine. If you don't define this, the default is two
+ words. */
+#define LONG_DOUBLE_TYPE_SIZE rs6000_long_double_type_size
+
+/* Define this to set the long double type size to use in libgcc2.c, which
+ cannot depend on target_flags. */
+#ifdef __LONG_DOUBLE_128__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+/* Work around rs6000_long_double_type_size dependency in ada/targtyps.c. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE (TARGET_32BIT ? 32 : 64)
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY (TARGET_32BIT ? 32 : 64)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY \
+ ((TARGET_32BIT && !TARGET_ALTIVEC && !TARGET_ALTIVEC_ABI) ? 64 : 128)
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* APPLE LOCAL begin Macintosh alignment */
+/* Constants for alignment macros below. */
+#define RS6000_DOUBLE_ALIGNMENT 64
+#define RS6000_LONGLONG_ALIGNMENT 64
+#define RS6000_VECTOR_ALIGNMENT 128
+/* APPLE LOCAL end Macintosh alignment */
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* A C expression to compute the alignment for a variable in the
+ local store. TYPE is the data type, and ALIGN is the alignment
+ that the object would ordinarily have. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : \
+ (TARGET_E500_DOUBLE && TYPE_MODE (TYPE) == DFmode) ? 64 : \
+ (TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \
+ && SPE_VECTOR_MODE (TYPE_MODE (TYPE))) ? 64 : ALIGN)
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Return 1 if a structure or array containing FIELD should be
+ accessed using `BLKMODE'.
+
+ For the SPE, simd types are V2SI, and gcc can be tempted to put the
+ entire thing in a DI and use subregs to access the internals.
+ store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
+ back-end. Because a single GPR can hold a V2SI, but not a DI, the
+ best thing to do is set structs to BLKmode and avoid Severe Tire
+ Damage.
+
+ On e500 v2, DF and DI modes suffer from the same anomaly. DF can
+ fit into 1, whereas DI still needs two. */
+#define MEMBER_TYPE_FORCES_BLK(FIELD, MODE) \
+ ((TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ || (TARGET_E500_DOUBLE && (MODE) == DFmode))
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Make strings word-aligned so strcpy from constants will be faster.
+ Make vector constants quadword aligned. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD \
+ ? BITS_PER_WORD \
+ : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons.
+ Align vectors to 128 bits. Align SPE vectors and E500 v2 doubles to
+ 64 bits. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == VECTOR_TYPE ? (TARGET_SPE_ABI ? 64 : 128) \
+ : (TARGET_E500_DOUBLE && TYPE_MODE (TYPE) == DFmode) ? 64 \
+ : TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Define this macro to be the value 1 if unaligned accesses have a cost
+ many times greater than aligned accesses, for example if they are
+ emulated in a trap handler. */
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
+ (STRICT_ALIGNMENT \
+ || (((MODE) == SFmode || (MODE) == DFmode || (MODE) == TFmode \
+ || (MODE) == DImode) \
+ && (ALIGN) < 32))
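+
+/* Editorial illustration (not part of the original source): since
+ STRICT_ALIGNMENT is 0 above, SLOW_UNALIGNED_ACCESS (DFmode, 16)
+ is 1 (a floating mode at less than 32-bit alignment), while
+ SLOW_UNALIGNED_ACCESS (SImode, 8) is 0. */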
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ RS/6000 has 32 fixed-point registers, 32 floating-point registers,
+ an MQ register, a count register, a link register, and 8 condition
+ register fields, which we view here as separate registers. AltiVec
+ adds 32 vector registers and a VRsave register.
+
+ In addition, the difference between the frame and argument pointers is
+ a function of the number of registers saved, so we need to have a
+ register for AP that will later be eliminated in favor of SP or FP.
+ This is a normal register, but it is fixed.
+
+ We also create a pseudo register for float/int conversions, that will
+ really represent the memory location used. It is represented here as
+ a register, in order to work around problems in allocating stack storage
+ in inline functions.
+
+ Another pseudo (not included in DWARF_FRAME_REGISTERS) is soft frame
+ pointer, which is eventually eliminated in favor of SP or FP. */
+
+/* APPLE LOCAL begin 3399553 */
+/* OK, so this isn't technically the last physical register on the
+ processor. It's the last register we want mapped into the EH
+ information. Typically this would be the last physical register;
+ however, in our case we'd like to maintain backwards compatibility
+ rather than define space we won't use anyway. */
+#define LAST_PHYSICAL_REGISTER 113
+
+#define FIRST_PSEUDO_REGISTER 115
+/* APPLE LOCAL end 3399553 */
+
+/* This must be included for pre gcc 3.0 glibc compatibility. */
+#define PRE_GCC3_DWARF_FRAME_REGISTERS 77
+
+/* Add 32 dwarf columns for synthetic SPE registers. */
+/* APPLE LOCAL 3399553 */
+#define DWARF_FRAME_REGISTERS (LAST_PHYSICAL_REGISTER + 32)
+
+/* The SPE has an additional 32 synthetic registers, with DWARF debug
+ info numbering for these registers starting at 1200. While eh_frame
+ register numbering need not be the same as the debug info numbering,
+ we choose to number these regs for eh_frame at 1200 too. This allows
+ future versions of the rs6000 backend to add hard registers and
+ continue to use the gcc hard register numbering for eh_frame. If the
+ extra SPE registers in eh_frame were numbered starting from the
+ current value of FIRST_PSEUDO_REGISTER, then if FIRST_PSEUDO_REGISTER
+ changed we'd need to introduce a mapping in DWARF_FRAME_REGNUM to
+ avoid invalidating older SPE eh_frame info.
+
+ We must map them here to avoid huge unwinder tables mostly consisting
+ of unused space. */
+/* APPLE LOCAL begin 3399553 */
+#define DWARF_REG_TO_UNWIND_COLUMN(r) \
+ ((r) > 1200 ? ((r) - 1200 + LAST_PHYSICAL_REGISTER) : (r))
+/* APPLE LOCAL end 3399553 */
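+
+/* Editorial illustration (not part of the original source), following
+ the macro's arithmetic: a synthetic SPE register numbered 1205 maps
+ to unwind column 1205 - 1200 + 113 == 118, while an ordinary hard
+ register number such as 70 (CR2) passes through unchanged. */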
+
+/* Use standard DWARF numbering for DWARF debugging information. */
+#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO)
+
+/* Use gcc hard register numbering for eh_frame. */
+#define DWARF_FRAME_REGNUM(REGNO) (REGNO)
+
+/* Map register numbers held in the call frame info that gcc has
+ collected using DWARF_FRAME_REGNUM to those that should be output in
+ .debug_frame and .eh_frame. We continue to use gcc hard reg numbers
+ for .eh_frame, but use the numbers mandated by the various ABIs for
+ .debug_frame. rs6000_emit_prologue has translated any combination of
+ CR2, CR3, CR4 saves to a save of CR2. The actual code emitted saves
+ the whole of CR, so we map CR2_REGNO to the DWARF reg for CR. */
+#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) \
+ ((FOR_EH) ? (REGNO) \
+ : (REGNO) == CR2_REGNO ? 64 \
+ : DBX_REGISTER_NUMBER (REGNO))
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ On RS/6000, r1 is used for the stack. On Darwin, r2 is available
+ as a local register; for all other OSes, r2 is the TOC pointer.
+
+ cr5 is not supposed to be used.
+
+ On System V implementations, r13 is fixed and not available for use. */
+
+#define FIXED_REGISTERS \
+ {0, 1, FIXED_R2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1 \
+ , 1, 1, 1 \
+ /* APPLE LOCAL 3399553 */ \
+ , 1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1 \
+ , 1, 1, 1 \
+ /* APPLE LOCAL 3399553 */ \
+ , 1 \
+}
+
+/* Like `CALL_USED_REGISTERS' except this macro doesn't require that
+ the entire set of `FIXED_REGISTERS' be included.
+ (`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS').
+ This macro is optional. If not specified, it defaults to the value
+ of `CALL_USED_REGISTERS'. */
+
+#define CALL_REALLY_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0 \
+ , 0, 0, 0 \
+ /* APPLE LOCAL 3399553 */ \
+ , 0 \
+}
+
+#define MQ_REGNO 64
+#define CR0_REGNO 68
+#define CR1_REGNO 69
+#define CR2_REGNO 70
+#define CR3_REGNO 71
+#define CR4_REGNO 72
+#define MAX_CR_REGNO 75
+#define XER_REGNO 76
+#define FIRST_ALTIVEC_REGNO 77
+#define LAST_ALTIVEC_REGNO 108
+#define TOTAL_ALTIVEC_REGS (LAST_ALTIVEC_REGNO - FIRST_ALTIVEC_REGNO + 1)
+#define VRSAVE_REGNO 109
+#define VSCR_REGNO 110
+#define SPE_ACC_REGNO 111
+#define SPEFSCR_REGNO 112
+/* APPLE LOCAL 3399553 */
+#define FPSCR_REGNO 114
+
+#define FIRST_SAVED_ALTIVEC_REGNO (FIRST_ALTIVEC_REGNO+20)
+#define FIRST_SAVED_FP_REGNO (14+32)
+#define FIRST_SAVED_GP_REGNO 13
+
+/* APPLE LOCAL begin 3399553 */
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ We allocate in the following order:
+ fp0 (not saved or used for anything)
+ fp13 - fp2 (not saved; incoming fp arg registers)
+ fp1 (not saved; return value)
+ fp31 - fp14 (saved; order given to save least number)
+ cr7, cr6 (not saved or special)
+ cr1 (not saved, but used for FP operations)
+ cr0 (not saved, but used for arithmetic operations)
+ cr4, cr3, cr2 (saved)
+ r0 (not saved; cannot be base reg)
+ r9 (not saved; best for TImode)
+ r11, r10, r8-r4 (not saved; highest used first to reduce conflicts)
+ r3 (not saved; return value register)
+ r31 - r13 (saved; order given to save least number)
+ r12 (not saved; if used for DImode or DFmode would use r13)
+ mq (not saved; best to use it if we can)
+ ctr (not saved; when we have the choice ctr is better)
+ lr (saved)
+ cr5, r1, r2, ap, xer (fixed)
+ v0 - v1 (not saved or used for anything)
+ v13 - v3 (not saved; incoming vector arg registers)
+ v2 (not saved; incoming vector arg reg; return value)
+ v19 - v14 (not saved or used for anything)
+ v31 - v20 (saved; order given to save least number)
+ vrsave, vscr (fixed)
+ spe_acc, spefscr (fixed)
+ sfp (fixed)
+ fpscr (fixed)
+*/
+/* APPLE LOCAL end 3399553 */
+
+#if FIXED_R2 == 1
+#define MAYBE_R2_AVAILABLE
+#define MAYBE_R2_FIXED 2,
+#else
+#define MAYBE_R2_AVAILABLE 2,
+#define MAYBE_R2_FIXED
+#endif
+
+#define REG_ALLOC_ORDER \
+ {32, \
+ 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, \
+ 33, \
+ 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, \
+ 50, 49, 48, 47, 46, \
+ 75, 74, 69, 68, 72, 71, 70, \
+ 0, MAYBE_R2_AVAILABLE \
+ 9, 11, 10, 8, 7, 6, 5, 4, \
+ 3, \
+ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, \
+ 18, 17, 16, 15, 14, 13, 12, \
+ 64, 66, 65, \
+ 73, 1, MAYBE_R2_FIXED 67, 76, \
+ /* AltiVec registers. */ \
+ 77, 78, \
+ 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, \
+ 79, \
+ 96, 95, 94, 93, 92, 91, \
+ 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, \
+ 109, 110, \
+ /* APPLE LOCAL 3399553 */ \
+ 111, 112, 113, 114 \
+}
+
+/* True if register is floating-point. */
+#define FP_REGNO_P(N) ((N) >= 32 && (N) <= 63)
+
+/* True if register is a condition register. */
+#define CR_REGNO_P(N) ((N) >= 68 && (N) <= 75)
+
+/* True if register is a condition register, but not cr0. */
+#define CR_REGNO_NOT_CR0_P(N) ((N) >= 69 && (N) <= 75)
+
+/* True if register is an integer register. */
+#define INT_REGNO_P(N) \
+ ((N) <= 31 || (N) == ARG_POINTER_REGNUM || (N) == FRAME_POINTER_REGNUM)
+
+/* SPE SIMD registers are just the GPRs. */
+#define SPE_SIMD_REGNO_P(N) ((N) <= 31)
+
+/* True if register is the XER register. */
+#define XER_REGNO_P(N) ((N) == XER_REGNO)
+
+/* True if register is an AltiVec register. */
+#define ALTIVEC_REGNO_P(N) ((N) >= FIRST_ALTIVEC_REGNO && (N) <= LAST_ALTIVEC_REGNO)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) rs6000_hard_regno_nregs ((REGNO), (MODE))
+
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ ((TARGET_32BIT && TARGET_POWERPC64 \
+ && (GET_MODE_SIZE (MODE) > 4) \
+ && INT_REGNO_P (REGNO)) ? 1 : 0)
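+
+/* Editorial illustration (not part of the original source): when
+ TARGET_32BIT && TARGET_POWERPC64, only the low 32 bits of a GPR are
+ preserved across a call, so (r14, DImode) is part-clobbered (size
+ 8 > 4) while (r14, SImode) is not. */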
+
+#define ALTIVEC_VECTOR_MODE(MODE) \
+ ((MODE) == V16QImode \
+ || (MODE) == V8HImode \
+ || (MODE) == V4SFmode \
+ || (MODE) == V4SImode)
+
+#define SPE_VECTOR_MODE(MODE) \
+ ((MODE) == V4HImode \
+ || (MODE) == V2SFmode \
+ || (MODE) == V1DImode \
+ || (MODE) == V2SImode)
+
+#define UNITS_PER_SIMD_WORD \
+ (TARGET_ALTIVEC ? UNITS_PER_ALTIVEC_WORD \
+ : (TARGET_SPE ? UNITS_PER_SPE_WORD : UNITS_PER_WORD))
+
+/* Value is TRUE if hard register REGNO can hold a value of
+ machine-mode MODE. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ rs6000_hard_regno_mode_ok_p[(int)(MODE)][REGNO]
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (SCALAR_FLOAT_MODE_P (MODE1) \
+ ? SCALAR_FLOAT_MODE_P (MODE2) \
+ : SCALAR_FLOAT_MODE_P (MODE2) \
+ ? SCALAR_FLOAT_MODE_P (MODE1) \
+ : GET_MODE_CLASS (MODE1) == MODE_CC \
+ ? GET_MODE_CLASS (MODE2) == MODE_CC \
+ : GET_MODE_CLASS (MODE2) == MODE_CC \
+ ? GET_MODE_CLASS (MODE1) == MODE_CC \
+ : SPE_VECTOR_MODE (MODE1) \
+ ? SPE_VECTOR_MODE (MODE2) \
+ : SPE_VECTOR_MODE (MODE2) \
+ ? SPE_VECTOR_MODE (MODE1) \
+ : ALTIVEC_VECTOR_MODE (MODE1) \
+ ? ALTIVEC_VECTOR_MODE (MODE2) \
+ : ALTIVEC_VECTOR_MODE (MODE2) \
+ ? ALTIVEC_VECTOR_MODE (MODE1) \
+ : 1)
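+
+/* Editorial illustration (not part of the original source): SFmode and
+ DFmode tie (both scalar float), and SImode and DImode tie (neither
+ is float, CC, or vector), but SImode and CCmode do not, since only
+ one of them is a CC mode. */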
+
+/* Post-reload, we can't use any new AltiVec registers, as we already
+ emitted the vrsave mask. */
+
+#define HARD_REGNO_RENAME_OK(SRC, DST) \
+ (! ALTIVEC_REGNO_P (DST) || regs_ever_live[DST])
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2. */
+
+#define REGISTER_MOVE_COST rs6000_register_move_cost
+
+/* A C expression returning the cost of moving data of MODE from a
+ register to or from memory. */
+
+#define MEMORY_MOVE_COST rs6000_memory_move_cost
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Set this to 3 on the RS/6000 since that is roughly the average cost of an
+ unscheduled conditional branch. */
+
+#define BRANCH_COST 3
+
+/* Override the BRANCH_COST heuristic, which empirically produces worse
+ performance when short-circuiting is removed from the logical ops. */
+
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+
+/* A fixed register used at prologue and epilogue generation to fix
+ addressing modes. The SPE needs heavy addressing fixes at the last
+ minute, and it's best to save a register for it.
+
+ AltiVec also needs fixes, but so far we've gotten by using r11,
+ which is actually wrong because when use_backchain_to_restore_sp is
+ true, we end up clobbering r11.
+
+ The AltiVec case needs to be fixed. It is unclear whether we should
+ break ABI compatibility and reserve a register for it as well. */
+
+#define FIXED_SCRATCH (TARGET_SPE ? 14 : 11)
+
+/* Define this macro to change register usage conditional on target
+ flags. */
+
+#define CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage ()
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* RS/6000 pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 1
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 31
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 113
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 67
+
+/* Place to put static chain when calling a function that requires it. */
+#define STATIC_CHAIN_REGNUM 11
+
+/* Link register number. */
+#define LINK_REGISTER_REGNUM 65
+
+/* Count register number. */
+#define COUNT_REGISTER_REGNUM 66
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The RS/6000 has three types of registers, fixed-point, floating-point,
+ and condition registers, plus three special registers, MQ, CTR, and the
+ link register. AltiVec adds a vector register class.
+
+ However, r0 is special in that it cannot be used as a base register.
+ So make a class for registers valid as base registers.
+
+ Also, cr0 is the only condition code register that can be used in
+ arithmetic insns, so make a separate class for it. */
+
+enum reg_class
+{
+ NO_REGS,
+ BASE_REGS,
+ GENERAL_REGS,
+ FLOAT_REGS,
+ ALTIVEC_REGS,
+ VRSAVE_REGS,
+ VSCR_REGS,
+ SPE_ACC_REGS,
+ SPEFSCR_REGS,
+ NON_SPECIAL_REGS,
+ MQ_REGS,
+ LINK_REGS,
+ CTR_REGS,
+ LINK_OR_CTR_REGS,
+ SPECIAL_REGS,
+ SPEC_OR_GEN_REGS,
+ CR0_REGS,
+ CR_REGS,
+ NON_FLOAT_REGS,
+ XER_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "BASE_REGS", \
+ "GENERAL_REGS", \
+ "FLOAT_REGS", \
+ "ALTIVEC_REGS", \
+ "VRSAVE_REGS", \
+ "VSCR_REGS", \
+ "SPE_ACC_REGS", \
+ "SPEFSCR_REGS", \
+ "NON_SPECIAL_REGS", \
+ "MQ_REGS", \
+ "LINK_REGS", \
+ "CTR_REGS", \
+ "LINK_OR_CTR_REGS", \
+ "SPECIAL_REGS", \
+ "SPEC_OR_GEN_REGS", \
+ "CR0_REGS", \
+ "CR_REGS", \
+ "NON_FLOAT_REGS", \
+ "XER_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xfffffffe, 0x00000000, 0x00000008, 0x00020000 }, /* BASE_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000008, 0x00020000 }, /* GENERAL_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0xffffe000, 0x00001fff }, /* ALTIVEC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00002000 }, /* VRSAVE_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00004000 }, /* VSCR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, /* SPE_ACC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00010000 }, /* SPEFSCR_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000008, 0x00020000 }, /* NON_SPECIAL_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000001, 0x00000000 }, /* MQ_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000002, 0x00000000 }, /* LINK_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000004, 0x00000000 }, /* CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000006, 0x00000000 }, /* LINK_OR_CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000007, 0x00002000 }, /* SPECIAL_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000000f, 0x00022000 }, /* SPEC_OR_GEN_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000010, 0x00000000 }, /* CR0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000ff0, 0x00000000 }, /* CR_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000efff, 0x00020000 }, /* NON_FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0x00001000, 0x00000000 }, /* XER_REGS */ \
+ /* APPLE LOCAL 3399553 */ \
+ { 0xffffffff, 0xffffffff, 0xffffffff, 0x0007ffff } /* ALL_REGS */ \
+}
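+
+/* Editorial note (not part of the original source) on reading the
+ initializer: each 32-bit word covers 32 hard registers, least
+ significant bit first. In the BASE_REGS row, 0xfffffffe in word 0
+ is r1-r31 (bit 0, r0, is excluded), 0x00000008 in word 2 is bit 3
+ of registers 64-95, i.e. reg 67 (the arg pointer), and 0x00020000
+ in word 3 is bit 17 of registers 96-127, i.e. reg 113 (the soft
+ frame pointer). */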
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == 0 ? GENERAL_REGS \
+ : (REGNO) < 32 ? BASE_REGS \
+ : FP_REGNO_P (REGNO) ? FLOAT_REGS \
+ : ALTIVEC_REGNO_P (REGNO) ? ALTIVEC_REGS \
+ : (REGNO) == CR0_REGNO ? CR0_REGS \
+ : CR_REGNO_P (REGNO) ? CR_REGS \
+ : (REGNO) == MQ_REGNO ? MQ_REGS \
+ : (REGNO) == LINK_REGISTER_REGNUM ? LINK_REGS \
+ : (REGNO) == COUNT_REGISTER_REGNUM ? CTR_REGS \
+ : (REGNO) == ARG_POINTER_REGNUM ? BASE_REGS \
+ : (REGNO) == XER_REGNO ? XER_REGS \
+ : (REGNO) == VRSAVE_REGNO ? VRSAVE_REGS \
+ : (REGNO) == VSCR_REGNO ? VRSAVE_REGS \
+ : (REGNO) == SPE_ACC_REGNO ? SPE_ACC_REGS \
+ : (REGNO) == SPEFSCR_REGNO ? SPEFSCR_REGS \
+ : (REGNO) == FRAME_POINTER_REGNUM ? BASE_REGS \
+ : NO_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS BASE_REGS
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ We also don't want to reload integer values into floating-point
+ registers if we can at all help it. In fact, this can
+ cause reload to die, if it tries to generate a reload of CTR
+ into a FP register and discovers it doesn't have the memory location
+ required.
+
+ ??? Would it be a good idea to have reload do the converse, that is
+ try to reload floating modes into FP registers if possible?
+ */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
+ ? NO_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS))
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,IN) \
+ rs6000_secondary_reload_class (CLASS, MODE, IN)
+
+/* If we are copying between FP or AltiVec registers and anything
+ else, we need a memory location. */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ ((CLASS1) != (CLASS2) && ((CLASS1) == FLOAT_REGS \
+ || (CLASS2) == FLOAT_REGS \
+ || (CLASS1) == ALTIVEC_REGS \
+ || (CLASS2) == ALTIVEC_REGS))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+
+ On RS/6000, this is the size of MODE in words,
+ except in the FP regs, where a single reg is enough for two words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (((CLASS) == FLOAT_REGS) \
+ ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+ : (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS && (MODE) == DFmode) \
+ ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? ((GET_MODE_SIZE (FROM) < 8 || GET_MODE_SIZE (TO) < 8 \
+ || TARGET_IEEEQUAD) \
+ && reg_classes_intersect_p (FLOAT_REGS, CLASS)) \
+ : (((TARGET_E500_DOUBLE \
+ && ((((TO) == DFmode) + ((FROM) == DFmode)) == 1 \
+ || (((TO) == DImode) + ((FROM) == DImode)) == 1)) \
+ || (TARGET_SPE \
+ && (SPE_VECTOR_MODE (FROM) + SPE_VECTOR_MODE (TO)) == 1)) \
+ && reg_classes_intersect_p (GENERAL_REGS, CLASS)))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Enumeration to give which calling sequence to use. */
+enum rs6000_abi {
+ ABI_NONE,
+ ABI_AIX, /* IBM's AIX */
+ ABI_V4, /* System V.4/eabi */
+ ABI_DARWIN /* Apple's Darwin (OS X kernel) */
+};
+
+extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#define DWARF_CIE_DATA_ALIGNMENT (-((int) (TARGET_32BIT ? 4 : 8)))
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame.
+
+ On the RS/6000, we grow upwards, from the area after the outgoing
+ arguments. */
+#define FRAME_GROWS_DOWNWARD (flag_stack_protect != 0)
+
+/* Size of the outgoing register save area */
+#define RS6000_REG_SAVE ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? (TARGET_64BIT ? 64 : 32) \
+ : 0)
+
+/* Size of the fixed area on the stack */
+#define RS6000_SAVE_AREA \
+ (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) ? 24 : 8) \
+ << (TARGET_64BIT ? 1 : 0))
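+
+/* Editorial illustration (not part of the original source): for the
+ AIX and Darwin ABIs this is 24 bytes in 32-bit mode and 24 << 1 ==
+ 48 bytes in 64-bit mode; for other ABIs it is 8 or 16 bytes. */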
+
+/* MEM representing address to save the TOC register */
+#define RS6000_SAVE_TOC gen_rtx_MEM (Pmode, \
+ plus_constant (stack_pointer_rtx, \
+ (TARGET_32BIT ? 20 : 40)))
+
+/* APPLE LOCAL begin radar 4859753 */
+/* Make 8-byte structs BLKmode instead of DImode */
+#define RS6000_8BYTE_STRUCT_HACK 0
+/* APPLE LOCAL end radar 4859753 */
+/* Align an address */
+#define RS6000_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
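+
+/* Editorial illustration (not part of the original source): A must be
+ a power of two; e.g. RS6000_ALIGN (20, 16) == ((20 + 15) & ~15) == 32,
+ rounding 20 bytes up to a 16-byte boundary. */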
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated.
+
+ On the RS/6000, the frame pointer is the same as the stack pointer,
+ except for dynamic allocations. So we start after the fixed area and
+ outgoing parameter area. */
+
+#define STARTING_FRAME_OFFSET \
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (current_function_outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + RS6000_SAVE_AREA))
+
+/* Offset from the stack pointer register to an item dynamically
+ allocated on the stack, e.g., by `alloca'.
+
+ The default value for this macro is `STACK_POINTER_OFFSET' plus the
+ length of the outgoing arguments. The default is correct for most
+ machines. See `function.c' for details. */
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (current_function_outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + (STACK_POINTER_OFFSET))
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On RS/6000, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Offset of first parameter from the argument pointer register value.
+ On the RS/6000, we define the argument pointer to the start of the fixed
+ area. */
+#define FIRST_PARM_OFFSET(FNDECL) RS6000_SAVE_AREA
+
+/* Offset from the argument pointer register value to the top of
+ stack. This is different from FIRST_PARM_OFFSET because of the
+ register save area. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 0
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. The value is the number of bytes allocated to this
+ area. */
+#define REG_PARM_STACK_SPACE(FNDECL) RS6000_REG_SAVE
+
+/* Define this if the above stack space is to be considered part of the
+ space allocated by the caller. */
+#define OUTGOING_REG_PARM_STACK_SPACE
+
+/* This is the difference between the logical top of stack and the actual sp.
+
+ For the RS/6000, sp points past the fixed area. */
+#define STACK_POINTER_OFFSET RS6000_SAVE_AREA
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) rs6000_function_value ((VALTYPE), (FUNC))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) rs6000_libcall_value ((MODE))
+
+/* DRAFT_V4_STRUCT_RET defaults off. */
+#define DRAFT_V4_STRUCT_RET 0
+
+/* Let TARGET_RETURN_IN_MEMORY control what happens. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Mode of stack savearea.
+ FUNCTION is VOIDmode because calling convention maintains SP.
+ BLOCK needs Pmode for SP.
+ NONLOCAL needs twice Pmode to maintain both backchain and SP. */
+#define STACK_SAVEAREA_MODE(LEVEL) \
+ (LEVEL == SAVE_FUNCTION ? VOIDmode \
+ : LEVEL == SAVE_NONLOCAL ? (TARGET_32BIT ? DImode : TImode) : Pmode)
+
+/* Minimum and maximum general purpose registers used to hold arguments. */
+#define GP_ARG_MIN_REG 3
+#define GP_ARG_MAX_REG 10
+#define GP_ARG_NUM_REG (GP_ARG_MAX_REG - GP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum floating point registers used to hold arguments. */
+#define FP_ARG_MIN_REG 33
+#define FP_ARG_AIX_MAX_REG 45
+#define FP_ARG_V4_MAX_REG 40
+#define FP_ARG_MAX_REG ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? FP_ARG_AIX_MAX_REG : FP_ARG_V4_MAX_REG)
+#define FP_ARG_NUM_REG (FP_ARG_MAX_REG - FP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum AltiVec registers used to hold arguments. */
+#define ALTIVEC_ARG_MIN_REG (FIRST_ALTIVEC_REGNO + 2)
+#define ALTIVEC_ARG_MAX_REG (ALTIVEC_ARG_MIN_REG + 11)
+#define ALTIVEC_ARG_NUM_REG (ALTIVEC_ARG_MAX_REG - ALTIVEC_ARG_MIN_REG + 1)
+
+/* Return registers */
+#define GP_ARG_RETURN GP_ARG_MIN_REG
+#define FP_ARG_RETURN FP_ARG_MIN_REG
+#define ALTIVEC_ARG_RETURN (FIRST_ALTIVEC_REGNO + 2)
+
+/* Flags for the call/call_value rtl operations set up by function_arg */
+#define CALL_NORMAL 0x00000000 /* no special processing */
+/* Bits in 0x00000001 are unused. */
+#define CALL_V4_CLEAR_FP_ARGS 0x00000002 /* V.4, no FP args passed */
+#define CALL_V4_SET_FP_ARGS 0x00000004 /* V.4, FP args were passed */
+#define CALL_LONG 0x00000008 /* always call indirect */
+#define CALL_LIBCALL 0x00000010 /* libcall */
+
+/* We don't have prologue and epilogue functions to save/restore
+ everything for most ABIs. */
+#define WORLD_SAVE_P(INFO) 0
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+
+ On RS/6000, this is r3, fp1, and v2 (for AltiVec). */
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == GP_ARG_RETURN \
+ || ((N) == FP_ARG_RETURN && TARGET_HARD_FLOAT && TARGET_FPRS) \
+ || ((N) == ALTIVEC_ARG_RETURN && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI))
+
+/* 1 if N is a possible register number for function argument passing.
+ On RS/6000, these are r3-r10 and fp1-fp13.
+ On AltiVec, v2 - v13 are used for passing vectors. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((unsigned) (N) - GP_ARG_MIN_REG < GP_ARG_NUM_REG \
+ || ((unsigned) (N) - ALTIVEC_ARG_MIN_REG < ALTIVEC_ARG_NUM_REG \
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI) \
+ || ((unsigned) (N) - FP_ARG_MIN_REG < FP_ARG_NUM_REG \
+ && TARGET_HARD_FLOAT && TARGET_FPRS))
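+
+/* Editorial note (not part of the original source): each clause above
+ uses the single-comparison range-check idiom; with GP_ARG_MIN_REG == 3
+ and GP_ARG_NUM_REG == 8, (unsigned) (N) - 3 < 8 is true exactly for
+ 3 <= N <= 10, since N < 3 wraps around to a huge unsigned value. */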
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the RS/6000, this is a structure. The first element is the number of
+ total argument words, the second is used to store the next
+ floating-point register number, and the third says how many more args we
+ have prototype types for.
+
+ For ABI_V4, we treat these slightly differently -- `sysv_gregno' is
+ the next available GP register, `fregno' is the next available FP
+ register, and `words' is the number of words used on the stack.
+
+ The varargs/stdarg support requires that this structure's size
+ be a multiple of sizeof(int). */
+
+typedef struct rs6000_args
+{
+ int words; /* # words used for passing GP registers */
+ int fregno; /* next available FP register */
+ int vregno; /* next available AltiVec register */
+ int nargs_prototype; /* # args left in the current prototype */
+ int prototype; /* Whether a prototype was defined */
+ int stdarg; /* Whether function is a stdarg function. */
+ int call_cookie; /* Do special things for this call */
+ int sysv_gregno; /* next available GP register */
+ int intoffset; /* running offset in struct (darwin64) */
+ int use_stack; /* any part of struct on stack (darwin64) */
+ /* APPLE LOCAL begin fix 64-bit varargs 4028089 */
+ int floats_in_gpr; /* count of SFmode floats taking up
+ GPR space (darwin64) */
+ /* APPLE LOCAL end fix 64-bit varargs 4028089 */
+ int named; /* false for varargs params */
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, FALSE, FALSE, N_NAMED_ARGS)
+
+/* Similar, but when scanning the definition of a procedure. We always
+ set NARGS_PROTOTYPE large so we never return an EXPR_LIST. */
+
+#define INIT_CUMULATIVE_INCOMING_ARGS(CUM, FNTYPE, LIBNAME) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, TRUE, FALSE, 1000)
+
+/* Like `INIT_CUMULATIVE_ARGS', but only used for outgoing libcalls. */
+
+#define INIT_CUMULATIVE_LIBCALL_ARGS(CUM, MODE, LIBNAME) \
+ init_cumulative_args (&CUM, NULL_TREE, LIBNAME, FALSE, TRUE, 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_advance (&CUM, MODE, TYPE, NAMED, 0)
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. The first 13 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when TYPE is zero) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support EXPR_LIST anyway. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED)
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) function_arg_padding (MODE, TYPE)
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ function_arg_boundary (MODE, TYPE)
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(valist, nextarg) \
+ rs6000_va_start (valist, nextarg)
+
+#define PAD_VARARGS_DOWN \
+ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler ((FILE), (LABELNO));
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. No definition is equivalent to
+ always zero.
+
+ On the RS/6000, this is nonzero because we can restore the stack from
+ its backpointer, which we maintain. */
+#define EXIT_IGNORE_STACK 1
+
+/* Define this macro as a C expression that is nonzero for registers
+ that are used by the epilogue or the `return' pattern. The stack
+ and frame pointer registers are already assumed to be used as
+ needed. */
+
+#define EPILOGUE_USES(REGNO) \
+ ((reload_completed && (REGNO) == LINK_REGISTER_REGNUM) \
+ || (TARGET_ALTIVEC && (REGNO) == VRSAVE_REGNO) \
+ || (current_function_calls_eh_return \
+ && TARGET_AIX \
+ && (REGNO) == 2))
+
+
+/* TRAMPOLINE_TEMPLATE deleted */
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE rs6000_trampoline_size ()
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, CXT) \
+ rs6000_initialize_trampoline (ADDR, FNADDR, CXT)
+
+/* Definitions for __builtin_return_address and __builtin_frame_address.
+ To make __builtin_return_address (0) give the link register (65),
+ enable the macro below. */
+/* This should be uncommented, so that the link register is used, but
+ currently this would result in unmatched insns and spilling fixed
+ registers, so we'll leave it for another day. When these problems are
+ taken care of, one additional fetch will be necessary in RETURN_ADDR_RTX.
+ (mrs) */
+/* #define RETURN_ADDR_IN_PREVIOUS_FRAME */
+
+/* Number of bytes into the frame return addresses can be found. See
+ rs6000_stack_info in rs6000.c for more information on how the
+ different ABIs store the return address. */
+#define RETURN_ADDRESS_OFFSET \
+ ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) ? (TARGET_32BIT ? 8 : 16) : \
+ (DEFAULT_ABI == ABI_V4) ? 4 : \
+ (internal_error ("RETURN_ADDRESS_OFFSET not supported"), 0))
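+
+/* Editorial illustration (not part of the original source): this is 8
+ for 32-bit AIX/Darwin, 16 for 64-bit AIX/Darwin, 4 for V.4, and an
+ internal error for any other ABI. */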
+
+/* The current return address is in link register (65). The return address
+ of anything farther back is accessed normally at an offset of 8 from the
+ frame pointer. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ (rs6000_return_addr (COUNT, FRAME))
+
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the RS/6000. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer.
+
+ In addition, we use the elimination mechanism to see if r30 is needed
+ Initially we assume that it isn't. If it is, we spill it. This is done
+ by making it an eliminable register. We replace it with itself so that
+ if it isn't needed, then existing uses won't be modified. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+#define ELIMINABLE_REGS \
+{{ HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { RS6000_PIC_OFFSET_TABLE_REGNUM, RS6000_PIC_OFFSET_TABLE_REGNUM } }
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ For the RS/6000, if frame pointer elimination is being done, we would like
+ to convert ap into fp, not sp.
+
+ We need r30 if -mminimal-toc was specified, and there are constant pool
+ references. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
+ ? ! frame_pointer_needed \
+ : (FROM) == RS6000_PIC_OFFSET_TABLE_REGNUM \
+ ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0 \
+ : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = rs6000_initial_elimination_offset(FROM, TO))
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? (REGNO) <= 31 || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+ : (reg_renumber[REGNO] >= 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? ((REGNO) > 0 && (REGNO) <= 31) || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+ : (reg_renumber[REGNO] > 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the RS/6000, all integer constants are acceptable, though most
+ won't be valid for particular insns. Only easy FP constants are
+ acceptable. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (((GET_CODE (X) != CONST_DOUBLE \
+ && GET_CODE (X) != CONST_VECTOR) \
+ || GET_MODE (X) == VOIDmode \
+ || (TARGET_POWERPC64 && GET_MODE (X) == DImode) \
+ || easy_fp_constant (X, GET_MODE (X)) \
+ || easy_vector_constant (X, GET_MODE (X))) \
+ && !rs6000_tls_referenced_p (X))
+
+#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)
+#define EASY_VECTOR_15_ADD_SELF(n) (!EASY_VECTOR_15((n)) \
+ && EASY_VECTOR_15((n) >> 1) \
+ && ((n) & 1) == 0)
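+
+/* Editorial illustration (not part of the original source): 20 is not
+ in [-16, 15], but 20 >> 1 == 10 is and 20 is even, so it can be
+ synthesized by splatting 10 and adding the result to itself; an odd
+ value such as 31 does not qualify. */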
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifdef REG_OK_STRICT
+# define REG_OK_STRICT_FLAG 1
+#else
+# define REG_OK_STRICT_FLAG 0
+#endif
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_INDEX_P(X, STRICT) \
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_INDEX_P (REGNO (X)))
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_BASE_P(X, STRICT) \
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_BASE_P (REGNO (X)))
+
+#define REG_OK_FOR_INDEX_P(X) INT_REG_OK_FOR_INDEX_P (X, REG_OK_STRICT_FLAG)
+#define REG_OK_FOR_BASE_P(X) INT_REG_OK_FOR_BASE_P (X, REG_OK_STRICT_FLAG)
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the RS/6000, there are four valid addresses: a SYMBOL_REF that
+ refers to a constant pool entry of an address (or the sum of it
+ plus a constant), a short (16-bit signed) constant plus a register,
+ the sum of two registers, or a register indirect, possibly with an
+ auto-increment. For DFmode and DImode with a constant plus register,
+ we must ensure that both words are addressable or PowerPC64 with offset
+ word aligned.
+
+ For modes spanning multiple registers (DFmode in 32-bit GPRs,
+ 32-bit DImode, TImode), indexed addressing cannot be used because
+ adjacent memory cells are accessed by adding word-sized offsets
+ during assembly output. */
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (rs6000_legitimate_address (MODE, X, REG_OK_STRICT_FLAG)) \
+ goto ADDR; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On RS/6000, first check for the sum of a register with a constant
+ integer that is out of range. If so, generate code to add the
+ constant with the low-order 16 bits masked to the register and force
+ this result into another register (this can be done with `cau').
+ Then generate an address of REG+(CONST&0xffff), allowing for the
+ possibility of bit 16 being a one.
+
+ Then check for the sum of a register and something not constant, try to
+ load the other things into a register and return the sum. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ rtx result = rs6000_legitimize_address (X, OLDX, MODE); \
+ if (result != NULL_RTX) \
+ { \
+ (X) = result; \
+ goto WIN; \
+ } \
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ Implemented on rs6000 by rs6000_legitimize_reload_address.
+ Note that (X) is evaluated twice; this is safe in current usage. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ int win; \
+ (X) = rs6000_legitimize_reload_address ((X), (MODE), (OPNUM), \
+ (int)(TYPE), (IND_LEVELS), &win); \
+ if ( win ) \
+ goto WIN; \
+} while (0)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+do { \
+ if (rs6000_mode_dependent_address (ADDR)) \
+ goto LABEL; \
+} while (0)
+
+/* The register number of the register used to address a table of
+ static data addresses in memory. In some cases this register is
+ defined by a processor's "application binary interface" (ABI).
+ When this macro is defined, RTL is generated for this register
+ once, as with the stack pointer and frame pointer registers. If
+ this macro is not defined, it is up to the machine-dependent files
+ to allocate such a register (if necessary). */
+
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 30
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? RS6000_PIC_OFFSET_TABLE_REGNUM : INVALID_REGNUM)
+
+#define TOC_REGISTER (TARGET_MINIMAL_TOC ? RS6000_PIC_OFFSET_TABLE_REGNUM : 2)
+
+/* Define this macro if the register defined by
+ `PIC_OFFSET_TABLE_REGNUM' is clobbered by calls. Do not define
+ this macro if `PIC_OFFSET_TABLE_REGNUM' is not defined. */
+
+/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent
+ code. You can assume that X satisfies `CONSTANT_P', so you need
+ not check this. You can also assume FLAG_PIC is true, so you need
+ not check it either. You need not define this macro if all
+ constants (including `SYMBOL_REF') can be immediate operands when
+ generating position independent code. */
+
+/* #define LEGITIMATE_PIC_OPERAND_P (X) */
+
+/* Define this if some processing needs to be done immediately before
+ emitting code for an insn. */
+
+/* #define FINAL_PRESCAN_INSN(INSN,OPERANDS,NOPERANDS) */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+
+/* #define FIXUNS_TRUNC_LIKE_FIX_TRUNC */
+
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. */
+
+/* Allow pairs of registers to be used, which is the intent of the default. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_POWERPC64 ? TImode : DImode)
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX (! TARGET_POWERPC64 ? 4 : 8)
+#define MAX_MOVE_MAX 8
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Also nonzero if doing byte operations (specifically shifts) in registers
+ is undesirable. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* The cntlzw and cntlzd instructions return 32 and 64 for input of zero. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = ((MODE) == SImode ? 32 : 64))
+
+/* The CTZ patterns return -1 for input of zero. */
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = -1)
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_32BIT ? SImode : DImode)
+
+/* Supply definition of STACK_SIZE_MODE for allocate_dynamic_stack_space. */
+#define STACK_SIZE_MODE (TARGET_32BIT ? SImode : DImode)
+
+/* Mode of a function address in a call instruction (for indexing purposes).
+ Doesn't matter on RS/6000. */
+#define FUNCTION_MODE SImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* Define this to be nonzero if shift instructions ignore all but the
+ low-order few bits.
+
+ The sle and sre instructions, which allow SHIFT_COUNT_TRUNCATED,
+ have been dropped from the PowerPC architecture. */
+
+#define SHIFT_COUNT_TRUNCATED (TARGET_POWER ? 1 : 0)
+
+/* Adjust the length of an INSN. LENGTH is the currently-computed length and
+ should be adjusted to reflect any required changes. This macro is used when
+ there is some systematic length adjustment required that would be difficult
+ to express in the length attribute. */
+
+/* #define ADJUST_INSN_LENGTH(X,LENGTH) */
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a
+ COMPARE, return the mode to be used for the comparison. For
+ floating-point, CCFPmode should be used. CCUNSmode should be used
+ for unsigned comparisons. CCEQmode should be used when we are
+ doing an equality or inequality comparison on the result of a
+ comparison. CCmode should be used in all other cases. */
+
+#define SELECT_CC_MODE(OP,X,Y) \
+ (SCALAR_FLOAT_MODE_P (GET_MODE (X)) ? CCFPmode \
+ : (OP) == GTU || (OP) == LTU || (OP) == GEU || (OP) == LEU ? CCUNSmode \
+ : (((OP) == EQ || (OP) == NE) && COMPARISON_P (X) \
+ ? CCEQmode : CCmode))
+
+/* Can the condition code MODE be safely reversed? This is safe in
+ all cases on this port, because at present it doesn't use the
+ trapping FP comparisons (fcmpo). */
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+/* Given a condition code and a mode, return the inverse condition. */
+#define REVERSE_CONDITION(CODE, MODE) rs6000_reverse_condition (MODE, CODE)
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. */
+
+extern GTY(()) rtx rs6000_compare_op0;
+extern GTY(()) rtx rs6000_compare_op1;
+extern int rs6000_compare_fp_p;
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+#define ASM_COMMENT_START " #"
+
+/* Flag to say the TOC is initialized */
+extern int toc_initialized;
+
+/* Macro to output a special constant pool entry. Go to WIN if we output
+ it. Otherwise, it is written the usual way.
+
+ On the RS/6000, toc entries are handled this way. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE, X, MODE, ALIGN, LABELNO, WIN) \
+{ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (X, MODE)) \
+ { \
+ output_toc (FILE, X, LABELNO, MODE); \
+ goto WIN; \
+ } \
+}
+
+#ifdef HAVE_GAS_WEAK
+#define RS6000_WEAK 1
+#else
+#define RS6000_WEAK 0
+#endif
+
+#if RS6000_WEAK
+/* Used in lieu of ASM_WEAKEN_LABEL. */
+#define ASM_WEAKEN_DECL(FILE, DECL, NAME, VAL) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ if (TARGET_XCOFF) \
+ fputs ("[DS]", (FILE)); \
+ fputs ("\n\t.weak\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ } \
+ fputc ('\n', (FILE)); \
+ if (VAL) \
+ { \
+ ASM_OUTPUT_DEF ((FILE), (NAME), (VAL)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ fputs ("\t.set\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (",.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VAL)); \
+ fputc ('\n', (FILE)); \
+ } \
+ } \
+ } \
+ while (0)
+#endif
+
+#if HAVE_GAS_WEAKREF
+#define ASM_OUTPUT_WEAKREF(FILE, DECL, NAME, VALUE) \
+ do \
+ { \
+ fputs ("\t.weakref\t", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ fputs ("\n\t.weakref\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", .", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ } \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+/* This implements the `alias' attribute. */
+#undef ASM_OUTPUT_DEF_FROM_DECLS
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
+ do \
+ { \
+ const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ const char *name = IDENTIFIER_POINTER (TARGET); \
+ if (TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+ if (!RS6000_WEAK || !DECL_WEAK (DECL)) \
+ { \
+ fputs ("\t.globl\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else if (TARGET_XCOFF) \
+ { \
+ fputs ("\t.lglobl\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.set\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ fputs (",.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, name); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_DEF (FILE, alias, name); \
+ } \
+ while (0)
+
+#define TARGET_ASM_FILE_START rs6000_file_start
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by the compiler's hard-register-number (see above). */
+
+extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
+
+#define REGISTER_NAMES \
+{ \
+ &rs6000_reg_names[ 0][0], /* r0 */ \
+ &rs6000_reg_names[ 1][0], /* r1 */ \
+ &rs6000_reg_names[ 2][0], /* r2 */ \
+ &rs6000_reg_names[ 3][0], /* r3 */ \
+ &rs6000_reg_names[ 4][0], /* r4 */ \
+ &rs6000_reg_names[ 5][0], /* r5 */ \
+ &rs6000_reg_names[ 6][0], /* r6 */ \
+ &rs6000_reg_names[ 7][0], /* r7 */ \
+ &rs6000_reg_names[ 8][0], /* r8 */ \
+ &rs6000_reg_names[ 9][0], /* r9 */ \
+ &rs6000_reg_names[10][0], /* r10 */ \
+ &rs6000_reg_names[11][0], /* r11 */ \
+ &rs6000_reg_names[12][0], /* r12 */ \
+ &rs6000_reg_names[13][0], /* r13 */ \
+ &rs6000_reg_names[14][0], /* r14 */ \
+ &rs6000_reg_names[15][0], /* r15 */ \
+ &rs6000_reg_names[16][0], /* r16 */ \
+ &rs6000_reg_names[17][0], /* r17 */ \
+ &rs6000_reg_names[18][0], /* r18 */ \
+ &rs6000_reg_names[19][0], /* r19 */ \
+ &rs6000_reg_names[20][0], /* r20 */ \
+ &rs6000_reg_names[21][0], /* r21 */ \
+ &rs6000_reg_names[22][0], /* r22 */ \
+ &rs6000_reg_names[23][0], /* r23 */ \
+ &rs6000_reg_names[24][0], /* r24 */ \
+ &rs6000_reg_names[25][0], /* r25 */ \
+ &rs6000_reg_names[26][0], /* r26 */ \
+ &rs6000_reg_names[27][0], /* r27 */ \
+ &rs6000_reg_names[28][0], /* r28 */ \
+ &rs6000_reg_names[29][0], /* r29 */ \
+ &rs6000_reg_names[30][0], /* r30 */ \
+ &rs6000_reg_names[31][0], /* r31 */ \
+ \
+ &rs6000_reg_names[32][0], /* fr0 */ \
+ &rs6000_reg_names[33][0], /* fr1 */ \
+ &rs6000_reg_names[34][0], /* fr2 */ \
+ &rs6000_reg_names[35][0], /* fr3 */ \
+ &rs6000_reg_names[36][0], /* fr4 */ \
+ &rs6000_reg_names[37][0], /* fr5 */ \
+ &rs6000_reg_names[38][0], /* fr6 */ \
+ &rs6000_reg_names[39][0], /* fr7 */ \
+ &rs6000_reg_names[40][0], /* fr8 */ \
+ &rs6000_reg_names[41][0], /* fr9 */ \
+ &rs6000_reg_names[42][0], /* fr10 */ \
+ &rs6000_reg_names[43][0], /* fr11 */ \
+ &rs6000_reg_names[44][0], /* fr12 */ \
+ &rs6000_reg_names[45][0], /* fr13 */ \
+ &rs6000_reg_names[46][0], /* fr14 */ \
+ &rs6000_reg_names[47][0], /* fr15 */ \
+ &rs6000_reg_names[48][0], /* fr16 */ \
+ &rs6000_reg_names[49][0], /* fr17 */ \
+ &rs6000_reg_names[50][0], /* fr18 */ \
+ &rs6000_reg_names[51][0], /* fr19 */ \
+ &rs6000_reg_names[52][0], /* fr20 */ \
+ &rs6000_reg_names[53][0], /* fr21 */ \
+ &rs6000_reg_names[54][0], /* fr22 */ \
+ &rs6000_reg_names[55][0], /* fr23 */ \
+ &rs6000_reg_names[56][0], /* fr24 */ \
+ &rs6000_reg_names[57][0], /* fr25 */ \
+ &rs6000_reg_names[58][0], /* fr26 */ \
+ &rs6000_reg_names[59][0], /* fr27 */ \
+ &rs6000_reg_names[60][0], /* fr28 */ \
+ &rs6000_reg_names[61][0], /* fr29 */ \
+ &rs6000_reg_names[62][0], /* fr30 */ \
+ &rs6000_reg_names[63][0], /* fr31 */ \
+ \
+ &rs6000_reg_names[64][0], /* mq */ \
+ &rs6000_reg_names[65][0], /* lr */ \
+ &rs6000_reg_names[66][0], /* ctr */ \
+ &rs6000_reg_names[67][0], /* ap */ \
+ \
+ &rs6000_reg_names[68][0], /* cr0 */ \
+ &rs6000_reg_names[69][0], /* cr1 */ \
+ &rs6000_reg_names[70][0], /* cr2 */ \
+ &rs6000_reg_names[71][0], /* cr3 */ \
+ &rs6000_reg_names[72][0], /* cr4 */ \
+ &rs6000_reg_names[73][0], /* cr5 */ \
+ &rs6000_reg_names[74][0], /* cr6 */ \
+ &rs6000_reg_names[75][0], /* cr7 */ \
+ \
+ &rs6000_reg_names[76][0], /* xer */ \
+ \
+ &rs6000_reg_names[77][0], /* v0 */ \
+ &rs6000_reg_names[78][0], /* v1 */ \
+ &rs6000_reg_names[79][0], /* v2 */ \
+ &rs6000_reg_names[80][0], /* v3 */ \
+ &rs6000_reg_names[81][0], /* v4 */ \
+ &rs6000_reg_names[82][0], /* v5 */ \
+ &rs6000_reg_names[83][0], /* v6 */ \
+ &rs6000_reg_names[84][0], /* v7 */ \
+ &rs6000_reg_names[85][0], /* v8 */ \
+ &rs6000_reg_names[86][0], /* v9 */ \
+ &rs6000_reg_names[87][0], /* v10 */ \
+ &rs6000_reg_names[88][0], /* v11 */ \
+ &rs6000_reg_names[89][0], /* v12 */ \
+ &rs6000_reg_names[90][0], /* v13 */ \
+ &rs6000_reg_names[91][0], /* v14 */ \
+ &rs6000_reg_names[92][0], /* v15 */ \
+ &rs6000_reg_names[93][0], /* v16 */ \
+ &rs6000_reg_names[94][0], /* v17 */ \
+ &rs6000_reg_names[95][0], /* v18 */ \
+ &rs6000_reg_names[96][0], /* v19 */ \
+ &rs6000_reg_names[97][0], /* v20 */ \
+ &rs6000_reg_names[98][0], /* v21 */ \
+ &rs6000_reg_names[99][0], /* v22 */ \
+ &rs6000_reg_names[100][0], /* v23 */ \
+ &rs6000_reg_names[101][0], /* v24 */ \
+ &rs6000_reg_names[102][0], /* v25 */ \
+ &rs6000_reg_names[103][0], /* v26 */ \
+ &rs6000_reg_names[104][0], /* v27 */ \
+ &rs6000_reg_names[105][0], /* v28 */ \
+ &rs6000_reg_names[106][0], /* v29 */ \
+ &rs6000_reg_names[107][0], /* v30 */ \
+ &rs6000_reg_names[108][0], /* v31 */ \
+ &rs6000_reg_names[109][0], /* vrsave */ \
+ &rs6000_reg_names[110][0], /* vscr */ \
+ &rs6000_reg_names[111][0], /* spe_acc */ \
+ &rs6000_reg_names[112][0], /* spefscr */ \
+ &rs6000_reg_names[113][0], /* sfp */ \
+ /* APPLE LOCAL 3399553 */ \
+ &rs6000_reg_names[114][0], /* fpscr */ \
+}
+
+/* Table of additional register names to use in user input. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+ {{"r0", 0}, {"r1", 1}, {"r2", 2}, {"r3", 3}, \
+ {"r4", 4}, {"r5", 5}, {"r6", 6}, {"r7", 7}, \
+ {"r8", 8}, {"r9", 9}, {"r10", 10}, {"r11", 11}, \
+ {"r12", 12}, {"r13", 13}, {"r14", 14}, {"r15", 15}, \
+ {"r16", 16}, {"r17", 17}, {"r18", 18}, {"r19", 19}, \
+ {"r20", 20}, {"r21", 21}, {"r22", 22}, {"r23", 23}, \
+ {"r24", 24}, {"r25", 25}, {"r26", 26}, {"r27", 27}, \
+ {"r28", 28}, {"r29", 29}, {"r30", 30}, {"r31", 31}, \
+ {"fr0", 32}, {"fr1", 33}, {"fr2", 34}, {"fr3", 35}, \
+ {"fr4", 36}, {"fr5", 37}, {"fr6", 38}, {"fr7", 39}, \
+ {"fr8", 40}, {"fr9", 41}, {"fr10", 42}, {"fr11", 43}, \
+ {"fr12", 44}, {"fr13", 45}, {"fr14", 46}, {"fr15", 47}, \
+ {"fr16", 48}, {"fr17", 49}, {"fr18", 50}, {"fr19", 51}, \
+ {"fr20", 52}, {"fr21", 53}, {"fr22", 54}, {"fr23", 55}, \
+ {"fr24", 56}, {"fr25", 57}, {"fr26", 58}, {"fr27", 59}, \
+ {"fr28", 60}, {"fr29", 61}, {"fr30", 62}, {"fr31", 63}, \
+ {"v0", 77}, {"v1", 78}, {"v2", 79}, {"v3", 80}, \
+ {"v4", 81}, {"v5", 82}, {"v6", 83}, {"v7", 84}, \
+ {"v8", 85}, {"v9", 86}, {"v10", 87}, {"v11", 88}, \
+ {"v12", 89}, {"v13", 90}, {"v14", 91}, {"v15", 92}, \
+ {"v16", 93}, {"v17", 94}, {"v18", 95}, {"v19", 96}, \
+ {"v20", 97}, {"v21", 98}, {"v22", 99}, {"v23", 100}, \
+ {"v24", 101},{"v25", 102},{"v26", 103},{"v27", 104}, \
+ {"v28", 105},{"v29", 106},{"v30", 107},{"v31", 108}, \
+ {"vrsave", 109}, {"vscr", 110}, \
+ {"spe_acc", 111}, {"spefscr", 112}, \
+ /* no additional names for: mq, lr, ctr, ap */ \
+ {"cr0", 68}, {"cr1", 69}, {"cr2", 70}, {"cr3", 71}, \
+ {"cr4", 72}, {"cr5", 73}, {"cr6", 74}, {"cr7", 75}, \
+ {"cc", 68}, {"sp", 1}, {"toc", 2} }
+
+/* Text to write out after a CALL that the loader may replace with glue
+ code. This depends on the AIX version. */
+#define RS6000_CALL_GLUE "cror 31,31,31"
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do { char buf[100]; \
+ fputs ("\t.long ", FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", VALUE); \
+ assemble_name (FILE, buf); \
+ putc ('-', FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", REL); \
+ assemble_name (FILE, buf); \
+ putc ('\n', FILE); \
+ } while (0)
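+
+/* For example, for case-vector entry 5 relative to vector base label 2,
+   the macro above emits something like "\t.long L5-L2"; the exact label
+   spelling comes from ASM_GENERATE_INTERNAL_LABEL.  */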
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
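+
+/* For example, ASM_OUTPUT_ALIGN (file, 4) emits "\t.align 4", advancing
+   the location counter to a 16-byte (2**4) boundary.  */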
+
+/* APPLE LOCAL begin CW asm blocks */
+#define IASM_REGISTER_NAME(STR, BUF) rs6000_iasm_register_name (STR, BUF)
+/* APPLE LOCAL end CW asm blocks */
+
+/* Pick up the return address upon entry to a procedure. Used for
+ dwarf2 unwind information. This also enables the table driven
+ mechanism. */
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LINK_REGISTER_REGNUM)
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 3 : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 10)
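+
+/* Under this mapping the four EH data values are passed in r3..r6
+   (EH_RETURN_DATA_REGNO (0) == 3, ..., EH_RETURN_DATA_REGNO (3) == 6),
+   and the stack adjustment is passed in r10.  */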
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Define which CODE values are valid. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '.' || (CODE) == '&')
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
+
+/* Uncomment to disable the corresponding default options. */
+/* #define MACHINE_no_sched_interblock */
+/* #define MACHINE_no_sched_speculative */
+/* #define MACHINE_no_sched_speculative_load */
+
+/* General flags. */
+extern int flag_pic;
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
+extern int frame_pointer_needed;
+
+enum rs6000_builtins
+{
+ /* AltiVec builtins. */
+ ALTIVEC_BUILTIN_ST_INTERNAL_4si,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4si,
+ ALTIVEC_BUILTIN_ST_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_VADDUBM,
+ ALTIVEC_BUILTIN_VADDUHM,
+ ALTIVEC_BUILTIN_VADDUWM,
+ ALTIVEC_BUILTIN_VADDFP,
+ ALTIVEC_BUILTIN_VADDCUW,
+ ALTIVEC_BUILTIN_VADDUBS,
+ ALTIVEC_BUILTIN_VADDSBS,
+ ALTIVEC_BUILTIN_VADDUHS,
+ ALTIVEC_BUILTIN_VADDSHS,
+ ALTIVEC_BUILTIN_VADDUWS,
+ ALTIVEC_BUILTIN_VADDSWS,
+ ALTIVEC_BUILTIN_VAND,
+ ALTIVEC_BUILTIN_VANDC,
+ ALTIVEC_BUILTIN_VAVGUB,
+ ALTIVEC_BUILTIN_VAVGSB,
+ ALTIVEC_BUILTIN_VAVGUH,
+ ALTIVEC_BUILTIN_VAVGSH,
+ ALTIVEC_BUILTIN_VAVGUW,
+ ALTIVEC_BUILTIN_VAVGSW,
+ ALTIVEC_BUILTIN_VCFUX,
+ ALTIVEC_BUILTIN_VCFSX,
+ ALTIVEC_BUILTIN_VCTSXS,
+ ALTIVEC_BUILTIN_VCTUXS,
+ ALTIVEC_BUILTIN_VCMPBFP,
+ ALTIVEC_BUILTIN_VCMPEQUB,
+ ALTIVEC_BUILTIN_VCMPEQUH,
+ ALTIVEC_BUILTIN_VCMPEQUW,
+ ALTIVEC_BUILTIN_VCMPEQFP,
+ ALTIVEC_BUILTIN_VCMPGEFP,
+ ALTIVEC_BUILTIN_VCMPGTUB,
+ ALTIVEC_BUILTIN_VCMPGTSB,
+ ALTIVEC_BUILTIN_VCMPGTUH,
+ ALTIVEC_BUILTIN_VCMPGTSH,
+ ALTIVEC_BUILTIN_VCMPGTUW,
+ ALTIVEC_BUILTIN_VCMPGTSW,
+ ALTIVEC_BUILTIN_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEXPTEFP,
+ ALTIVEC_BUILTIN_VLOGEFP,
+ ALTIVEC_BUILTIN_VMADDFP,
+ ALTIVEC_BUILTIN_VMAXUB,
+ ALTIVEC_BUILTIN_VMAXSB,
+ ALTIVEC_BUILTIN_VMAXUH,
+ ALTIVEC_BUILTIN_VMAXSH,
+ ALTIVEC_BUILTIN_VMAXUW,
+ ALTIVEC_BUILTIN_VMAXSW,
+ ALTIVEC_BUILTIN_VMAXFP,
+ ALTIVEC_BUILTIN_VMHADDSHS,
+ ALTIVEC_BUILTIN_VMHRADDSHS,
+ ALTIVEC_BUILTIN_VMLADDUHM,
+ ALTIVEC_BUILTIN_VMRGHB,
+ ALTIVEC_BUILTIN_VMRGHH,
+ ALTIVEC_BUILTIN_VMRGHW,
+ ALTIVEC_BUILTIN_VMRGLB,
+ ALTIVEC_BUILTIN_VMRGLH,
+ ALTIVEC_BUILTIN_VMRGLW,
+ ALTIVEC_BUILTIN_VMSUMUBM,
+ ALTIVEC_BUILTIN_VMSUMMBM,
+ ALTIVEC_BUILTIN_VMSUMUHM,
+ ALTIVEC_BUILTIN_VMSUMSHM,
+ ALTIVEC_BUILTIN_VMSUMUHS,
+ ALTIVEC_BUILTIN_VMSUMSHS,
+ ALTIVEC_BUILTIN_VMINUB,
+ ALTIVEC_BUILTIN_VMINSB,
+ ALTIVEC_BUILTIN_VMINUH,
+ ALTIVEC_BUILTIN_VMINSH,
+ ALTIVEC_BUILTIN_VMINUW,
+ ALTIVEC_BUILTIN_VMINSW,
+ ALTIVEC_BUILTIN_VMINFP,
+ ALTIVEC_BUILTIN_VMULEUB,
+ ALTIVEC_BUILTIN_VMULESB,
+ ALTIVEC_BUILTIN_VMULEUH,
+ ALTIVEC_BUILTIN_VMULESH,
+ ALTIVEC_BUILTIN_VMULOUB,
+ ALTIVEC_BUILTIN_VMULOSB,
+ ALTIVEC_BUILTIN_VMULOUH,
+ ALTIVEC_BUILTIN_VMULOSH,
+ ALTIVEC_BUILTIN_VNMSUBFP,
+ ALTIVEC_BUILTIN_VNOR,
+ ALTIVEC_BUILTIN_VOR,
+ ALTIVEC_BUILTIN_VSEL_4SI,
+ ALTIVEC_BUILTIN_VSEL_4SF,
+ ALTIVEC_BUILTIN_VSEL_8HI,
+ ALTIVEC_BUILTIN_VSEL_16QI,
+ ALTIVEC_BUILTIN_VPERM_4SI,
+ ALTIVEC_BUILTIN_VPERM_4SF,
+ ALTIVEC_BUILTIN_VPERM_8HI,
+ ALTIVEC_BUILTIN_VPERM_16QI,
+ ALTIVEC_BUILTIN_VPKUHUM,
+ ALTIVEC_BUILTIN_VPKUWUM,
+ ALTIVEC_BUILTIN_VPKPX,
+ ALTIVEC_BUILTIN_VPKUHSS,
+ ALTIVEC_BUILTIN_VPKSHSS,
+ ALTIVEC_BUILTIN_VPKUWSS,
+ ALTIVEC_BUILTIN_VPKSWSS,
+ ALTIVEC_BUILTIN_VPKUHUS,
+ ALTIVEC_BUILTIN_VPKSHUS,
+ ALTIVEC_BUILTIN_VPKUWUS,
+ ALTIVEC_BUILTIN_VPKSWUS,
+ ALTIVEC_BUILTIN_VREFP,
+ ALTIVEC_BUILTIN_VRFIM,
+ ALTIVEC_BUILTIN_VRFIN,
+ ALTIVEC_BUILTIN_VRFIP,
+ ALTIVEC_BUILTIN_VRFIZ,
+ ALTIVEC_BUILTIN_VRLB,
+ ALTIVEC_BUILTIN_VRLH,
+ ALTIVEC_BUILTIN_VRLW,
+ ALTIVEC_BUILTIN_VRSQRTEFP,
+ ALTIVEC_BUILTIN_VSLB,
+ ALTIVEC_BUILTIN_VSLH,
+ ALTIVEC_BUILTIN_VSLW,
+ ALTIVEC_BUILTIN_VSL,
+ ALTIVEC_BUILTIN_VSLO,
+ ALTIVEC_BUILTIN_VSPLTB,
+ ALTIVEC_BUILTIN_VSPLTH,
+ ALTIVEC_BUILTIN_VSPLTW,
+ ALTIVEC_BUILTIN_VSPLTISB,
+ ALTIVEC_BUILTIN_VSPLTISH,
+ ALTIVEC_BUILTIN_VSPLTISW,
+ ALTIVEC_BUILTIN_VSRB,
+ ALTIVEC_BUILTIN_VSRH,
+ ALTIVEC_BUILTIN_VSRW,
+ ALTIVEC_BUILTIN_VSRAB,
+ ALTIVEC_BUILTIN_VSRAH,
+ ALTIVEC_BUILTIN_VSRAW,
+ ALTIVEC_BUILTIN_VSR,
+ ALTIVEC_BUILTIN_VSRO,
+ ALTIVEC_BUILTIN_VSUBUBM,
+ ALTIVEC_BUILTIN_VSUBUHM,
+ ALTIVEC_BUILTIN_VSUBUWM,
+ ALTIVEC_BUILTIN_VSUBFP,
+ ALTIVEC_BUILTIN_VSUBCUW,
+ ALTIVEC_BUILTIN_VSUBUBS,
+ ALTIVEC_BUILTIN_VSUBSBS,
+ ALTIVEC_BUILTIN_VSUBUHS,
+ ALTIVEC_BUILTIN_VSUBSHS,
+ ALTIVEC_BUILTIN_VSUBUWS,
+ ALTIVEC_BUILTIN_VSUBSWS,
+ ALTIVEC_BUILTIN_VSUM4UBS,
+ ALTIVEC_BUILTIN_VSUM4SBS,
+ ALTIVEC_BUILTIN_VSUM4SHS,
+ ALTIVEC_BUILTIN_VSUM2SWS,
+ ALTIVEC_BUILTIN_VSUMSWS,
+ ALTIVEC_BUILTIN_VXOR,
+ ALTIVEC_BUILTIN_VSLDOI_16QI,
+ ALTIVEC_BUILTIN_VSLDOI_8HI,
+ ALTIVEC_BUILTIN_VSLDOI_4SI,
+ ALTIVEC_BUILTIN_VSLDOI_4SF,
+ ALTIVEC_BUILTIN_VUPKHSB,
+ ALTIVEC_BUILTIN_VUPKHPX,
+ ALTIVEC_BUILTIN_VUPKHSH,
+ ALTIVEC_BUILTIN_VUPKLSB,
+ ALTIVEC_BUILTIN_VUPKLPX,
+ ALTIVEC_BUILTIN_VUPKLSH,
+ ALTIVEC_BUILTIN_MTVSCR,
+ ALTIVEC_BUILTIN_MFVSCR,
+ ALTIVEC_BUILTIN_DSSALL,
+ ALTIVEC_BUILTIN_DSS,
+ ALTIVEC_BUILTIN_LVSL,
+ ALTIVEC_BUILTIN_LVSR,
+ ALTIVEC_BUILTIN_DSTT,
+ ALTIVEC_BUILTIN_DSTST,
+ ALTIVEC_BUILTIN_DSTSTT,
+ ALTIVEC_BUILTIN_DST,
+ ALTIVEC_BUILTIN_LVEBX,
+ ALTIVEC_BUILTIN_LVEHX,
+ ALTIVEC_BUILTIN_LVEWX,
+ ALTIVEC_BUILTIN_LVXL,
+ ALTIVEC_BUILTIN_LVX,
+ ALTIVEC_BUILTIN_STVX,
+ ALTIVEC_BUILTIN_STVEBX,
+ ALTIVEC_BUILTIN_STVEHX,
+ ALTIVEC_BUILTIN_STVEWX,
+ ALTIVEC_BUILTIN_STVXL,
+ ALTIVEC_BUILTIN_VCMPBFP_P,
+ ALTIVEC_BUILTIN_VCMPEQFP_P,
+ ALTIVEC_BUILTIN_VCMPEQUB_P,
+ ALTIVEC_BUILTIN_VCMPEQUH_P,
+ ALTIVEC_BUILTIN_VCMPEQUW_P,
+ ALTIVEC_BUILTIN_VCMPGEFP_P,
+ ALTIVEC_BUILTIN_VCMPGTFP_P,
+ ALTIVEC_BUILTIN_VCMPGTSB_P,
+ ALTIVEC_BUILTIN_VCMPGTSH_P,
+ ALTIVEC_BUILTIN_VCMPGTSW_P,
+ ALTIVEC_BUILTIN_VCMPGTUB_P,
+ ALTIVEC_BUILTIN_VCMPGTUH_P,
+ ALTIVEC_BUILTIN_VCMPGTUW_P,
+ ALTIVEC_BUILTIN_ABSS_V4SI,
+ ALTIVEC_BUILTIN_ABSS_V8HI,
+ ALTIVEC_BUILTIN_ABSS_V16QI,
+ ALTIVEC_BUILTIN_ABS_V4SI,
+ ALTIVEC_BUILTIN_ABS_V4SF,
+ ALTIVEC_BUILTIN_ABS_V8HI,
+ ALTIVEC_BUILTIN_ABS_V16QI,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ ALTIVEC_BUILTIN_MASK_FOR_STORE,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF,
+
+ /* Altivec overloaded builtins. */
+ ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_OVERLOADED_FIRST = ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_VCMPGT_P,
+ ALTIVEC_BUILTIN_VCMPGE_P,
+ ALTIVEC_BUILTIN_VEC_ABS,
+ ALTIVEC_BUILTIN_VEC_ABSS,
+ ALTIVEC_BUILTIN_VEC_ADD,
+ ALTIVEC_BUILTIN_VEC_ADDC,
+ ALTIVEC_BUILTIN_VEC_ADDS,
+ ALTIVEC_BUILTIN_VEC_AND,
+ ALTIVEC_BUILTIN_VEC_ANDC,
+ ALTIVEC_BUILTIN_VEC_AVG,
+ ALTIVEC_BUILTIN_VEC_CEIL,
+ ALTIVEC_BUILTIN_VEC_CMPB,
+ ALTIVEC_BUILTIN_VEC_CMPEQ,
+ ALTIVEC_BUILTIN_VEC_CMPEQUB,
+ ALTIVEC_BUILTIN_VEC_CMPEQUH,
+ ALTIVEC_BUILTIN_VEC_CMPEQUW,
+ ALTIVEC_BUILTIN_VEC_CMPGE,
+ ALTIVEC_BUILTIN_VEC_CMPGT,
+ ALTIVEC_BUILTIN_VEC_CMPLE,
+ ALTIVEC_BUILTIN_VEC_CMPLT,
+ ALTIVEC_BUILTIN_VEC_CTF,
+ ALTIVEC_BUILTIN_VEC_CTS,
+ ALTIVEC_BUILTIN_VEC_CTU,
+ ALTIVEC_BUILTIN_VEC_DST,
+ ALTIVEC_BUILTIN_VEC_DSTST,
+ ALTIVEC_BUILTIN_VEC_DSTSTT,
+ ALTIVEC_BUILTIN_VEC_DSTT,
+ ALTIVEC_BUILTIN_VEC_EXPTE,
+ ALTIVEC_BUILTIN_VEC_FLOOR,
+ ALTIVEC_BUILTIN_VEC_LD,
+ ALTIVEC_BUILTIN_VEC_LDE,
+ ALTIVEC_BUILTIN_VEC_LDL,
+ ALTIVEC_BUILTIN_VEC_LOGE,
+ ALTIVEC_BUILTIN_VEC_LVEBX,
+ ALTIVEC_BUILTIN_VEC_LVEHX,
+ ALTIVEC_BUILTIN_VEC_LVEWX,
+ ALTIVEC_BUILTIN_VEC_LVSL,
+ ALTIVEC_BUILTIN_VEC_LVSR,
+ ALTIVEC_BUILTIN_VEC_MADD,
+ ALTIVEC_BUILTIN_VEC_MADDS,
+ ALTIVEC_BUILTIN_VEC_MAX,
+ ALTIVEC_BUILTIN_VEC_MERGEH,
+ ALTIVEC_BUILTIN_VEC_MERGEL,
+ ALTIVEC_BUILTIN_VEC_MIN,
+ ALTIVEC_BUILTIN_VEC_MLADD,
+ ALTIVEC_BUILTIN_VEC_MPERM,
+ ALTIVEC_BUILTIN_VEC_MRADDS,
+ ALTIVEC_BUILTIN_VEC_MRGHB,
+ ALTIVEC_BUILTIN_VEC_MRGHH,
+ ALTIVEC_BUILTIN_VEC_MRGHW,
+ ALTIVEC_BUILTIN_VEC_MRGLB,
+ ALTIVEC_BUILTIN_VEC_MRGLH,
+ ALTIVEC_BUILTIN_VEC_MRGLW,
+ ALTIVEC_BUILTIN_VEC_MSUM,
+ ALTIVEC_BUILTIN_VEC_MSUMS,
+ ALTIVEC_BUILTIN_VEC_MTVSCR,
+ ALTIVEC_BUILTIN_VEC_MULE,
+ ALTIVEC_BUILTIN_VEC_MULO,
+ ALTIVEC_BUILTIN_VEC_NMSUB,
+ ALTIVEC_BUILTIN_VEC_NOR,
+ ALTIVEC_BUILTIN_VEC_OR,
+ ALTIVEC_BUILTIN_VEC_PACK,
+ ALTIVEC_BUILTIN_VEC_PACKPX,
+ ALTIVEC_BUILTIN_VEC_PACKS,
+ ALTIVEC_BUILTIN_VEC_PACKSU,
+ ALTIVEC_BUILTIN_VEC_PERM,
+ ALTIVEC_BUILTIN_VEC_RE,
+ ALTIVEC_BUILTIN_VEC_RL,
+ ALTIVEC_BUILTIN_VEC_ROUND,
+ ALTIVEC_BUILTIN_VEC_RSQRTE,
+ ALTIVEC_BUILTIN_VEC_SEL,
+ ALTIVEC_BUILTIN_VEC_SL,
+ ALTIVEC_BUILTIN_VEC_SLD,
+ ALTIVEC_BUILTIN_VEC_SLL,
+ ALTIVEC_BUILTIN_VEC_SLO,
+ ALTIVEC_BUILTIN_VEC_SPLAT,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S8,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U8,
+ ALTIVEC_BUILTIN_VEC_SPLTB,
+ ALTIVEC_BUILTIN_VEC_SPLTH,
+ ALTIVEC_BUILTIN_VEC_SPLTW,
+ ALTIVEC_BUILTIN_VEC_SR,
+ ALTIVEC_BUILTIN_VEC_SRA,
+ ALTIVEC_BUILTIN_VEC_SRL,
+ ALTIVEC_BUILTIN_VEC_SRO,
+ ALTIVEC_BUILTIN_VEC_ST,
+ ALTIVEC_BUILTIN_VEC_STE,
+ ALTIVEC_BUILTIN_VEC_STL,
+ ALTIVEC_BUILTIN_VEC_STVEBX,
+ ALTIVEC_BUILTIN_VEC_STVEHX,
+ ALTIVEC_BUILTIN_VEC_STVEWX,
+ ALTIVEC_BUILTIN_VEC_SUB,
+ ALTIVEC_BUILTIN_VEC_SUBC,
+ ALTIVEC_BUILTIN_VEC_SUBS,
+ ALTIVEC_BUILTIN_VEC_SUM2S,
+ ALTIVEC_BUILTIN_VEC_SUM4S,
+ ALTIVEC_BUILTIN_VEC_SUMS,
+ ALTIVEC_BUILTIN_VEC_TRUNC,
+ ALTIVEC_BUILTIN_VEC_UNPACKH,
+ ALTIVEC_BUILTIN_VEC_UNPACKL,
+ ALTIVEC_BUILTIN_VEC_VADDFP,
+ ALTIVEC_BUILTIN_VEC_VADDSBS,
+ ALTIVEC_BUILTIN_VEC_VADDSHS,
+ ALTIVEC_BUILTIN_VEC_VADDSWS,
+ ALTIVEC_BUILTIN_VEC_VADDUBM,
+ ALTIVEC_BUILTIN_VEC_VADDUBS,
+ ALTIVEC_BUILTIN_VEC_VADDUHM,
+ ALTIVEC_BUILTIN_VEC_VADDUHS,
+ ALTIVEC_BUILTIN_VEC_VADDUWM,
+ ALTIVEC_BUILTIN_VEC_VADDUWS,
+ ALTIVEC_BUILTIN_VEC_VAVGSB,
+ ALTIVEC_BUILTIN_VEC_VAVGSH,
+ ALTIVEC_BUILTIN_VEC_VAVGSW,
+ ALTIVEC_BUILTIN_VEC_VAVGUB,
+ ALTIVEC_BUILTIN_VEC_VAVGUH,
+ ALTIVEC_BUILTIN_VEC_VAVGUW,
+ ALTIVEC_BUILTIN_VEC_VCFSX,
+ ALTIVEC_BUILTIN_VEC_VCFUX,
+ ALTIVEC_BUILTIN_VEC_VCMPEQFP,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUB,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUH,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUW,
+ ALTIVEC_BUILTIN_VEC_VMAXFP,
+ ALTIVEC_BUILTIN_VEC_VMAXSB,
+ ALTIVEC_BUILTIN_VEC_VMAXSH,
+ ALTIVEC_BUILTIN_VEC_VMAXSW,
+ ALTIVEC_BUILTIN_VEC_VMAXUB,
+ ALTIVEC_BUILTIN_VEC_VMAXUH,
+ ALTIVEC_BUILTIN_VEC_VMAXUW,
+ ALTIVEC_BUILTIN_VEC_VMINFP,
+ ALTIVEC_BUILTIN_VEC_VMINSB,
+ ALTIVEC_BUILTIN_VEC_VMINSH,
+ ALTIVEC_BUILTIN_VEC_VMINSW,
+ ALTIVEC_BUILTIN_VEC_VMINUB,
+ ALTIVEC_BUILTIN_VEC_VMINUH,
+ ALTIVEC_BUILTIN_VEC_VMINUW,
+ ALTIVEC_BUILTIN_VEC_VMRGHB,
+ ALTIVEC_BUILTIN_VEC_VMRGHH,
+ ALTIVEC_BUILTIN_VEC_VMRGHW,
+ ALTIVEC_BUILTIN_VEC_VMRGLB,
+ ALTIVEC_BUILTIN_VEC_VMRGLH,
+ ALTIVEC_BUILTIN_VEC_VMRGLW,
+ ALTIVEC_BUILTIN_VEC_VMSUMMBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHS,
+ ALTIVEC_BUILTIN_VEC_VMSUMUBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHS,
+ ALTIVEC_BUILTIN_VEC_VMULESB,
+ ALTIVEC_BUILTIN_VEC_VMULESH,
+ ALTIVEC_BUILTIN_VEC_VMULEUB,
+ ALTIVEC_BUILTIN_VEC_VMULEUH,
+ ALTIVEC_BUILTIN_VEC_VMULOSB,
+ ALTIVEC_BUILTIN_VEC_VMULOSH,
+ ALTIVEC_BUILTIN_VEC_VMULOUB,
+ ALTIVEC_BUILTIN_VEC_VMULOUH,
+ ALTIVEC_BUILTIN_VEC_VPKSHSS,
+ ALTIVEC_BUILTIN_VEC_VPKSHUS,
+ ALTIVEC_BUILTIN_VEC_VPKSWSS,
+ ALTIVEC_BUILTIN_VEC_VPKSWUS,
+ ALTIVEC_BUILTIN_VEC_VPKUHUM,
+ ALTIVEC_BUILTIN_VEC_VPKUHUS,
+ ALTIVEC_BUILTIN_VEC_VPKUWUM,
+ ALTIVEC_BUILTIN_VEC_VPKUWUS,
+ ALTIVEC_BUILTIN_VEC_VRLB,
+ ALTIVEC_BUILTIN_VEC_VRLH,
+ ALTIVEC_BUILTIN_VEC_VRLW,
+ ALTIVEC_BUILTIN_VEC_VSLB,
+ ALTIVEC_BUILTIN_VEC_VSLH,
+ ALTIVEC_BUILTIN_VEC_VSLW,
+ ALTIVEC_BUILTIN_VEC_VSPLTB,
+ ALTIVEC_BUILTIN_VEC_VSPLTH,
+ ALTIVEC_BUILTIN_VEC_VSPLTW,
+ ALTIVEC_BUILTIN_VEC_VSRAB,
+ ALTIVEC_BUILTIN_VEC_VSRAH,
+ ALTIVEC_BUILTIN_VEC_VSRAW,
+ ALTIVEC_BUILTIN_VEC_VSRB,
+ ALTIVEC_BUILTIN_VEC_VSRH,
+ ALTIVEC_BUILTIN_VEC_VSRW,
+ ALTIVEC_BUILTIN_VEC_VSUBFP,
+ ALTIVEC_BUILTIN_VEC_VSUBSBS,
+ ALTIVEC_BUILTIN_VEC_VSUBSHS,
+ ALTIVEC_BUILTIN_VEC_VSUBSWS,
+ ALTIVEC_BUILTIN_VEC_VSUBUBM,
+ ALTIVEC_BUILTIN_VEC_VSUBUBS,
+ ALTIVEC_BUILTIN_VEC_VSUBUHM,
+ ALTIVEC_BUILTIN_VEC_VSUBUHS,
+ ALTIVEC_BUILTIN_VEC_VSUBUWM,
+ ALTIVEC_BUILTIN_VEC_VSUBUWS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SBS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SHS,
+ ALTIVEC_BUILTIN_VEC_VSUM4UBS,
+ ALTIVEC_BUILTIN_VEC_VUPKHPX,
+ ALTIVEC_BUILTIN_VEC_VUPKHSB,
+ ALTIVEC_BUILTIN_VEC_VUPKHSH,
+ ALTIVEC_BUILTIN_VEC_VUPKLPX,
+ ALTIVEC_BUILTIN_VEC_VUPKLSB,
+ ALTIVEC_BUILTIN_VEC_VUPKLSH,
+ ALTIVEC_BUILTIN_VEC_XOR,
+ ALTIVEC_BUILTIN_VEC_STEP,
+ ALTIVEC_BUILTIN_OVERLOADED_LAST = ALTIVEC_BUILTIN_VEC_STEP,
+
+ /* SPE builtins. */
+ SPE_BUILTIN_EVADDW,
+ SPE_BUILTIN_EVAND,
+ SPE_BUILTIN_EVANDC,
+ SPE_BUILTIN_EVDIVWS,
+ SPE_BUILTIN_EVDIVWU,
+ SPE_BUILTIN_EVEQV,
+ SPE_BUILTIN_EVFSADD,
+ SPE_BUILTIN_EVFSDIV,
+ SPE_BUILTIN_EVFSMUL,
+ SPE_BUILTIN_EVFSSUB,
+ SPE_BUILTIN_EVLDDX,
+ SPE_BUILTIN_EVLDHX,
+ SPE_BUILTIN_EVLDWX,
+ SPE_BUILTIN_EVLHHESPLATX,
+ SPE_BUILTIN_EVLHHOSSPLATX,
+ SPE_BUILTIN_EVLHHOUSPLATX,
+ SPE_BUILTIN_EVLWHEX,
+ SPE_BUILTIN_EVLWHOSX,
+ SPE_BUILTIN_EVLWHOUX,
+ SPE_BUILTIN_EVLWHSPLATX,
+ SPE_BUILTIN_EVLWWSPLATX,
+ SPE_BUILTIN_EVMERGEHI,
+ SPE_BUILTIN_EVMERGEHILO,
+ SPE_BUILTIN_EVMERGELO,
+ SPE_BUILTIN_EVMERGELOHI,
+ SPE_BUILTIN_EVMHEGSMFAA,
+ SPE_BUILTIN_EVMHEGSMFAN,
+ SPE_BUILTIN_EVMHEGSMIAA,
+ SPE_BUILTIN_EVMHEGSMIAN,
+ SPE_BUILTIN_EVMHEGUMIAA,
+ SPE_BUILTIN_EVMHEGUMIAN,
+ SPE_BUILTIN_EVMHESMF,
+ SPE_BUILTIN_EVMHESMFA,
+ SPE_BUILTIN_EVMHESMFAAW,
+ SPE_BUILTIN_EVMHESMFANW,
+ SPE_BUILTIN_EVMHESMI,
+ SPE_BUILTIN_EVMHESMIA,
+ SPE_BUILTIN_EVMHESMIAAW,
+ SPE_BUILTIN_EVMHESMIANW,
+ SPE_BUILTIN_EVMHESSF,
+ SPE_BUILTIN_EVMHESSFA,
+ SPE_BUILTIN_EVMHESSFAAW,
+ SPE_BUILTIN_EVMHESSFANW,
+ SPE_BUILTIN_EVMHESSIAAW,
+ SPE_BUILTIN_EVMHESSIANW,
+ SPE_BUILTIN_EVMHEUMI,
+ SPE_BUILTIN_EVMHEUMIA,
+ SPE_BUILTIN_EVMHEUMIAAW,
+ SPE_BUILTIN_EVMHEUMIANW,
+ SPE_BUILTIN_EVMHEUSIAAW,
+ SPE_BUILTIN_EVMHEUSIANW,
+ SPE_BUILTIN_EVMHOGSMFAA,
+ SPE_BUILTIN_EVMHOGSMFAN,
+ SPE_BUILTIN_EVMHOGSMIAA,
+ SPE_BUILTIN_EVMHOGSMIAN,
+ SPE_BUILTIN_EVMHOGUMIAA,
+ SPE_BUILTIN_EVMHOGUMIAN,
+ SPE_BUILTIN_EVMHOSMF,
+ SPE_BUILTIN_EVMHOSMFA,
+ SPE_BUILTIN_EVMHOSMFAAW,
+ SPE_BUILTIN_EVMHOSMFANW,
+ SPE_BUILTIN_EVMHOSMI,
+ SPE_BUILTIN_EVMHOSMIA,
+ SPE_BUILTIN_EVMHOSMIAAW,
+ SPE_BUILTIN_EVMHOSMIANW,
+ SPE_BUILTIN_EVMHOSSF,
+ SPE_BUILTIN_EVMHOSSFA,
+ SPE_BUILTIN_EVMHOSSFAAW,
+ SPE_BUILTIN_EVMHOSSFANW,
+ SPE_BUILTIN_EVMHOSSIAAW,
+ SPE_BUILTIN_EVMHOSSIANW,
+ SPE_BUILTIN_EVMHOUMI,
+ SPE_BUILTIN_EVMHOUMIA,
+ SPE_BUILTIN_EVMHOUMIAAW,
+ SPE_BUILTIN_EVMHOUMIANW,
+ SPE_BUILTIN_EVMHOUSIAAW,
+ SPE_BUILTIN_EVMHOUSIANW,
+ SPE_BUILTIN_EVMWHSMF,
+ SPE_BUILTIN_EVMWHSMFA,
+ SPE_BUILTIN_EVMWHSMI,
+ SPE_BUILTIN_EVMWHSMIA,
+ SPE_BUILTIN_EVMWHSSF,
+ SPE_BUILTIN_EVMWHSSFA,
+ SPE_BUILTIN_EVMWHUMI,
+ SPE_BUILTIN_EVMWHUMIA,
+ SPE_BUILTIN_EVMWLSMIAAW,
+ SPE_BUILTIN_EVMWLSMIANW,
+ SPE_BUILTIN_EVMWLSSIAAW,
+ SPE_BUILTIN_EVMWLSSIANW,
+ SPE_BUILTIN_EVMWLUMI,
+ SPE_BUILTIN_EVMWLUMIA,
+ SPE_BUILTIN_EVMWLUMIAAW,
+ SPE_BUILTIN_EVMWLUMIANW,
+ SPE_BUILTIN_EVMWLUSIAAW,
+ SPE_BUILTIN_EVMWLUSIANW,
+ SPE_BUILTIN_EVMWSMF,
+ SPE_BUILTIN_EVMWSMFA,
+ SPE_BUILTIN_EVMWSMFAA,
+ SPE_BUILTIN_EVMWSMFAN,
+ SPE_BUILTIN_EVMWSMI,
+ SPE_BUILTIN_EVMWSMIA,
+ SPE_BUILTIN_EVMWSMIAA,
+ SPE_BUILTIN_EVMWSMIAN,
+ SPE_BUILTIN_EVMWHSSFAA,
+ SPE_BUILTIN_EVMWSSF,
+ SPE_BUILTIN_EVMWSSFA,
+ SPE_BUILTIN_EVMWSSFAA,
+ SPE_BUILTIN_EVMWSSFAN,
+ SPE_BUILTIN_EVMWUMI,
+ SPE_BUILTIN_EVMWUMIA,
+ SPE_BUILTIN_EVMWUMIAA,
+ SPE_BUILTIN_EVMWUMIAN,
+ SPE_BUILTIN_EVNAND,
+ SPE_BUILTIN_EVNOR,
+ SPE_BUILTIN_EVOR,
+ SPE_BUILTIN_EVORC,
+ SPE_BUILTIN_EVRLW,
+ SPE_BUILTIN_EVSLW,
+ SPE_BUILTIN_EVSRWS,
+ SPE_BUILTIN_EVSRWU,
+ SPE_BUILTIN_EVSTDDX,
+ SPE_BUILTIN_EVSTDHX,
+ SPE_BUILTIN_EVSTDWX,
+ SPE_BUILTIN_EVSTWHEX,
+ SPE_BUILTIN_EVSTWHOX,
+ SPE_BUILTIN_EVSTWWEX,
+ SPE_BUILTIN_EVSTWWOX,
+ SPE_BUILTIN_EVSUBFW,
+ SPE_BUILTIN_EVXOR,
+ SPE_BUILTIN_EVABS,
+ SPE_BUILTIN_EVADDSMIAAW,
+ SPE_BUILTIN_EVADDSSIAAW,
+ SPE_BUILTIN_EVADDUMIAAW,
+ SPE_BUILTIN_EVADDUSIAAW,
+ SPE_BUILTIN_EVCNTLSW,
+ SPE_BUILTIN_EVCNTLZW,
+ SPE_BUILTIN_EVEXTSB,
+ SPE_BUILTIN_EVEXTSH,
+ SPE_BUILTIN_EVFSABS,
+ SPE_BUILTIN_EVFSCFSF,
+ SPE_BUILTIN_EVFSCFSI,
+ SPE_BUILTIN_EVFSCFUF,
+ SPE_BUILTIN_EVFSCFUI,
+ SPE_BUILTIN_EVFSCTSF,
+ SPE_BUILTIN_EVFSCTSI,
+ SPE_BUILTIN_EVFSCTSIZ,
+ SPE_BUILTIN_EVFSCTUF,
+ SPE_BUILTIN_EVFSCTUI,
+ SPE_BUILTIN_EVFSCTUIZ,
+ SPE_BUILTIN_EVFSNABS,
+ SPE_BUILTIN_EVFSNEG,
+ SPE_BUILTIN_EVMRA,
+ SPE_BUILTIN_EVNEG,
+ SPE_BUILTIN_EVRNDW,
+ SPE_BUILTIN_EVSUBFSMIAAW,
+ SPE_BUILTIN_EVSUBFSSIAAW,
+ SPE_BUILTIN_EVSUBFUMIAAW,
+ SPE_BUILTIN_EVSUBFUSIAAW,
+ SPE_BUILTIN_EVADDIW,
+ SPE_BUILTIN_EVLDD,
+ SPE_BUILTIN_EVLDH,
+ SPE_BUILTIN_EVLDW,
+ SPE_BUILTIN_EVLHHESPLAT,
+ SPE_BUILTIN_EVLHHOSSPLAT,
+ SPE_BUILTIN_EVLHHOUSPLAT,
+ SPE_BUILTIN_EVLWHE,
+ SPE_BUILTIN_EVLWHOS,
+ SPE_BUILTIN_EVLWHOU,
+ SPE_BUILTIN_EVLWHSPLAT,
+ SPE_BUILTIN_EVLWWSPLAT,
+ SPE_BUILTIN_EVRLWI,
+ SPE_BUILTIN_EVSLWI,
+ SPE_BUILTIN_EVSRWIS,
+ SPE_BUILTIN_EVSRWIU,
+ SPE_BUILTIN_EVSTDD,
+ SPE_BUILTIN_EVSTDH,
+ SPE_BUILTIN_EVSTDW,
+ SPE_BUILTIN_EVSTWHE,
+ SPE_BUILTIN_EVSTWHO,
+ SPE_BUILTIN_EVSTWWE,
+ SPE_BUILTIN_EVSTWWO,
+ SPE_BUILTIN_EVSUBIFW,
+
+ /* Compares. */
+ SPE_BUILTIN_EVCMPEQ,
+ SPE_BUILTIN_EVCMPGTS,
+ SPE_BUILTIN_EVCMPGTU,
+ SPE_BUILTIN_EVCMPLTS,
+ SPE_BUILTIN_EVCMPLTU,
+ SPE_BUILTIN_EVFSCMPEQ,
+ SPE_BUILTIN_EVFSCMPGT,
+ SPE_BUILTIN_EVFSCMPLT,
+ SPE_BUILTIN_EVFSTSTEQ,
+ SPE_BUILTIN_EVFSTSTGT,
+ SPE_BUILTIN_EVFSTSTLT,
+
+ /* EVSEL compares. */
+ SPE_BUILTIN_EVSEL_CMPEQ,
+ SPE_BUILTIN_EVSEL_CMPGTS,
+ SPE_BUILTIN_EVSEL_CMPGTU,
+ SPE_BUILTIN_EVSEL_CMPLTS,
+ SPE_BUILTIN_EVSEL_CMPLTU,
+ SPE_BUILTIN_EVSEL_FSCMPEQ,
+ SPE_BUILTIN_EVSEL_FSCMPGT,
+ SPE_BUILTIN_EVSEL_FSCMPLT,
+ SPE_BUILTIN_EVSEL_FSTSTEQ,
+ SPE_BUILTIN_EVSEL_FSTSTGT,
+ SPE_BUILTIN_EVSEL_FSTSTLT,
+
+ SPE_BUILTIN_EVSPLATFI,
+ SPE_BUILTIN_EVSPLATI,
+ SPE_BUILTIN_EVMWHSSMAA,
+ SPE_BUILTIN_EVMWHSMFAA,
+ SPE_BUILTIN_EVMWHSMIAA,
+ SPE_BUILTIN_EVMWHUSIAA,
+ SPE_BUILTIN_EVMWHUMIAA,
+ SPE_BUILTIN_EVMWHSSFAN,
+ SPE_BUILTIN_EVMWHSSIAN,
+ SPE_BUILTIN_EVMWHSMFAN,
+ SPE_BUILTIN_EVMWHSMIAN,
+ SPE_BUILTIN_EVMWHUSIAN,
+ SPE_BUILTIN_EVMWHUMIAN,
+ SPE_BUILTIN_EVMWHGSSFAA,
+ SPE_BUILTIN_EVMWHGSMFAA,
+ SPE_BUILTIN_EVMWHGSMIAA,
+ SPE_BUILTIN_EVMWHGUMIAA,
+ SPE_BUILTIN_EVMWHGSSFAN,
+ SPE_BUILTIN_EVMWHGSMFAN,
+ SPE_BUILTIN_EVMWHGSMIAN,
+ SPE_BUILTIN_EVMWHGUMIAN,
+ SPE_BUILTIN_MTSPEFSCR,
+ SPE_BUILTIN_MFSPEFSCR,
+ SPE_BUILTIN_BRINC,
+
+ /* APPLE LOCAL begin AltiVec */
+ /* AltiVec PIM functions, used in Apple AltiVec mode. */
+ ALTIVEC_PIM__FIRST,
+
+ /* PIM Operations. */
+ ALTIVEC_PIM_VEC_ABS = ALTIVEC_PIM__FIRST,
+ ALTIVEC_PIM_VEC_ABS_2,
+ ALTIVEC_PIM_VEC_ABS_3,
+ ALTIVEC_PIM_VEC_ABS_4,
+ ALTIVEC_PIM_VEC_ABSS,
+ ALTIVEC_PIM_VEC_ABSS_2,
+ ALTIVEC_PIM_VEC_ABSS_3,
+ ALTIVEC_PIM_VEC_ADD,
+ ALTIVEC_PIM_VEC_ADD_2,
+ ALTIVEC_PIM_VEC_ADD_3,
+ ALTIVEC_PIM_VEC_ADD_4,
+ ALTIVEC_PIM_VEC_ADDC,
+ ALTIVEC_PIM_VEC_ADDS,
+ ALTIVEC_PIM_VEC_ADDS_2,
+ ALTIVEC_PIM_VEC_ADDS_3,
+ ALTIVEC_PIM_VEC_ADDS_4,
+ ALTIVEC_PIM_VEC_ADDS_5,
+ ALTIVEC_PIM_VEC_ADDS_6,
+ ALTIVEC_PIM_VEC_AND,
+ ALTIVEC_PIM_VEC_ANDC,
+ ALTIVEC_PIM_VEC_AVG,
+ ALTIVEC_PIM_VEC_AVG_2,
+ ALTIVEC_PIM_VEC_AVG_3,
+ ALTIVEC_PIM_VEC_AVG_4,
+ ALTIVEC_PIM_VEC_AVG_5,
+ ALTIVEC_PIM_VEC_AVG_6,
+ ALTIVEC_PIM_VEC_CEIL,
+ ALTIVEC_PIM_VEC_CMPB,
+ ALTIVEC_PIM_VEC_CMPEQ,
+ ALTIVEC_PIM_VEC_CMPEQ_2,
+ ALTIVEC_PIM_VEC_CMPEQ_3,
+ ALTIVEC_PIM_VEC_CMPEQ_4,
+ ALTIVEC_PIM_VEC_CMPGE,
+ ALTIVEC_PIM_VEC_CMPGT,
+ ALTIVEC_PIM_VEC_CMPGT_2,
+ ALTIVEC_PIM_VEC_CMPGT_3,
+ ALTIVEC_PIM_VEC_CMPGT_4,
+ ALTIVEC_PIM_VEC_CMPGT_5,
+ ALTIVEC_PIM_VEC_CMPGT_6,
+ ALTIVEC_PIM_VEC_CMPGT_7,
+ ALTIVEC_PIM_VEC_CMPLE,
+ ALTIVEC_PIM_VEC_CMPLT,
+ ALTIVEC_PIM_VEC_CMPLT_2,
+ ALTIVEC_PIM_VEC_CMPLT_3,
+ ALTIVEC_PIM_VEC_CMPLT_4,
+ ALTIVEC_PIM_VEC_CMPLT_5,
+ ALTIVEC_PIM_VEC_CMPLT_6,
+ ALTIVEC_PIM_VEC_CMPLT_7,
+ ALTIVEC_PIM_VEC_CTF,
+ ALTIVEC_PIM_VEC_CTF_2,
+ ALTIVEC_PIM_VEC_CTS,
+ ALTIVEC_PIM_VEC_CTU,
+ ALTIVEC_PIM_VEC_DSS,
+ ALTIVEC_PIM_VEC_DSSALL,
+ ALTIVEC_PIM_VEC_DST,
+ ALTIVEC_PIM_VEC_DSTST,
+ ALTIVEC_PIM_VEC_DSTSTT,
+ ALTIVEC_PIM_VEC_DSTT,
+ ALTIVEC_PIM_VEC_EXPTE,
+ ALTIVEC_PIM_VEC_FLOOR,
+ ALTIVEC_PIM_VEC_LD,
+ ALTIVEC_PIM_VEC_LDE,
+ ALTIVEC_PIM_VEC_LDE_2,
+ ALTIVEC_PIM_VEC_LDE_3,
+ ALTIVEC_PIM_VEC_LDL,
+ ALTIVEC_PIM_VEC_LOGE,
+ ALTIVEC_PIM_VEC_LVEBX,
+ ALTIVEC_PIM_VEC_LVEHX,
+ ALTIVEC_PIM_VEC_LVEWX,
+ ALTIVEC_PIM_VEC_LVSL,
+ ALTIVEC_PIM_VEC_LVSR,
+ ALTIVEC_PIM_VEC_LVX,
+ ALTIVEC_PIM_VEC_LVXL,
+ ALTIVEC_PIM_VEC_MADD,
+ ALTIVEC_PIM_VEC_MADDS,
+ ALTIVEC_PIM_VEC_MAX,
+ ALTIVEC_PIM_VEC_MAX_2,
+ ALTIVEC_PIM_VEC_MAX_3,
+ ALTIVEC_PIM_VEC_MAX_4,
+ ALTIVEC_PIM_VEC_MAX_5,
+ ALTIVEC_PIM_VEC_MAX_6,
+ ALTIVEC_PIM_VEC_MAX_7,
+ ALTIVEC_PIM_VEC_MERGEH,
+ ALTIVEC_PIM_VEC_MERGEH_2,
+ ALTIVEC_PIM_VEC_MERGEH_3,
+ ALTIVEC_PIM_VEC_MERGEL,
+ ALTIVEC_PIM_VEC_MERGEL_2,
+ ALTIVEC_PIM_VEC_MERGEL_3,
+ ALTIVEC_PIM_VEC_MFVSCR,
+ ALTIVEC_PIM_VEC_MIN,
+ ALTIVEC_PIM_VEC_MIN_2,
+ ALTIVEC_PIM_VEC_MIN_3,
+ ALTIVEC_PIM_VEC_MIN_4,
+ ALTIVEC_PIM_VEC_MIN_5,
+ ALTIVEC_PIM_VEC_MIN_6,
+ ALTIVEC_PIM_VEC_MIN_7,
+ ALTIVEC_PIM_VEC_MLADD,
+ ALTIVEC_PIM_VEC_MLADD_2,
+ ALTIVEC_PIM_VEC_MRADDS,
+ ALTIVEC_PIM_VEC_MSUM,
+ ALTIVEC_PIM_VEC_MSUM_2,
+ ALTIVEC_PIM_VEC_MSUM_3,
+ ALTIVEC_PIM_VEC_MSUM_4,
+ ALTIVEC_PIM_VEC_MSUMS,
+ ALTIVEC_PIM_VEC_MSUMS_2,
+ ALTIVEC_PIM_VEC_MTVSCR,
+ ALTIVEC_PIM_VEC_MULE,
+ ALTIVEC_PIM_VEC_MULE_2,
+ ALTIVEC_PIM_VEC_MULE_3,
+ ALTIVEC_PIM_VEC_MULE_4,
+ ALTIVEC_PIM_VEC_MULO,
+ ALTIVEC_PIM_VEC_MULO_2,
+ ALTIVEC_PIM_VEC_MULO_3,
+ ALTIVEC_PIM_VEC_MULO_4,
+ ALTIVEC_PIM_VEC_NMSUB,
+ ALTIVEC_PIM_VEC_NOR,
+ ALTIVEC_PIM_VEC_OR,
+ ALTIVEC_PIM_VEC_PACK,
+ ALTIVEC_PIM_VEC_PACK_2,
+ ALTIVEC_PIM_VEC_PACKPX,
+ ALTIVEC_PIM_VEC_PACKS,
+ ALTIVEC_PIM_VEC_PACKS_2,
+ ALTIVEC_PIM_VEC_PACKS_3,
+ ALTIVEC_PIM_VEC_PACKS_4,
+ ALTIVEC_PIM_VEC_PACKSU,
+ ALTIVEC_PIM_VEC_PACKSU_2,
+ ALTIVEC_PIM_VEC_PACKSU_3,
+ ALTIVEC_PIM_VEC_PACKSU_4,
+ ALTIVEC_PIM_VEC_PERM,
+ ALTIVEC_PIM_VEC_RE,
+ ALTIVEC_PIM_VEC_RL,
+ ALTIVEC_PIM_VEC_RL_2,
+ ALTIVEC_PIM_VEC_RL_3,
+ ALTIVEC_PIM_VEC_ROUND,
+ ALTIVEC_PIM_VEC_RSQRTE,
+ ALTIVEC_PIM_VEC_SEL,
+ ALTIVEC_PIM_VEC_SL,
+ ALTIVEC_PIM_VEC_SL_2,
+ ALTIVEC_PIM_VEC_SL_3,
+ ALTIVEC_PIM_VEC_SLD,
+ ALTIVEC_PIM_VEC_SLL,
+ ALTIVEC_PIM_VEC_SLO,
+ ALTIVEC_PIM_VEC_SPLAT,
+ ALTIVEC_PIM_VEC_SPLAT_2,
+ ALTIVEC_PIM_VEC_SPLAT_3,
+ ALTIVEC_PIM_VEC_SPLAT_S8,
+ ALTIVEC_PIM_VEC_SPLAT_S16,
+ ALTIVEC_PIM_VEC_SPLAT_S32,
+ ALTIVEC_PIM_VEC_SPLAT_U8,
+ ALTIVEC_PIM_VEC_SPLAT_U16,
+ ALTIVEC_PIM_VEC_SPLAT_U32,
+ ALTIVEC_PIM_VEC_SR,
+ ALTIVEC_PIM_VEC_SR_2,
+ ALTIVEC_PIM_VEC_SR_3,
+ ALTIVEC_PIM_VEC_SRA,
+ ALTIVEC_PIM_VEC_SRA_2,
+ ALTIVEC_PIM_VEC_SRA_3,
+ ALTIVEC_PIM_VEC_SRL,
+ ALTIVEC_PIM_VEC_SRO,
+ ALTIVEC_PIM_VEC_ST,
+ ALTIVEC_PIM_VEC_STE,
+ ALTIVEC_PIM_VEC_STE_2,
+ ALTIVEC_PIM_VEC_STE_3,
+ ALTIVEC_PIM_VEC_STL,
+ ALTIVEC_PIM_VEC_STVEBX,
+ ALTIVEC_PIM_VEC_STVEHX,
+ ALTIVEC_PIM_VEC_STVEWX,
+ ALTIVEC_PIM_VEC_STVX,
+ ALTIVEC_PIM_VEC_STVXL,
+ ALTIVEC_PIM_VEC_SUB,
+ ALTIVEC_PIM_VEC_SUB_2,
+ ALTIVEC_PIM_VEC_SUB_3,
+ ALTIVEC_PIM_VEC_SUB_4,
+ ALTIVEC_PIM_VEC_SUBC,
+ ALTIVEC_PIM_VEC_SUBS,
+ ALTIVEC_PIM_VEC_SUBS_2,
+ ALTIVEC_PIM_VEC_SUBS_3,
+ ALTIVEC_PIM_VEC_SUBS_4,
+ ALTIVEC_PIM_VEC_SUBS_5,
+ ALTIVEC_PIM_VEC_SUBS_6,
+ ALTIVEC_PIM_VEC_SUM4S,
+ ALTIVEC_PIM_VEC_SUM4S_2,
+ ALTIVEC_PIM_VEC_SUM4S_3,
+ ALTIVEC_PIM_VEC_SUM2S,
+ ALTIVEC_PIM_VEC_SUMS,
+ ALTIVEC_PIM_VEC_TRUNC,
+ ALTIVEC_PIM_VEC_UNPACKH,
+ ALTIVEC_PIM_VEC_UNPACKH_2,
+ ALTIVEC_PIM_VEC_UNPACKH_3,
+ ALTIVEC_PIM_VEC_UNPACKL,
+ ALTIVEC_PIM_VEC_UNPACKL_2,
+ ALTIVEC_PIM_VEC_UNPACKL_3,
+ ALTIVEC_PIM_VEC_VADDCUW,
+ ALTIVEC_PIM_VEC_VADDFP,
+ ALTIVEC_PIM_VEC_VADDSBS,
+ ALTIVEC_PIM_VEC_VADDSHS,
+ ALTIVEC_PIM_VEC_VADDSWS,
+ ALTIVEC_PIM_VEC_VADDUBM,
+ ALTIVEC_PIM_VEC_VADDUBS,
+ ALTIVEC_PIM_VEC_VADDUHM,
+ ALTIVEC_PIM_VEC_VADDUHS,
+ ALTIVEC_PIM_VEC_VADDUWM,
+ ALTIVEC_PIM_VEC_VADDUWS,
+ ALTIVEC_PIM_VEC_VAND,
+ ALTIVEC_PIM_VEC_VANDC,
+ ALTIVEC_PIM_VEC_VAVGSB,
+ ALTIVEC_PIM_VEC_VAVGSH,
+ ALTIVEC_PIM_VEC_VAVGSW,
+ ALTIVEC_PIM_VEC_VAVGUB,
+ ALTIVEC_PIM_VEC_VAVGUH,
+ ALTIVEC_PIM_VEC_VAVGUW,
+ ALTIVEC_PIM_VEC_VCFSX,
+ ALTIVEC_PIM_VEC_VCFUX,
+ ALTIVEC_PIM_VEC_VCMPBFP,
+ ALTIVEC_PIM_VEC_VCMPEQFP,
+ ALTIVEC_PIM_VEC_VCMPEQUB,
+ ALTIVEC_PIM_VEC_VCMPEQUH,
+ ALTIVEC_PIM_VEC_VCMPEQUW,
+ ALTIVEC_PIM_VEC_VCMPGEFP,
+ ALTIVEC_PIM_VEC_VCMPGTFP,
+ ALTIVEC_PIM_VEC_VCMPGTSB,
+ ALTIVEC_PIM_VEC_VCMPGTSH,
+ ALTIVEC_PIM_VEC_VCMPGTSW,
+ ALTIVEC_PIM_VEC_VCMPGTUB,
+ ALTIVEC_PIM_VEC_VCMPGTUH,
+ ALTIVEC_PIM_VEC_VCMPGTUW,
+ ALTIVEC_PIM_VEC_VCTSXS,
+ ALTIVEC_PIM_VEC_VCTUXS,
+ ALTIVEC_PIM_VEC_VEXPTEFP,
+ ALTIVEC_PIM_VEC_VLOGEFP,
+ ALTIVEC_PIM_VEC_VMADDFP,
+ ALTIVEC_PIM_VEC_VMAXFP,
+ ALTIVEC_PIM_VEC_VMAXSB,
+ ALTIVEC_PIM_VEC_VMAXSH,
+ ALTIVEC_PIM_VEC_VMAXSW,
+ ALTIVEC_PIM_VEC_VMAXUB,
+ ALTIVEC_PIM_VEC_VMAXUH,
+ ALTIVEC_PIM_VEC_VMAXUW,
+ ALTIVEC_PIM_VEC_VMHADDSHS,
+ ALTIVEC_PIM_VEC_VMHRADDSHS,
+ ALTIVEC_PIM_VEC_VMINFP,
+ ALTIVEC_PIM_VEC_VMINSB,
+ ALTIVEC_PIM_VEC_VMINSH,
+ ALTIVEC_PIM_VEC_VMINSW,
+ ALTIVEC_PIM_VEC_VMINUB,
+ ALTIVEC_PIM_VEC_VMINUH,
+ ALTIVEC_PIM_VEC_VMINUW,
+ ALTIVEC_PIM_VEC_VMLADDUHM,
+ ALTIVEC_PIM_VEC_VMRGHB,
+ ALTIVEC_PIM_VEC_VMRGHH,
+ ALTIVEC_PIM_VEC_VMRGHW,
+ ALTIVEC_PIM_VEC_VMRGLB,
+ ALTIVEC_PIM_VEC_VMRGLH,
+ ALTIVEC_PIM_VEC_VMRGLW,
+ ALTIVEC_PIM_VEC_VMSUMMBM,
+ ALTIVEC_PIM_VEC_VMSUMSHM,
+ ALTIVEC_PIM_VEC_VMSUMSHS,
+ ALTIVEC_PIM_VEC_VMSUMUBM,
+ ALTIVEC_PIM_VEC_VMSUMUHM,
+ ALTIVEC_PIM_VEC_VMSUMUHS,
+ ALTIVEC_PIM_VEC_VMULESB,
+ ALTIVEC_PIM_VEC_VMULESH,
+ ALTIVEC_PIM_VEC_VMULEUB,
+ ALTIVEC_PIM_VEC_VMULEUH,
+ ALTIVEC_PIM_VEC_VMULOSB,
+ ALTIVEC_PIM_VEC_VMULOSH,
+ ALTIVEC_PIM_VEC_VMULOUB,
+ ALTIVEC_PIM_VEC_VMULOUH,
+ ALTIVEC_PIM_VEC_VNMSUBFP,
+ ALTIVEC_PIM_VEC_VNOR,
+ ALTIVEC_PIM_VEC_VOR,
+ ALTIVEC_PIM_VEC_VPERM,
+ ALTIVEC_PIM_VEC_VPKPX,
+ ALTIVEC_PIM_VEC_VPKSHSS,
+ ALTIVEC_PIM_VEC_VPKSHUS,
+ ALTIVEC_PIM_VEC_VPKSWSS,
+ ALTIVEC_PIM_VEC_VPKSWUS,
+ ALTIVEC_PIM_VEC_VPKUHUM,
+ ALTIVEC_PIM_VEC_VPKUHUS,
+ ALTIVEC_PIM_VEC_VPKUWUM,
+ ALTIVEC_PIM_VEC_VPKUWUS,
+ ALTIVEC_PIM_VEC_VREFP,
+ ALTIVEC_PIM_VEC_VRFIM,
+ ALTIVEC_PIM_VEC_VRFIN,
+ ALTIVEC_PIM_VEC_VRFIP,
+ ALTIVEC_PIM_VEC_VRFIZ,
+ ALTIVEC_PIM_VEC_VRLB,
+ ALTIVEC_PIM_VEC_VRLH,
+ ALTIVEC_PIM_VEC_VRLW,
+ ALTIVEC_PIM_VEC_VRSQRTEFP,
+ ALTIVEC_PIM_VEC_VSEL,
+ ALTIVEC_PIM_VEC_VSL,
+ ALTIVEC_PIM_VEC_VSLB,
+ ALTIVEC_PIM_VEC_VSLDOI,
+ ALTIVEC_PIM_VEC_VSLH,
+ ALTIVEC_PIM_VEC_VSLO,
+ ALTIVEC_PIM_VEC_VSLW,
+ ALTIVEC_PIM_VEC_VSPLTB,
+ ALTIVEC_PIM_VEC_VSPLTH,
+ ALTIVEC_PIM_VEC_VSPLTISB,
+ ALTIVEC_PIM_VEC_VSPLTISH,
+ ALTIVEC_PIM_VEC_VSPLTISW,
+ ALTIVEC_PIM_VEC_VSPLTW,
+ ALTIVEC_PIM_VEC_VSR,
+ ALTIVEC_PIM_VEC_VSRAB,
+ ALTIVEC_PIM_VEC_VSRAH,
+ ALTIVEC_PIM_VEC_VSRAW,
+ ALTIVEC_PIM_VEC_VSRB,
+ ALTIVEC_PIM_VEC_VSRH,
+ ALTIVEC_PIM_VEC_VSRO,
+ ALTIVEC_PIM_VEC_VSRW,
+ ALTIVEC_PIM_VEC_VSUBCUW,
+ ALTIVEC_PIM_VEC_VSUBFP,
+ ALTIVEC_PIM_VEC_VSUBSBS,
+ ALTIVEC_PIM_VEC_VSUBSHS,
+ ALTIVEC_PIM_VEC_VSUBSWS,
+ ALTIVEC_PIM_VEC_VSUBUBM,
+ ALTIVEC_PIM_VEC_VSUBUBS,
+ ALTIVEC_PIM_VEC_VSUBUHM,
+ ALTIVEC_PIM_VEC_VSUBUHS,
+ ALTIVEC_PIM_VEC_VSUBUWM,
+ ALTIVEC_PIM_VEC_VSUBUWS,
+ ALTIVEC_PIM_VEC_VSUM4SBS,
+ ALTIVEC_PIM_VEC_VSUM4SHS,
+ ALTIVEC_PIM_VEC_VSUM4UBS,
+ ALTIVEC_PIM_VEC_VSUM2SWS,
+ ALTIVEC_PIM_VEC_VSUMSWS,
+ ALTIVEC_PIM_VEC_VUPKHPX,
+ ALTIVEC_PIM_VEC_VUPKHSB,
+ ALTIVEC_PIM_VEC_VUPKHSH,
+ ALTIVEC_PIM_VEC_VUPKLPX,
+ ALTIVEC_PIM_VEC_VUPKLSB,
+ ALTIVEC_PIM_VEC_VUPKLSH,
+ ALTIVEC_PIM_VEC_VXOR,
+ ALTIVEC_PIM_VEC_XOR,
+
+ /* PIM Predicates. */
+ ALTIVEC_PIM_VEC_ALL_EQ,
+ ALTIVEC_PIM_VEC_ALL_EQ_2,
+ ALTIVEC_PIM_VEC_ALL_EQ_3,
+ ALTIVEC_PIM_VEC_ALL_EQ_4,
+ ALTIVEC_PIM_VEC_ALL_GE,
+ ALTIVEC_PIM_VEC_ALL_GE_2,
+ ALTIVEC_PIM_VEC_ALL_GE_3,
+ ALTIVEC_PIM_VEC_ALL_GE_4,
+ ALTIVEC_PIM_VEC_ALL_GE_5,
+ ALTIVEC_PIM_VEC_ALL_GE_6,
+ ALTIVEC_PIM_VEC_ALL_GE_7,
+ ALTIVEC_PIM_VEC_ALL_GT,
+ ALTIVEC_PIM_VEC_ALL_GT_2,
+ ALTIVEC_PIM_VEC_ALL_GT_3,
+ ALTIVEC_PIM_VEC_ALL_GT_4,
+ ALTIVEC_PIM_VEC_ALL_GT_5,
+ ALTIVEC_PIM_VEC_ALL_GT_6,
+ ALTIVEC_PIM_VEC_ALL_GT_7,
+ ALTIVEC_PIM_VEC_ALL_IN,
+ ALTIVEC_PIM_VEC_ALL_LE,
+ ALTIVEC_PIM_VEC_ALL_LE_2,
+ ALTIVEC_PIM_VEC_ALL_LE_3,
+ ALTIVEC_PIM_VEC_ALL_LE_4,
+ ALTIVEC_PIM_VEC_ALL_LE_5,
+ ALTIVEC_PIM_VEC_ALL_LE_6,
+ ALTIVEC_PIM_VEC_ALL_LE_7,
+ ALTIVEC_PIM_VEC_ALL_LT,
+ ALTIVEC_PIM_VEC_ALL_LT_2,
+ ALTIVEC_PIM_VEC_ALL_LT_3,
+ ALTIVEC_PIM_VEC_ALL_LT_4,
+ ALTIVEC_PIM_VEC_ALL_LT_5,
+ ALTIVEC_PIM_VEC_ALL_LT_6,
+ ALTIVEC_PIM_VEC_ALL_LT_7,
+ ALTIVEC_PIM_VEC_ALL_NAN,
+ ALTIVEC_PIM_VEC_ALL_NE,
+ ALTIVEC_PIM_VEC_ALL_NE_2,
+ ALTIVEC_PIM_VEC_ALL_NE_3,
+ ALTIVEC_PIM_VEC_ALL_NE_4,
+ ALTIVEC_PIM_VEC_ALL_NGE,
+ ALTIVEC_PIM_VEC_ALL_NGT,
+ ALTIVEC_PIM_VEC_ALL_NLE,
+ ALTIVEC_PIM_VEC_ALL_NLT,
+ ALTIVEC_PIM_VEC_ALL_NUMERIC,
+ ALTIVEC_PIM_VEC_ANY_EQ,
+ ALTIVEC_PIM_VEC_ANY_EQ_2,
+ ALTIVEC_PIM_VEC_ANY_EQ_3,
+ ALTIVEC_PIM_VEC_ANY_EQ_4,
+ ALTIVEC_PIM_VEC_ANY_GE,
+ ALTIVEC_PIM_VEC_ANY_GE_2,
+ ALTIVEC_PIM_VEC_ANY_GE_3,
+ ALTIVEC_PIM_VEC_ANY_GE_4,
+ ALTIVEC_PIM_VEC_ANY_GE_5,
+ ALTIVEC_PIM_VEC_ANY_GE_6,
+ ALTIVEC_PIM_VEC_ANY_GE_7,
+ ALTIVEC_PIM_VEC_ANY_GT,
+ ALTIVEC_PIM_VEC_ANY_GT_2,
+ ALTIVEC_PIM_VEC_ANY_GT_3,
+ ALTIVEC_PIM_VEC_ANY_GT_4,
+ ALTIVEC_PIM_VEC_ANY_GT_5,
+ ALTIVEC_PIM_VEC_ANY_GT_6,
+ ALTIVEC_PIM_VEC_ANY_GT_7,
+ ALTIVEC_PIM_VEC_ANY_LE,
+ ALTIVEC_PIM_VEC_ANY_LE_2,
+ ALTIVEC_PIM_VEC_ANY_LE_3,
+ ALTIVEC_PIM_VEC_ANY_LE_4,
+ ALTIVEC_PIM_VEC_ANY_LE_5,
+ ALTIVEC_PIM_VEC_ANY_LE_6,
+ ALTIVEC_PIM_VEC_ANY_LE_7,
+ ALTIVEC_PIM_VEC_ANY_LT,
+ ALTIVEC_PIM_VEC_ANY_LT_2,
+ ALTIVEC_PIM_VEC_ANY_LT_3,
+ ALTIVEC_PIM_VEC_ANY_LT_4,
+ ALTIVEC_PIM_VEC_ANY_LT_5,
+ ALTIVEC_PIM_VEC_ANY_LT_6,
+ ALTIVEC_PIM_VEC_ANY_LT_7,
+ ALTIVEC_PIM_VEC_ANY_NAN,
+ ALTIVEC_PIM_VEC_ANY_NE,
+ ALTIVEC_PIM_VEC_ANY_NE_2,
+ ALTIVEC_PIM_VEC_ANY_NE_3,
+ ALTIVEC_PIM_VEC_ANY_NE_4,
+ ALTIVEC_PIM_VEC_ANY_NGE,
+ ALTIVEC_PIM_VEC_ANY_NGT,
+ ALTIVEC_PIM_VEC_ANY_NLE,
+ ALTIVEC_PIM_VEC_ANY_NLT,
+ ALTIVEC_PIM_VEC_ANY_NUMERIC,
+ ALTIVEC_PIM_VEC_ANY_OUT,
+
+ ALTIVEC_PIM__LAST = ALTIVEC_PIM_VEC_ANY_OUT,
+ /* APPLE LOCAL end AltiVec */
+
+ /* APPLE LOCAL begin constant cfstrings */
+ RS6000_BUILTIN_MAX,
+ TARGET_BUILTIN_MAX = RS6000_BUILTIN_MAX,
+ /* APPLE LOCAL end constant cfstrings */
+
+ RS6000_BUILTIN_COUNT
+};
+/* APPLE LOCAL radar 4204303 */
+#define INITIAL_FRAME_ADDRESS_RTX stack_pointer_rtx
+
+/* APPLE LOCAL begin CW asm blocks */
+/* Table of instructions that need extra constraints. */
+#undef TARGET_IASM_OP_CONSTRAINT
+#define TARGET_IASM_OP_CONSTRAINT \
+ { "la", 2, "m" }, \
+ { "lbz", 2, "m" }, \
+ { "lbzu", 2, "m" }, \
+ { "ld", 2, "m" }, \
+ { "ldu", 2, "m" }, \
+ { "lfd", 2, "m" }, \
+ { "lfdu", 2, "m" }, \
+ { "lfs", 2, "m" }, \
+ { "lfsu", 2, "m" }, \
+ { "lha", 2, "m" }, \
+ { "lhau", 2, "m" }, \
+ { "lhz", 2, "m" }, \
+ { "lhzu", 2, "m" }, \
+ { "lmw", 2, "m" }, \
+ { "lwa", 2, "m" }, \
+ { "lwz", 2, "m" }, \
+ { "lwzu", 2, "m" }, \
+ { "stb", 2, "m" }, \
+ { "stbu", 2, "m" }, \
+ { "std", 2, "m" }, \
+ { "stdu", 2, "m" }, \
+ { "stfd", 2, "m" }, \
+ { "stfdu", 2, "m" }, \
+ { "stfs", 2, "m" }, \
+ { "stfsu", 2, "m" }, \
+ { "sth", 2, "m" }, \
+ { "sthu", 2, "m" }, \
+ { "stmw", 2, "m" }, \
+ { "stw", 2, "m" }, \
+ { "stwu", 2, "m" },
+
+#define IASM_FUNCTION_MODIFIER "z"
+
+/* APPLE LOCAL end CW asm blocks */
+
+enum rs6000_builtin_type_index
+{
+ RS6000_BTI_NOT_OPAQUE,
+ RS6000_BTI_opaque_V2SI,
+ RS6000_BTI_opaque_V2SF,
+ RS6000_BTI_opaque_p_V2SI,
+ RS6000_BTI_opaque_V4SI,
+ RS6000_BTI_V16QI,
+ RS6000_BTI_V2SI,
+ RS6000_BTI_V2SF,
+ RS6000_BTI_V4HI,
+ RS6000_BTI_V4SI,
+ RS6000_BTI_V4SF,
+ RS6000_BTI_V8HI,
+ RS6000_BTI_unsigned_V16QI,
+ RS6000_BTI_unsigned_V8HI,
+ RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_bool_char, /* __bool char */
+ RS6000_BTI_bool_short, /* __bool short */
+ RS6000_BTI_bool_int, /* __bool int */
+ RS6000_BTI_pixel, /* __pixel */
+ RS6000_BTI_bool_V16QI, /* __vector __bool char */
+ RS6000_BTI_bool_V8HI, /* __vector __bool short */
+ RS6000_BTI_bool_V4SI, /* __vector __bool int */
+ RS6000_BTI_pixel_V8HI, /* __vector __pixel */
+ RS6000_BTI_long, /* long_integer_type_node */
+ RS6000_BTI_unsigned_long, /* long_unsigned_type_node */
+ RS6000_BTI_INTQI, /* intQI_type_node */
+ RS6000_BTI_UINTQI, /* unsigned_intQI_type_node */
+ RS6000_BTI_INTHI, /* intHI_type_node */
+ RS6000_BTI_UINTHI, /* unsigned_intHI_type_node */
+ RS6000_BTI_INTSI, /* intSI_type_node */
+ RS6000_BTI_UINTSI, /* unsigned_intSI_type_node */
+ RS6000_BTI_float, /* float_type_node */
+ RS6000_BTI_void, /* void_type_node */
+ RS6000_BTI_MAX
+};
+
+
+#define opaque_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SI])
+#define opaque_V2SF_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SF])
+#define opaque_p_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_p_V2SI])
+#define opaque_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V4SI])
+#define V16QI_type_node (rs6000_builtin_types[RS6000_BTI_V16QI])
+#define V2SI_type_node (rs6000_builtin_types[RS6000_BTI_V2SI])
+#define V2SF_type_node (rs6000_builtin_types[RS6000_BTI_V2SF])
+#define V4HI_type_node (rs6000_builtin_types[RS6000_BTI_V4HI])
+#define V4SI_type_node (rs6000_builtin_types[RS6000_BTI_V4SI])
+#define V4SF_type_node (rs6000_builtin_types[RS6000_BTI_V4SF])
+#define V8HI_type_node (rs6000_builtin_types[RS6000_BTI_V8HI])
+#define unsigned_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V16QI])
+#define unsigned_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V8HI])
+#define unsigned_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V4SI])
+#define bool_char_type_node (rs6000_builtin_types[RS6000_BTI_bool_char])
+#define bool_short_type_node (rs6000_builtin_types[RS6000_BTI_bool_short])
+#define bool_int_type_node (rs6000_builtin_types[RS6000_BTI_bool_int])
+#define pixel_type_node (rs6000_builtin_types[RS6000_BTI_pixel])
+#define bool_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V16QI])
+#define bool_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V8HI])
+#define bool_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V4SI])
+#define pixel_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_pixel_V8HI])
+
+#define long_integer_type_internal_node (rs6000_builtin_types[RS6000_BTI_long])
+#define long_unsigned_type_internal_node (rs6000_builtin_types[RS6000_BTI_unsigned_long])
+#define intQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTQI])
+#define uintQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTQI])
+#define intHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTHI])
+#define uintHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTHI])
+#define intSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTSI])
+#define uintSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTSI])
+#define float_type_internal_node (rs6000_builtin_types[RS6000_BTI_float])
+#define void_type_internal_node (rs6000_builtin_types[RS6000_BTI_void])
+
+extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX];
+extern GTY(()) tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.md
new file mode 100644
index 000000000..f4147b167
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.md
@@ -0,0 +1,14451 @@
+;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
+;; Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+;; 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+;; Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;;
+;; UNSPEC usage
+;;
+
+(define_constants
+ [(UNSPEC_FRSP 0) ; frsp for POWER machines
+ (UNSPEC_TIE 5) ; tie stack contents and stack pointer
+ (UNSPEC_TOCPTR 6) ; address of a word pointing to the TOC
+ (UNSPEC_TOC 7) ; address of the TOC (more-or-less)
+ (UNSPEC_MOVSI_GOT 8)
+ (UNSPEC_MV_CR_OV 9) ; move_from_CR_ov_bit
+ (UNSPEC_FCTIWZ 10)
+ (UNSPEC_FRIM 11)
+ (UNSPEC_FRIN 12)
+ (UNSPEC_FRIP 13)
+ (UNSPEC_FRIZ 14)
+ (UNSPEC_LD_MPIC 15) ; load_macho_picbase
+ (UNSPEC_MPIC_CORRECT 16) ; macho_correct_pic
+ (UNSPEC_TLSGD 17)
+ (UNSPEC_TLSLD 18)
+ (UNSPEC_MOVESI_FROM_CR 19)
+ (UNSPEC_MOVESI_TO_CR 20)
+ (UNSPEC_TLSDTPREL 21)
+ (UNSPEC_TLSDTPRELHA 22)
+ (UNSPEC_TLSDTPRELLO 23)
+ (UNSPEC_TLSGOTDTPREL 24)
+ (UNSPEC_TLSTPREL 25)
+ (UNSPEC_TLSTPRELHA 26)
+ (UNSPEC_TLSTPRELLO 27)
+ (UNSPEC_TLSGOTTPREL 28)
+ (UNSPEC_TLSTLS 29)
+ (UNSPEC_FIX_TRUNC_TF 30) ; fadd, rounding towards zero
+ (UNSPEC_MV_CR_GT 31) ; move_from_CR_gt_bit
+ ;; APPLE LOCAL special ObjC method use of R12
+ (UNSPEC_LD_MPIC_L 50) ; local_macho_picbase_label
+ (UNSPEC_STFIWX 32)
+ (UNSPEC_POPCNTB 33)
+ (UNSPEC_FRES 34)
+ (UNSPEC_SP_SET 35)
+ (UNSPEC_SP_TEST 36)
+ (UNSPEC_SYNC 37)
+ (UNSPEC_LWSYNC 38)
+ (UNSPEC_ISYNC 39)
+ (UNSPEC_SYNC_OP 40)
+ (UNSPEC_ATOMIC 41)
+ (UNSPEC_CMPXCHG 42)
+ (UNSPEC_XCHG 43)
+ (UNSPEC_AND 44)
+ (UNSPEC_DLMZB 45)
+ (UNSPEC_DLMZB_CR 46)
+ (UNSPEC_DLMZB_STRLEN 47)
+ ;; APPLE LOCAL begin 3399553
+ (UNSPEC_MFFS 48)
+ (UNSPEC_FLT_ROUNDS 49)
+ ;; APPLE LOCAL end 3399553
+ ])
+
+;;
+;; UNSPEC_VOLATILE usage
+;;
+
+(define_constants
+ [(UNSPECV_BLOCK 0)
+ (UNSPECV_LL 1) ; load-locked
+ (UNSPECV_SC 2) ; store-conditional
+ (UNSPECV_EH_RR 9) ; eh_reg_restore
+ ])
+
+;; Define an insn type attribute. This is used in function unit delay
+;; computations.
+(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c"
+ (const_string "integer"))
+
+;; Length (in bytes).
+; '(pc)' in the following doesn't include the instruction itself; it is
+; calculated as if the instruction had zero size.
+(define_attr "length" ""
+ (if_then_else (eq_attr "type" "branch")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -32768))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 32764)))
+ (const_int 4)
+ (const_int 8))
+ (const_int 4)))
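+
+; For example, a conditional branch whose target lies within
+; [-32768, +32764) bytes of the branch itself is a single 4-byte
+; instruction; a farther target costs 8 bytes (in effect, a short
+; branch of the opposite sense around an unconditional jump).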
+
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in rs6000.h.
+
+(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,power4,power5"
+ (const (symbol_ref "rs6000_cpu_attr")))
+
+(automata_option "ndfa")
+
+(include "rios1.md")
+(include "rios2.md")
+(include "rs64.md")
+(include "mpc.md")
+(include "40x.md")
+(include "440.md")
+(include "603.md")
+(include "6xx.md")
+(include "7xx.md")
+(include "7450.md")
+(include "8540.md")
+(include "power4.md")
+(include "power5.md")
+
+(include "predicates.md")
+(include "constraints.md")
+
+(include "darwin.md")
+
+
+;; Mode macros
+
+; This mode macro allows :GPR to be used to indicate the allowable size
+; of whole values in GPRs.
+(define_mode_macro GPR [SI (DI "TARGET_POWERPC64")])
+
+; Any supported integer mode.
+(define_mode_macro INT [QI HI SI DI TI])
+
+; Any supported integer mode that fits in one register.
+(define_mode_macro INT1 [QI HI SI (DI "TARGET_POWERPC64")])
+
+; extend modes for DImode
+(define_mode_macro QHSI [QI HI SI])
+
+; SImode or DImode, even if DImode doesn't fit in GPRs.
+(define_mode_macro SDI [SI DI])
+
+; The size of a pointer. Also, the size of the value that a record-condition
+; (one with a '.') will compare.
+(define_mode_macro P [(SI "TARGET_32BIT") (DI "TARGET_64BIT")])
+
+; Any hardware-supported floating-point mode
+(define_mode_macro FP [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)")
+ (TF "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128")])
+
+; Various instructions that come in SI and DI forms.
+; A generic w/d attribute, for things like cmpw/cmpd.
+(define_mode_attr wd [(QI "b") (HI "h") (SI "w") (DI "d")])
+
+; DImode bits
+(define_mode_attr dbits [(QI "56") (HI "48") (SI "32")])
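+
+; For example, instantiating the zero-extend pattern below with HImode
+; substitutes <wd> = "h" and <dbits> = "48", giving the alternatives
+; "lhz%U1%X1 %0,%1" and "rldicl %0,%1,0,48".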
+
+
+;; Start with fixed-point load and store insns. Here we put only the more
+;; complex forms. Basic data transfer is done later.
+
+(define_expand "zero_extend<mode>di2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn "*zero_extend<mode>di2_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_operand:QHSI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ l<wd>z%U1%X1 %0,%1
+ rldicl %0,%1,0,<dbits>"
+ [(set_attr "type" "load,*")])
+
+(define_insn "*zero_extend<mode>di2_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldicl. %2,%1,0,<dbits>
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn "*zero_extend<mode>di2_internal3"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ rldicl. %0,%1,0,<dbits>
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendhidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lha%U1%X1 %0,%1
+ extsh %0,%1"
+ [(set_attr "type" "load_ext,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsh. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsh. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "lwa_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lwa%U1%X1 %0,%1
+ extsw %0,%1"
+ [(set_attr "type" "load_ext,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsw. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsw. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqisi2"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqisi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqisi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqisi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqisi2_ppc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqisi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "extendqisi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqihi2"
+ [(use (match_operand:HI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqihi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqihi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqihi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqihi2_ppc"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqihi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "extendqihi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lhz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xffff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lha%U1%X1 %0,%1
+ {exts|extsh} %0,%1"
+ [(set_attr "type" "load_ext,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {exts.|extsh.} %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {exts.|extsh.} %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; IBM 405 and 440 half-word multiplication operations.
+
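+;; Each pattern below selects a halfword from each input: a shift right
+;; by 16 for the high halfword (ashiftrt when signed, lshiftrt when
+;; unsigned) or a sign/zero_extend of an HI value for the low halfword.
+;; The mac* forms accumulate the product, the nmac* forms subtract it
+;; from the accumulator, and the mul* forms are plain multiplies; the
+;; "*...c" variants are the record forms that also set CR0.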
+(define_insn "*macchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))))]
+ "TARGET_MULHW"
+ "nmacchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmacchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))))]
+ "TARGET_MULHW"
+ "nmaclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmaclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+;; IBM 405 and 440 string-search dlmzb instruction support.
+(define_insn "dlmzb"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")]
+ UNSPEC_DLMZB_CR))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_DLMZB))]
+ "TARGET_DLMZB"
+ "dlmzb. %0, %1, %2")
+
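+;; The strlen expansion below handles only a zero search character and
+;; an alignment of at least eight bytes (anything else FAILs back to the
+;; generic code).  It scans eight bytes per iteration, using dlmzb. on
+;; the two loaded words; once CR0 signals that a zero byte was seen, the
+;; byte count from dlmzb locates the terminator and the length comes out
+;; as (pointer + count) - start - 1.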
+(define_expand "strlensi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:BLK 1 "general_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_DLMZB_STRLEN))
+ (clobber (match_scratch:CC 4 "=x"))]
+ "TARGET_DLMZB && WORDS_BIG_ENDIAN && !optimize_size"
+{
+ rtx result = operands[0];
+ rtx src = operands[1];
+ rtx search_char = operands[2];
+ rtx align = operands[3];
+ rtx addr, scratch_string, word1, word2, scratch_dlmzb;
+ rtx loop_label, end_label, mem, cr0, cond;
+ if (search_char != const0_rtx
+ || GET_CODE (align) != CONST_INT
+ || INTVAL (align) < 8)
+ FAIL;
+ word1 = gen_reg_rtx (SImode);
+ word2 = gen_reg_rtx (SImode);
+ scratch_dlmzb = gen_reg_rtx (SImode);
+ scratch_string = gen_reg_rtx (Pmode);
+ loop_label = gen_label_rtx ();
+ end_label = gen_label_rtx ();
+ addr = force_reg (Pmode, XEXP (src, 0));
+ emit_move_insn (scratch_string, addr);
+ emit_label (loop_label);
+ mem = change_address (src, SImode, scratch_string);
+ emit_move_insn (word1, mem);
+ emit_move_insn (word2, adjust_address (mem, SImode, 4));
+ cr0 = gen_rtx_REG (CCmode, CR0_REGNO);
+ emit_insn (gen_dlmzb (scratch_dlmzb, word1, word2, cr0));
+ cond = gen_rtx_NE (VOIDmode, cr0, const0_rtx);
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ cond,
+ gen_rtx_LABEL_REF
+ (VOIDmode,
+ end_label),
+ pc_rtx)));
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, GEN_INT (8)));
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_LABEL_REF (VOIDmode, loop_label)));
+ emit_barrier ();
+ emit_label (end_label);
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, scratch_dlmzb));
+ emit_insn (gen_subsi3 (result, scratch_string, addr));
+ emit_insn (gen_subsi3 (result, result, const1_rtx));
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Fixed-point arithmetic insns.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (plus:SDI (match_operand:SDI 1 "gpc_reg_operand" "")
+ (match_operand:SDI 2 "reg_or_add_cint_operand" "")))]
+ ""
+{
+ if (<MODE>mode == DImode && ! TARGET_POWERPC64)
+ {
+ if (non_short_cint_operand (operands[2], DImode))
+ FAIL;
+ }
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && ! add_operand (operands[2], <MODE>mode))
+ {
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (<MODE>mode));
+
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
+
+ if (<MODE>mode == DImode && !satisfies_constraint_L (GEN_INT (rest)))
+ FAIL;
+
+ /* The ordering here is important for the prolog expander.
+ When space is allocated from the stack, adding 'low' first may
+ produce a temporary deallocation (which would be bad). */
+ emit_insn (gen_add<mode>3 (tmp, operands[1], GEN_INT (rest)));
+ emit_insn (gen_add<mode>3 (operands[0], tmp, GEN_INT (low)));
+ DONE;
+ }
+})
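+
+;; For example, adding 0x8001 splits as rest = 0x10000, low = -0x7fff
+;; (addis of 1, then addi of -0x7fff): "low" is the sign-extended bottom
+;; 16 bits, so rest = val - low is always a multiple of 0x10000 that a
+;; single addis can add.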
+
+;; Discourage ai/addic because it sets the carry bit, but provide it as
+;; an alternative since, unlike addi, it accepts register zero as a
+;; source.
+(define_insn "*add<mode>3_internal1"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r,?r,r")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b,r,b")
+ (match_operand:GPR 2 "add_operand" "r,I,I,L")))]
+ ""
+ "@
+ {cax|add} %0,%1,%2
+ {cal %0,%2(%1)|addi %0,%1,%2}
+ {ai|addic} %0,%1,%2
+ {cau|addis} %0,%1,%v2"
+ [(set_attr "length" "4,4,4,4")])
+
+(define_insn "addsi3_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (high:SI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && !TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+(define_insn "*add<mode>3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r,r,r"))]
+ ""
+ "@
+ {cax.|add.} %3,%1,%2
+ {ai.|addic.} %3,%1,%2
+ #
+ #"
+ [(set_attr "type" "fast_compare,compare,compare,compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (plus:GPR (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*add<mode>3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:P (match_dup 1)
+ (match_dup 2)))]
+ ""
+ "@
+ {cax.|add.} %0,%1,%2
+ {ai.|addic.} %0,%1,%2
+ #
+ #"
+ [(set_attr "type" "fast_compare,compare,compare,compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (plus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split an add that we can't do in one insn into two insns, each of which
+;; does one 16-bit part. This is used by combine. Note that the low-order
+;; add should be last in case the result gets used in an address.
+
+(define_split
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "non_add_cint_operand" "")))]
+ ""
+ [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
+
+ operands[4] = GEN_INT (low);
+ if (<MODE>mode == SImode || satisfies_constraint_L (GEN_INT (rest)))
+ operands[3] = GEN_INT (rest);
+ else if (! no_new_pseudos)
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_move_insn (operands[3], operands[2]);
+ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+})
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "nor %0,%1,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
+ "@
+ nor. %2,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (not:P (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (not:P (match_dup 1)))]
+ ""
+ "@
+ nor. %0,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (not:P (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (not:P (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
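+;; subf/sf compute operand 1 minus operand 2 but take the subtrahend
+;; first ("subtract from"), hence the %2,%1 operand order in the
+;; templates below.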
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_short_operand" "rI")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ "! TARGET_POWERPC"
+ "{sf%I1|subf%I1c} %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
+ (minus:GPR (match_operand:GPR 1 "reg_or_short_operand" "r,I")
+ (match_operand:GPR 2 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWERPC"
+ "@
+ subf %0,%2,%1
+ subfic %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ subf. %3,%2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (minus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (minus:P (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_POWERPC"
+ "@
+ subf. %0,%2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (minus:P (match_dup 1)
+ (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (minus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (minus:SDI (match_operand:SDI 1 "reg_or_short_operand" "")
+ (match_operand:SDI 2 "reg_or_sub_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_add<mode>3 (operands[0], operands[1],
+ negate_rtx (<MODE>mode, operands[2])));
+ DONE;
+ }
+}")
+
+;; For SMIN, SMAX, UMIN, and UMAX, the define_expands build sequences
+;; around a doz[i] instruction and some auxiliary computations.  A single
+;; define_insn then covers doz[i] itself, and the define_splits recreate
+;; these sequences when combine synthesizes the min/max forms directly.
+
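+;; doz[i] computes "difference or zero": max (op2 - op1, 0).  Hence
+;; smin (a, b) = b - doz (a, b) and smax (a, b) = doz (a, b) + a.  The
+;; unsigned variants first flip the sign bit of both operands (xor with
+;; 0x80000000) so that the signed comparison inside doz orders them as
+;; unsigned values.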
+(define_expand "sminsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]);
+ DONE;
+ }
+
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smin:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 3)))]
+ "")
+
+(define_expand "smaxsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smax:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (plus:SI (match_dup 3) (match_dup 1)))]
+ "")
+
+(define_expand "uminsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ rs6000_emit_minmax (operands[0], UMIN, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+(define_expand "umaxsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ rs6000_emit_minmax (operands[0], UMAX, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %3,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %0,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; We don't need abs with condition code because such comparisons should
+;; never be done.
+(define_expand "abssi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_ISEL)
+ {
+ emit_insn (gen_abssi2_isel (operands[0], operands[1]));
+ DONE;
+ }
+ else if (! TARGET_POWER)
+ {
+ emit_insn (gen_abssi2_nopower (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*abssi2_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "abs %0,%1")
+
+(define_insn_and_split "abssi2_isel"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=y"))]
+ "TARGET_ISEL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (neg:SI (match_dup 1)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 1)
+ (const_int 0)))
+ (set (match_dup 0)
+ (if_then_else:SI (ge (match_dup 3)
+ (const_int 0))
+ (match_dup 1)
+ (match_dup 2)))]
+ "")
+
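+;; Without the POWER abs instruction this is the branch-free three-insn
+;; sequence: t = x >> 31 (arithmetic, so t is 0 or -1), then (x ^ t) - t,
+;; which leaves non-negative x unchanged and negates negative x.  The
+;; nabs pattern further below uses t - (x ^ t) for the negated result.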
+(define_insn_and_split "abssi2_nopower"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER && ! TARGET_ISEL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 0) (match_dup 2)))]
+ "")
+
+(define_insn "*nabs_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "nabs %0,%1")
+
+(define_insn_and_split "*nabs_nopower"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 0)))]
+ "")
+
+(define_expand "neg<mode>2"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (neg:SDI (match_operand:SDI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn "*neg<mode>2_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "neg %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
+ "@
+ neg. %2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (neg:P (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (match_dup 1)))]
+ ""
+ "@
+ neg. %0,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (neg:P (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (neg:P (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (clz:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "{cntlz|cntlz<wd>} %0,%1")
+
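+;; ctz and ffs are synthesized from clz: x & -x isolates the lowest set
+;; bit, so ctz (x) = (bitsize - 1) - clz (x & -x) and
+;; ffs (x) = bitsize - clz (x & -x).  Since cntlz of zero yields the
+;; word size, ffs (0) comes out as the required 0.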
+(define_expand "ctz<mode>2"
+ [(set (match_dup 2)
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (scratch:CC))])
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 1);
+ })
+
+(define_expand "ffs<mode>2"
+ [(set (match_dup 2)
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (scratch:CC))])
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode));
+ })
+
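+;; popcntb computes a per-byte population count; multiplying by
+;; 0x01010101 (replicated to eight bytes for DImode) sums the byte
+;; counts into the most significant byte, and the final shift right by
+;; bitsize - 8 extracts the total.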
+(define_expand "popcount<mode>2"
+ [(set (match_dup 2)
+ (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
+ UNSPEC_POPCNTB))
+ (set (match_dup 3)
+ (mult:GPR (match_dup 2) (match_dup 4)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (lshiftrt:GPR (match_dup 3) (match_dup 5)))]
+ "TARGET_POPCNTB"
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = force_reg (<MODE>mode,
+ <MODE>mode == SImode
+ ? GEN_INT (0x01010101)
+ : GEN_INT ((HOST_WIDE_INT)
+ 0x01010101 << 32 | 0x01010101));
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 8);
+ })
+
+(define_insn "popcntb<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
+ UNSPEC_POPCNTB))]
+ "TARGET_POPCNTB"
+ "popcntb %0,%1")
+
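+;; On POWER, multiply and divide also modify the MQ register, so the
+;; *_mq variants of the patterns below clobber a "q" scratch; the
+;; PowerPC-only *_no_mq variants omit it.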
+(define_expand "mulsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_short_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_mulsi3_mq (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_mulsi3_no_mq (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "mulsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (clobber (match_scratch:SI 3 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+(define_insn "mulsi3_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))]
+ "! TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+(define_insn "*mulsi3_mq_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_no_mq_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_mq_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_no_mq_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Operand 1 is divided by operand 2; quotient goes to operand
+;; 0 and remainder to operand 3.
+;; ??? At some point, see what, if anything, we can do about if (x % y == 0).
+
+(define_expand "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_POWER || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+}")
+
+(define_insn "*divmodsi4_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=q")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "divs %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_expand "udiv<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quous_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_udivsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "udivsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divwu %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*udivsi3_no_mq"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "div<wd>u %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; For powers of two we can do srai/aze for the divide and then adjust for
+;; the modulus.  If it isn't a power of two, FAIL on POWER so divmodsi4 will
+;; be used; for PowerPC, force the operands into registers and do a normal
+;; divide; for AIX common-mode, use the quoss call on register operands.
+(define_expand "div<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 0
+ && exact_log2 (INTVAL (operands[2])) >= 0)
+ ;
+ else if (TARGET_POWERPC)
+ {
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_divsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ }
+ else if (TARGET_POWER)
+ FAIL;
+ else
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quoss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+}")
+
+(define_insn "divsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divw %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*div<mode>3_no_mq"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "div<wd> %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_expand "mod<mode>3"
+ [(use (match_operand:GPR 0 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 1 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ int i;
+ rtx temp1;
+ rtx temp2;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 0
+ || (i = exact_log2 (INTVAL (operands[2]))) < 0)
+ FAIL;
+
+ temp1 = gen_reg_rtx (<MODE>mode);
+ temp2 = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_div<mode>3 (temp1, operands[1], operands[2]));
+ emit_insn (gen_ashl<mode>3 (temp2, temp1, GEN_INT (i)));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "exact_log2_cint_operand" "N")))]
+ ""
+ "{srai|sra<wd>i} %0,%1,%p2\;{aze|addze} %0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r"))]
+ ""
+ "@
+ {srai|sra<wd>i} %3,%1,%p2\;{aze.|addze.} %3,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (div:<MODE> (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (div:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {srai|sra<wd>i} %0,%1,%p2\;{aze.|addze.} %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
+ (const_int 0)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (div:<MODE> (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 4 "register_operand" "2")))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 2 "register_operand" "=*q")
+ (umod:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_dup 1)) (const_int 32))
+ (zero_extend:DI (match_dup 4)))
+ (match_dup 3)))]
+ "TARGET_POWER"
+ "div %0,%1,%3"
+ [(set_attr "type" "idiv")])
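+
+;; An illustrative reading of the pattern above (not from the original
+;; source): it matches the POWER div instruction, which divides the 64-bit
+;; value formed by %1 (high word) and MQ (low word, operand 4) by %3,
+;; leaving the quotient in %0 and the remainder in MQ.  udivmodsi4_normal
+;; below uses it with a zeroed high word to get a 32-bit unsigned divide.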
+
+;; To do an unsigned divide we must handle the case of the divisor looking
+;; like a negative number, i.e. having its high bit set.  If the divisor is
+;; a constant less than 2**31 we don't have to worry about the branches, so
+;; we define a few helper subroutines here.
+;;
+;; First comes the normal case.
+(define_expand "udivmodsi4_normal"
+ [(set (match_dup 4) (const_int 0))
+ (parallel [(set (match_operand:SI 0 "" "")
+ (udiv:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 1 "" "")))
+ (match_operand:SI 2 "" "")))
+ (set (match_operand:SI 3 "" "")
+ (umod:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_dup 1)))
+ (match_dup 2)))])]
+ "TARGET_POWER"
+ "
+{ operands[4] = gen_reg_rtx (SImode); }")
+
+;; This handles the branches.
+(define_expand "udivmodsi4_tests"
+ [(set (match_operand:SI 0 "" "") (const_int 0))
+ (set (match_operand:SI 3 "" "") (match_operand:SI 1 "" ""))
+ (set (match_dup 5) (compare:CCUNS (match_dup 1) (match_operand:SI 2 "" "")))
+ (set (pc) (if_then_else (ltu (match_dup 5) (const_int 0))
+ (label_ref (match_operand:SI 4 "" "")) (pc)))
+ (set (match_dup 0) (const_int 1))
+ (set (match_dup 3) (minus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 6) (compare:CC (match_dup 2) (const_int 0)))
+ (set (pc) (if_then_else (lt (match_dup 6) (const_int 0))
+ (label_ref (match_dup 4)) (pc)))]
+ "TARGET_POWER"
+ "
+{ operands[5] = gen_reg_rtx (CCUNSmode);
+ operands[6] = gen_reg_rtx (CCmode);
+}")
+
+(define_expand "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))])]
+ ""
+ "
+{
+ rtx label = 0;
+
+ if (! TARGET_POWER)
+ {
+ if (! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divus_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) < 0)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ label = gen_label_rtx ();
+ emit (gen_udivmodsi4_tests (operands[0], operands[1], operands[2],
+ operands[3], label));
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit (gen_udivmodsi4_normal (operands[0], operands[1], operands[2],
+ operands[3]));
+ if (label)
+ emit_label (label);
+
+ DONE;
+}")
+
+;; AIX architecture-independent common-mode multiply (DImode),
+;; divide/modulus, and quotient subroutine calls.  The input operands are
+;; in R3 and R4; the results come back in R3 and sometimes R4.  The link
+;; register is always clobbered by the bla instruction, and R0 is sometimes
+;; clobbered as well.  MQ may also be clobbered, but it is assumed unused
+;; when generating common-mode code, so we ignore it.
+(define_insn "mulh_call"
+ [(set (reg:SI 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4)))
+ (const_int 32))))
+ (clobber (match_scratch:SI 0 "=l"))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mulh"
+ [(set_attr "type" "imul")])
+
+(define_insn "mull_call"
+ [(set (reg:DI 3)
+ (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4))))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mull"
+ [(set_attr "type" "imul")])
+
+(define_insn "divss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (mod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divss"
+ [(set_attr "type" "idiv")])
+
+(define_insn "divus_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (umod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 1 "=x"))
+ (clobber (reg:CC 69))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divus"
+ [(set_attr "type" "idiv")])
+
+(define_insn "quoss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quoss"
+ [(set_attr "type" "idiv")])
+
+(define_insn "quous_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 1 "=x"))
+ (clobber (reg:CC 69))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quous"
+ [(set_attr "type" "idiv")])
+
+;; Logical instructions
+;; The logical instructions are mostly combined by using match_operator,
+;; but the plain AND insns are somewhat different because there is no
+;; plain 'andi' (only 'andi.'), no plain 'andis', and there are all
+;; those rotate-and-mask operations. Thus, the AND insns come first.
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")))
+ (clobber (match_scratch:CC 3 "=X,X,x,x"))]
+ ""
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
+
+;; Note: to set a CR field other than CR0 we do the AND immediate and then
+;; test the result again -- this avoids an mfcr, which on the higher-end
+;; machines causes an execution serialization.
+
+(define_insn "*andsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_32BIT"
+ "@
+ and. %3,%1,%2
+ {andil.|andi.} %3,%1,%b2
+ {andiu.|andis.} %3,%1,%u2
+ {rlinm.|rlwinm.} %3,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+(define_insn "*andsi3_internal3"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT"
+ "@
+ #
+ {andil.|andi.} %3,%1,%b2
+ {andiu.|andis.} %3,%1,%u2
+ {rlinm.|rlwinm.} %3,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "8,4,4,4,8,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "and_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:<MODE> (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; We don't have a 32 bit "and. rt,ra,rb" for ppc64. cr is set from the
+;; whole 64 bit reg, and we don't know what is in the high 32 bits.
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*andsi3_internal4"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_32BIT"
+ "@
+ and. %0,%1,%2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2
+ {rlinm.|rlwinm.} %0,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+(define_insn "*andsi3_internal5"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT"
+ "@
+ #
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2
+ {rlinm.|rlwinm.} %0,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "8,4,4,4,8,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "and_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Handle the PowerPC64 rlwinm corner case
+
+(define_insn_and_split "*andsi3_internal6"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "mask_operand_wrap" "i")))]
+ "TARGET_POWERPC64"
+ "#"
+ "TARGET_POWERPC64"
+ [(set (match_dup 0)
+ (and:SI (rotate:SI (match_dup 1) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0)
+ (rotate:SI (match_dup 0) (match_dup 5)))]
+ "
+{
+ int mb = extract_MB (operands[2]);
+ int me = extract_ME (operands[2]);
+ operands[3] = GEN_INT (me + 1);
+ operands[5] = GEN_INT (32 - (me + 1));
+ operands[4] = GEN_INT (~((HOST_WIDE_INT) -1 << (33 + me - mb)));
+}"
+ [(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_iorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_iorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_xorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_xorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_insn "*boolsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:SI 2 "logical_operand" "r,K,L")]))]
+ ""
+ "@
+ %q3 %0,%1,%2
+ {%q3il|%q3i} %0,%1,%b2
+ {%q3iu|%q3is} %0,%1,%u2")
+
+(define_insn "*boolsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split a logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "non_logical_cint_operand" "")]))]
+ ""
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i;
+ i = GEN_INT (INTVAL (operands[2]) & (~ (HOST_WIDE_INT) 0xffff));
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[1], i);
+ i = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[0], i);
+}")
+
+(define_insn "*boolcsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r")]))]
+ ""
+ "%q3 %0,%2,%1")
+
+(define_insn "*boolcsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolcsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))]))]
+ ""
+ "%q3 %0,%1,%2")
+
+(define_insn "*boolccsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; maskir insn.  We need four forms because the AND/IOR operands may appear
+;; in either order.  Don't define forms that only set CR fields, because
+;; these would modify an input register.
+
+(define_insn "*maskir_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_dup 2))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal4"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal5"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal7"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r,r")
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal8"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate and shift insns, in all their variants. These support shifts,
+;; field inserts and extracts, and various combinations thereof.
+(define_expand "insv"
+ [(set (zero_extract (match_operand 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand 3 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. Likewise, do
+ not handle invalid E500 subregs. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD
+ || ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (operands[0], GET_MODE (operands[0])))))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[0]) == DImode)
+ emit_insn (gen_insvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_insvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "insvsi"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "gpc_reg_operand" "r"))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal1"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (rotate:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal2"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal3"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal4"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (zero_extract:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")
+ (match_operand:SI 5 "const_int_operand" "i")))]
+ "INTVAL (operands[4]) >= INTVAL (operands[1])"
+ "*
+{
+ int extract_start = INTVAL (operands[5]) & 31;
+ int extract_size = INTVAL (operands[4]) & 31;
+ int insert_start = INTVAL (operands[2]) & 31;
+ int insert_size = INTVAL (operands[1]) & 31;
+
+  /* Align the extract field with the insert field.  */
+ operands[5] = GEN_INT (extract_start + extract_size - insert_start - insert_size);
+ operands[1] = GEN_INT (insert_start + insert_size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h5,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+;; combine patterns for rlwimi
+(define_insn "*insvsi_internal5"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))
+ (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal6"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))
+ (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "insvdi"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[2]) & 63;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[1] = GEN_INT (64 - start - size);
+ return \"rldimi %0,%3,%H1,%H2\";
+}")
+
+(define_insn "*insvdi_internal2"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
+(define_insn "*insvdi_internal3"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
+(define_expand "extzv"
+ [(set (match_operand 0 "gpc_reg_operand" "")
+ (zero_extract (match_operand 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[1]) == DImode)
+ emit_insn (gen_extzvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_extzvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "extzvsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm|rlwinm} %0,%1,%3,%s2,31\";
+}")
+
+(define_insn "*extzvsi_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+  /* Force a split for the non-CR0 compare alternative.  */
+ if (which_alternative == 1)
+ return \"#\";
+
+ /* If the bit-field being tested fits in the upper or lower half of a
+ word, it is possible to use andiu. or andil. to test it. This is
+ useful because the condition register set-use delay is smaller for
+ andi[ul]. than for rlinm. This doesn't work when the starting bit
+ position is 0 because the LT and GT bits may be set wrong. */
+
+ if ((start > 0 && start + size <= 16) || start >= 16)
+ {
+ operands[3] = GEN_INT (((1 << (16 - (start & 15)))
+ - (1 << (16 - (start & 15) - size))));
+ if (start < 16)
+ return \"{andiu.|andis.} %4,%1,%3\";
+ else
+ return \"{andil.|andi.} %4,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %4,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (zero_extract:SI (match_dup 1) (match_dup 2)
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*extzvsi_internal2"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+  /* Force a split for the non-CR0 compare alternative.  */
+ if (which_alternative == 1)
+ return \"#\";
+
+ /* Since we are using the output value, we can't ignore any need for
+ a shift. The bit-field must end at the LSB. */
+ if (start >= 16 && start + size == 32)
+ {
+ operands[3] = GEN_INT ((1 << size) - 1);
+ return \"{andil.|andi.} %0,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %0,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "extzvdi"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ "TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl %0,%1,%3,%2\";
+}")
+
+(define_insn "*extzvdi_internal1"
+ [(set (match_operand:CC 0 "gpc_reg_operand" "=x")
+ (compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r"))]
+ "TARGET_64BIT"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl. %4,%1,%3,%2\";
+}"
+ [(set_attr "type" "compare")])
+
+(define_insn "*extzvdi_internal2"
+ [(set (match_operand:CC 4 "gpc_reg_operand" "=x")
+ (compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extract:DI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "TARGET_64BIT"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl. %0,%1,%3,%2\";
+}"
+ [(set_attr "type" "compare")])
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ ""
+ "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xffffffff")
+
+(define_insn "*rotlsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xffffffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (rotate:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (rotate:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xffffffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (rotate:SI (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (rotate:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal4"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri"))
+ (match_operand:SI 3 "mask_operand" "n")))]
+ ""
+ "{rl%I2nm|rlw%I2nm} %0,%1,%h2,%m3,%M3")
+
+(define_insn "*rotlsi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %4,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (and:SI (rotate:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal7"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")) 0)))]
+ ""
+ "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xff")
+
+(define_insn "*rotlsi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:QI
+ (rotate:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal9"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal10"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")) 0)))]
+ ""
+ "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xffff")
+
+(define_insn "*rotlsi3_internal11"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:HI
+ (rotate:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal12"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ ""
+ "@
+ {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Note that we use "sle." instead of "sl." so that we can set
+;; SHIFT_COUNT_TRUNCATED.
+
+(define_expand "ashlsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_ashlsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_ashlsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "ashlsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+ (clobber (match_scratch:SI 3 "=q,X"))]
+ "TARGET_POWER"
+ "@
+ sle %0,%1,%2
+ {sli|slwi} %0,%1,%h2")
+
+(define_insn "ashlsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "! TARGET_POWER"
+ "{sl|slw}%I2 %0,%1,%h2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ sle. %3,%1,%2
+ {sli.|slwi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ {sl|slw}%I2. %3,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ sle. %0,%1,%2
+ {sli.|slwi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ {sl|slw}%I2. %0,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashift:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "rlwinm"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "mask_operand" "n")))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "{rlinm|rlwinm} %0,%1,%h2,%m3,%M3")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %4,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "includes_lshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_lshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; The AIX assembler mis-handles "sri x,x,0", so write that case as
+;; "sli x,x,0".
+(define_expand "lshrsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_lshrsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_lshrsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "lshrsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i")))
+ (clobber (match_scratch:SI 3 "=q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre %0,%1,%2
+ mr %0,%1
+ {s%A2i|s%A2wi} %0,%1,%h2")
+
+(define_insn "lshrsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,ri")))]
+ "! TARGET_POWER"
+ "@
+ mr %0,%1
+ {sr|srw}%I2 %0,%1,%h2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,X,r,r,X,r"))
+ (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre. %3,%1,%2
+ mr. %1,%1
+ {s%A2i.|s%A2wi.} %3,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,ri,O,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r,X,r"))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ mr. %1,%1
+ {sr|srw}%I2. %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre. %0,%1,%2
+ mr. %0,%1
+ {s%A2i.|s%A2wi.} %0,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,ri,O,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ mr. %0,%1
+ {sr|srw}%I2. %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "mask_operand" "n")))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "{rlinm|rlwinm} %0,%1,%s2,%m3,%M3")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %4,%1,%s2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "includes_rshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_rshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "{rlinm|rlwinm} %0,%1,%s2,0xff")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "@
+ {rlinm.|rlwinm.} %3,%1,%s2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:QI
+ (lshiftrt:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "{rlinm|rlwinm} %0,%1,%s2,0xffff")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "@
+ {rlinm.|rlwinm.} %3,%1,%s2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:HI
+ (lshiftrt:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (ashiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 31)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (lshiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 31)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (zero_extract:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 1)
+ (const_int 0)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_ashrsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_ashrsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "ashrsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+ (clobber (match_scratch:SI 3 "=q,X"))]
+ "TARGET_POWER"
+ "@
+ srea %0,%1,%2
+ {srai|srawi} %0,%1,%h2")
+
+(define_insn "ashrsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "! TARGET_POWER"
+ "{sra|sraw}%I2 %0,%1,%h2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %3,%1,%2
+ {srai.|srawi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER"
+ "@
+ {sra|sraw}%I2. %3,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %0,%1,%2
+ {srai.|srawi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {sra|sraw}%I2. %0,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Floating-point insns, excluding normal data motion.
+;;
+;; PowerPC has a full set of single-precision floating point instructions.
+;;
+;; For the POWER architecture, we pretend that we have both SFmode and
+;; DFmode insns, while, in fact, all fp insns are done in double.
+;; The only conversions we do are when storing to memory; in that
+;; case, we use the "frsp" instruction before storing.
+;;
+;; Note that when we store into a single-precision memory location, we need to
+;; use the frsp insn first. If the register being stored isn't dead, we
+;; need a scratch register for the frsp. But this is difficult when the store
+;; is done by reload. It is not incorrect to do the frsp on the register in
+;; this case; we merely lose precision that we would otherwise have kept but
+;; that was never guaranteed. Perhaps this should be tightened up at some point.
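+;;
+;; A minimal sketch of the store case just described (real mnemonics,
+;; made-up register numbers), corresponding to C code like
+;; "void f (float *p, double d) { *p = (float) d; }":
+;;
+;;   frsp f0,f1      ; round the double-precision value to single
+;;   stfs f0,0(r3)   ; then store the 32-bit float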
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_extend:DF (match_operand:SF 1 "reg_or_none500mem_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn_and_split "*extendsfdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f,f")
+ (float_extend:DF (match_operand:SF 1 "reg_or_mem_operand" "0,f,m")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "@
+ #
+ fmr %0,%1
+ lfs%U1%X1 %0,%1"
+ "&& reload_completed && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp,fp,fpload")])
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*truncdfsf2_fpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "aux_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRSP))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "negsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "*negsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn "*abssf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fadds %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsubs %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fmuls %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+(define_insn "fres"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_PPC_GFXOPT && flag_finite_math_only"
+ "fres %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+{
+ if (swdiv && !optimize_size && TARGET_PPC_GFXOPT
+ && flag_finite_math_only && !flag_trapping_math)
+ {
+ rs6000_emit_swdivsf (operands[0], operands[1], operands[2]);
+ DONE;
+ }
+})
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fdivs %0,%1,%2"
+ [(set_attr "type" "sdiv")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "fmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "fmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "fnmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "fnmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GPOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsqrts %0,%1"
+ [(set_attr "type" "ssqrt")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWER2 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+(define_expand "copysignsf3"
+ [(set (match_dup 3)
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:SF (abs:SF (match_dup 1))))
+ (set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && !HONOR_NANS (SFmode) && !HONOR_SIGNED_ZEROS (SFmode)"
+ {
+ operands[3] = gen_reg_rtx (SFmode);
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = CONST0_RTX (SFmode);
+ })
+
+(define_expand "copysigndf3"
+ [(set (match_dup 3)
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:DF (abs:DF (match_dup 1))))
+ (set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && !HONOR_NANS (DFmode) && !HONOR_SIGNED_ZEROS (DFmode)"
+ {
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = CONST0_RTX (DFmode);
+ })
+
+;; For MIN, MAX, and conditional move, we use DEFINE_EXPANDs that involve an
+;; fsel instruction and some auxiliary computations. Then we just have a
+;; single DEFINE_INSN for fsel, plus define_splits that rewrite min/max
+;; expressions into fsel sequences if combine creates them.
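+;;
+;; A hand-written sketch of the idea (not compiler output): when NaNs and
+;; trapping math can be ignored, smax can be open-coded as a subtract
+;; feeding fsel, since fsel selects on its first operand being >= 0:
+;;
+;;   fsub f0,f1,f2      ; f0 = a - b
+;;   fsel f3,f0,f1,f2   ; f3 = (f0 >= 0.0) ? a : b, i.e. max (a, b)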
+(define_expand "smaxsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+(define_expand "sminsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operator:SF 3 "min_max_operator"
+ [(match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_operand:SI 3 "gpc_reg_operand" "")))]
+ "TARGET_ISEL"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; We use the BASE_REGS for the isel input operands because, if rA is
+;; r0, the literal value 0 (not the register's contents) is placed in rD
+;; when the condition is true. Similarly for rB, because we may swap the
+;; operands and rB may end up being rA.
+;;
+;; We need 2 patterns: an unsigned and a signed pattern. We could
+;; leave out the mode in operand 4 and use one pattern, but reload can
+;; change the mode underneath our feet and then gets confused trying
+;; to reload the value.
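+;;
+;; Sketch of the instruction's selection semantics (BI names a CR bit;
+;; an rA of r0 would read as the literal value 0, hence BASE_REGS above):
+;;
+;;   isel rD,rA,rB,BI   ; rD = CR[BI] ? rA : rB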
+(define_insn "isel_signed"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:CC 4 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 2 "gpc_reg_operand" "b")
+ (match_operand:SI 3 "gpc_reg_operand" "b")))]
+ "TARGET_ISEL"
+ "*
+{ return output_isel (operands); }"
+ [(set_attr "length" "4")])
+
+(define_insn "isel_unsigned"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:CCUNS 4 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 2 "gpc_reg_operand" "b")
+ (match_operand:SI 3 "gpc_reg_operand" "b")))]
+ "TARGET_ISEL"
+ "*
+{ return output_isel (operands); }"
+ [(set_attr "length" "4")])
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_operand:SF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*fselsfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "*fseldfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*negdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*absdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "*nabsdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*adddf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*subdf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*muldf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+(define_insn "fred"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_POPCNTB && flag_finite_math_only"
+ "fre %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+{
+ if (swdiv && !optimize_size && TARGET_POPCNTB
+ && flag_finite_math_only && !flag_trapping_math)
+ {
+ rs6000_emit_swdivdf (operands[0], operands[1], operands[2]);
+ DONE;
+ }
+})
+
+(define_insn "*divdf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (mult:DF (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f"))
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))))]
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+;; The conditional move instructions allow us to perform max and min
+;; operations even when there are no dedicated floating-point min/max
+;; instructions, provided NaNs and trapping math can be ignored.
+
+(define_expand "smaxdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+(define_expand "smindf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operator:DF 3 "min_max_operator"
+ [(match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && !flag_trapping_math"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_operand:DF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*fseldfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "*fselsfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; Conversions to and from floating-point.
+
+(define_expand "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unsigned_fix:SI (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "")
+
+(define_expand "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "")
+
+; For each of these conversions, there is a define_expand, a define_insn
+; with a '#' template, and a define_split (with C code). The idea is
+; to allow constant folding with the template of the define_insn,
+; then to have the insns split later (between sched1 and final).
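+;
+; For instance, "floatsidf2" below expands to "*floatsidf2_internal",
+; whose template is just "#"; the matching define_split then open-codes
+; the classic bit trick (a rough sketch, register names are placeholders):
+;
+;   xoris rT,rSRC,0x8000   ; flip the sign bit of the integer
+;   stw   rT,low(mem)      ; low word of the double image
+;   stw   rHI,high(mem)    ; high word 0x43300000 (exponent 0x433)
+;   lfd   fT,mem           ; reload the bit pattern as a double
+;   fsub  fDST,fT,fBIAS    ; subtract 2^52 + 2^31 to recover the value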
+
+(define_expand "floatsidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))])]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatsidf2 (operands[0], operands[1]));
+ DONE;
+ }
+ if (TARGET_POWERPC64)
+ {
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
+ rtx t1 = gen_reg_rtx (DImode);
+ rtx t2 = gen_reg_rtx (DImode);
+ emit_insn (gen_floatsidf_ppc64 (operands[0], operands[1], mem, t1, t2));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode));
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp to get
+ * better scheduling, at the cost of some stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
+ operands[5] = gen_reg_rtx (DFmode);
+ operands[6] = gen_reg_rtx (SImode);
+}")
+
+(define_insn_and_split "*floatsidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "memory_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=&f"))
+ (clobber (match_operand:SI 6 "gpc_reg_operand" "=&r"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
+ "
+{
+ rtx lowword, highword;
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_insn (gen_xorsi3 (operands[6], operands[1],
+ GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
+ emit_move_insn (lowword, operands[6]);
+ emit_move_insn (highword, operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}"
+ [(set_attr "length" "24")])
+
+(define_expand "floatunssisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (unsigned_float:SF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "")
+
+(define_expand "floatunssidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))])]
+ "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatunssidf2 (operands[0], operands[1]));
+ DONE;
+ }
+ if (TARGET_POWERPC64)
+ {
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
+ rtx t1 = gen_reg_rtx (DImode);
+ rtx t2 = gen_reg_rtx (DImode);
+ emit_insn (gen_floatunssidf_ppc64 (operands[0], operands[1], mem,
+ t1, t2));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode));
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
+ operands[5] = gen_reg_rtx (DFmode);
+}")
+
+(define_insn_and_split "*floatunssidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "memory_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=&f"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
+ "
+{
+ rtx lowword, highword;
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_move_insn (lowword, operands[1]);
+ emit_move_insn (highword, operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}"
+ [(set_attr "length" "20")])
+
+(define_expand "fix_truncdfsi2"
+ [(parallel [(set (match_operand:SI 0 "fix_trunc_dest_operand" "")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))])]
+ "(TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_fix_truncdfsi2 (operands[0], operands[1]));
+ DONE;
+ }
+ operands[2] = gen_reg_rtx (DImode);
+ if (TARGET_PPC_GFXOPT)
+ {
+ rtx orig_dest = operands[0];
+ if (! memory_operand (orig_dest, GET_MODE (orig_dest)))
+ operands[0] = assign_stack_temp (SImode, GET_MODE_SIZE (SImode), 0);
+ emit_insn (gen_fix_truncdfsi2_internal_gfxopt (operands[0], operands[1],
+ operands[2]));
+ if (operands[0] != orig_dest)
+ emit_move_insn (orig_dest, operands[0]);
+ DONE;
+ }
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ operands[3] = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
+}")
+
+(define_insn_and_split "*fix_truncdfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 3 "memory_operand" "=o"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[3]))"
+ [(pc)]
+ "
+{
+ rtx lowword;
+ gcc_assert (MEM_P (operands[3]));
+ lowword = adjust_address (operands[3], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
+
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_move_insn (operands[3], operands[2]);
+ emit_move_insn (operands[0], lowword);
+ DONE;
+}"
+ [(set_attr "length" "16")])
+
+(define_insn_and_split "fix_truncdfsi2_internal_gfxopt"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_PPC_GFXOPT"
+ "#"
+ "&& 1"
+ [(pc)]
+ "
+{
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_insn (gen_stfiwx (operands[0], operands[2]));
+ DONE;
+}"
+ [(set_attr "length" "16")])
+
+; Here, we use (set (reg) (unspec:DI [(fix:SI ...)] UNSPEC_FCTIWZ))
+; rather than (set (subreg:SI (reg)) (fix:SI ...))
+; because the first makes it clear that operand 0 is not live
+; before the instruction.
+(define_insn "fctiwz"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (unspec:DI [(fix:SI (match_operand:DF 1 "gpc_reg_operand" "f"))]
+ UNSPEC_FCTIWZ))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fcirz|fctiwz} %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "btruncdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "btruncsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceildf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceilsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floordf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floorsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "rounddf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "roundsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+; An UNSPEC is used so we don't have to support SImode in FP registers.
+(define_insn "stfiwx"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (unspec:SI [(match_operand:DI 1 "gpc_reg_operand" "f")]
+ UNSPEC_STFIWX))]
+ "TARGET_PPC_GFXOPT"
+ "stfiwx %1,%y0"
+ [(set_attr "type" "fpstore")])
+
+(define_expand "floatsisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float:SF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "")
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float:DF (match_operand:DI 1 "gpc_reg_operand" "*f")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fcfid %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn_and_split "floatsidf_ppc64"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (clobber (match_operand:DI 2 "memory_operand" "=o"))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" "=r"))
+ (clobber (match_operand:DI 4 "gpc_reg_operand" "=f"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& 1"
+ [(set (match_dup 3) (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 2))
+ (set (match_dup 0) (float:DF (match_dup 4)))]
+ "")
+
+(define_insn_and_split "floatunssidf_ppc64"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (clobber (match_operand:DI 2 "memory_operand" "=o"))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" "=r"))
+ (clobber (match_operand:DI 4 "gpc_reg_operand" "=f"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& 1"
+ [(set (match_dup 3) (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 2))
+ (set (match_dup 0) (float:DF (match_dup 4)))]
+ "")
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=*f")
+ (fix:DI (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fctidz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "floatdisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float:SF (match_operand:DI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "
+{
+ rtx val = operands[1];
+ if (!flag_unsafe_math_optimizations)
+ {
+ rtx label = gen_label_rtx ();
+ val = gen_reg_rtx (DImode);
+ emit_insn (gen_floatdisf2_internal2 (val, operands[1], label));
+ emit_label (label);
+ }
+ emit_insn (gen_floatdisf2_internal1 (operands[0], val));
+ DONE;
+}")
+
+;; This is not IEEE compliant if rounding mode is "round to nearest".
+;; If the DI->DF conversion is inexact, then it's possible to suffer
+;; from double rounding.
+(define_insn_and_split "floatdisf2_internal1"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float:SF (match_operand:DI 1 "gpc_reg_operand" "*f")))
+ (clobber (match_scratch:DF 2 "=f"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (float:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "")
+
+;; Twiddles bits to avoid double rounding.
+;; Bits that might be truncated when converting to DFmode are replaced
+;; by a bit that won't be lost at that stage, but is below the SFmode
+;; rounding position.
+(define_expand "floatdisf2_internal2"
+ [(set (match_dup 3) (ashiftrt:DI (match_operand:DI 1 "" "")
+ (const_int 53)))
+ (parallel [(set (match_operand:DI 0 "" "") (and:DI (match_dup 1)
+ (const_int 2047)))
+ (clobber (scratch:CC))])
+ (set (match_dup 3) (plus:DI (match_dup 3)
+ (const_int 1)))
+ (set (match_dup 0) (plus:DI (match_dup 0)
+ (const_int 2047)))
+ (set (match_dup 4) (compare:CCUNS (match_dup 3)
+ (const_int 2)))
+ (set (match_dup 0) (ior:DI (match_dup 0)
+ (match_dup 1)))
+ (parallel [(set (match_dup 0) (and:DI (match_dup 0)
+ (const_int -2048)))
+ (clobber (scratch:CC))])
+ (set (pc) (if_then_else (geu (match_dup 4) (const_int 0))
+ (label_ref (match_operand:DI 2 "" ""))
+ (pc)))
+ (set (match_dup 0) (match_dup 1))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (CCUNSmode);
+}")
+
+;; Define the DImode operations that can be done in a small number
+;; of instructions. The & constraints are to prevent the register
+;; allocator from allocating registers that overlap with the inputs
+;; (for example, having an input in 7,8 and an output in 6,7). We
+;; also allow for the output being the same as one of the inputs.
+
+(define_insn "*adddi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,0,0")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %L0,%L1,%L2\;{ae|adde} %0,%1,%2\"
+ : \"{ai|addic} %L0,%L1,%2\;{a%G2e|add%G2e} %0,%1\";
+ else
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %0,%1,%2\;{ae|adde} %L0,%L1,%L2\"
+ : \"{ai|addic} %0,%1,%2\;{a%G2e|add%G2e} %L0,%L1\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r,r")
+ (minus:DI (match_operand:DI 1 "reg_or_short_operand" "r,I,0,r,I")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r,r,0,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %L0,%L2,%L1\;{sfe|subfe} %0,%2,%1\"
+ : \"{sfi|subfic} %L0,%L2,%1\;{sf%G1e|subf%G1e} %0,%2\";
+ else
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %0,%2,%1\;{sfe|subfe} %L0,%L2,%L1\"
+ : \"{sfi|subfic} %0,%2,%1\;{sf%G1e|subf%G1e} %L0,%L2\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*negdi2_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"{sfi|subfic} %L0,%L1,0\;{sfze|subfze} %0,%1\"
+ : \"{sfi|subfic} %0,%1,0\;{sfze|subfze} %L0,%L1\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "! TARGET_POWERPC64"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mull_call ());
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (operands[0], gen_rtx_REG (DImode, 3));
+ else
+ {
+ emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
+ gen_rtx_REG (SImode, 3));
+ emit_move_insn (operand_subword (operands[0], 1, 0, DImode),
+ gen_rtx_REG (SImode, 4));
+ }
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_mulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "mulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2\;mfmq %L0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_insn "*mulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhw %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhw %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_insn "*umulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+(define_expand "smulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ ""
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mulh_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_smulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "smulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_insn "*smulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhw %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_expand "umulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (const_int 32))))]
+ "TARGET_POWERPC"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_insn "*umulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; If operands 0 and 2 are in the same register, we have a problem. But
+;; operands 0 and 1 (the usual case) can be in the same register. That's
+;; why we have the strange constraints below.
+(define_insn "ashldi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {sli|slwi} %0,%L1,%h2\;{cal %L0,0(0)|li %L0,0}
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2"
+ [(set_attr "length" "8")])
+
+(define_insn "lshrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {s%A2i|s%A2wi} %L0,%1,%h2\;{cal %0,0(0)|li %0,0}
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2"
+ [(set_attr "length" "8")])
+
+;; Shift by a variable amount is too complex to be worth open-coding. We
+;; just handle shifts by constants.
+(define_insn "ashrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "M,i")))
+ (clobber (match_scratch:SI 3 "=X,q"))]
+ "TARGET_POWER"
+ "@
+ {srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
+ sraiq %0,%1,%h2\;srliq %L0,%L1,%h2"
+ [(set_attr "length" "8")])
+
+(define_insn "ashrdi3_no_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "M,i")))]
+ "TARGET_32BIT && !TARGET_POWERPC64 && !TARGET_POWER && WORDS_BIG_ENDIAN"
+ "@
+ {srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
+ {sri|srwi} %L0,%L1,%h2\;insrwi %L0,%1,%h2,0\;{srai|srawi} %0,%1,%h2"
+ [(set_attr "type" "two,three")
+ (set_attr "length" "8,12")])
+
+(define_insn "*ashrdisi3_noppc64"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (const_int 32)) 4))]
+ "TARGET_32BIT && !TARGET_POWERPC64"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return \"\";
+ else
+ return \"mr %0,%1\";
+}"
+ [(set_attr "length" "4")])
+
+
+;; PowerPC64 DImode operations.
+
+(define_insn_and_split "absdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 0) (match_dup 2)))]
+ "")
+
+(define_insn_and_split "*nabsdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 2) (match_dup 0)))]
+ "")
+
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I")))]
+ "TARGET_POWERPC64"
+ "@
+ mulld %0,%1,%2
+ mulli %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "lmul")))])
+
+(define_insn "*muldi3_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ mulld. %3,%1,%2
+ #"
+ [(set_attr "type" "lmul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*muldi3_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (mult:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ mulld. %0,%1,%2
+ #"
+ [(set_attr "type" "lmul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "smuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhd %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+(define_insn "umuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhdu %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+(define_insn "rotldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,0")
+
+(define_insn "*rotldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %3,%1,%H2,0
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %0,%1,%H2,0
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri"))
+ (match_operand:DI 3 "mask64_operand" "n")))]
+ "TARGET_POWERPC64"
+ "rld%I2c%B3 %0,%1,%H2,%S3")
+
+(define_insn "*rotldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rld%I2c%B3. %4,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT"
+ "@
+ rld%I2c%B3. %0,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,56")
+
+(define_insn "*rotldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %3,%1,%H2,56
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:QI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal9"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %0,%1,%H2,56
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal10"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,48")
+
+(define_insn "*rotldi3_internal11"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %3,%1,%H2,48
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:HI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal12"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %0,%1,%H2,48
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal13"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,32")
+
+(define_insn "*rotldi3_internal14"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %3,%1,%H2,32
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:SI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal15"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rld%I2cl. %0,%1,%H2,32
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_ashldi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*ashldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "sld%I2 %0,%1,%H2")
+
+(define_insn "*ashldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ sld%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ sld%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "const_int_operand" "n")))]
+ "TARGET_POWERPC64 && includes_rldic_lshift_p (operands[2], operands[3])"
+ "rldic %0,%1,%H2,%W3")
+
+(define_insn "ashldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_64BIT && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %4,%1,%H2,%W3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %0,%1,%H2,%W3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "mask64_operand" "n")))]
+ "TARGET_POWERPC64 && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "rldicr %0,%1,%H2,%S3")
+
+(define_insn "ashldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_64BIT && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %4,%1,%H2,%S3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal9"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %0,%1,%H2,%S3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_lshrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*lshrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "srd%I2 %0,%1,%H2")
+
+(define_insn "*lshrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT "
+ "@
+ srd%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*lshrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ srd%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "WORDS_BIG_ENDIAN"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER && GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_ashrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else if (TARGET_32BIT && GET_CODE (operands[2]) == CONST_INT
+ && WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_ashrdi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*ashrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "srad%I2 %0,%1,%H2")
+
+(define_insn "*ashrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ srad%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ srad%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "?r,S,T,K,J,t")))
+ (clobber (match_scratch:CC 3 "=X,X,X,x,x,X"))]
+ "TARGET_POWERPC64"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2
+ #"
+ [(set_attr "type" "*,*,*,compare,compare,*")
+ (set_attr "length" "4,4,4,4,4,8")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" "")))
+ (clobber (match_scratch:CC 3 ""))]
+ "TARGET_POWERPC64
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 4))
+ (match_dup 5)))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (match_dup 6))
+ (match_dup 7)))]
+{
+ build_mask64_2_operands (operands[2], &operands[4]);
+})
+
+(define_insn "*anddi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r,r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT"
+ "@
+ and. %3,%1,%2
+ rldic%B2. %3,%1,0,%S2
+ rlwinm. %3,%1,0,%m2,%M2
+ andi. %3,%1,%b2
+ andis. %3,%1,%u2
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 3)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 5))
+ (match_dup 6)))
+ (parallel [(set (match_dup 0)
+ (compare:CC (and:DI (rotate:DI (match_dup 3)
+ (match_dup 7))
+ (match_dup 8))
+ (const_int 0)))
+ (clobber (match_dup 3))])]
+ "
+{
+ build_mask64_2_operands (operands[2], &operands[5]);
+}")
+
+(define_insn "*anddi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r,r,r")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT"
+ "@
+ and. %0,%1,%2
+ rldic%B2. %0,%1,0,%S2
+ rlwinm. %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "and64_2_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 5))
+ (match_dup 6)))
+ (parallel [(set (match_dup 3)
+ (compare:CC (and:DI (rotate:DI (match_dup 0)
+ (match_dup 7))
+ (match_dup 8))
+ (const_int 0)))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (match_dup 7))
+ (match_dup 8)))])]
+ "
+{
+ build_mask64_2_operands (operands[2], &operands[5]);
+}")
+
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ior:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_iordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (xor:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_xordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_insn "*booldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:DI 2 "logical_operand" "r,K,JF")]))]
+ "TARGET_POWERPC64"
+ "@
+ %q3 %0,%1,%2
+ %q3i %0,%1,%b2
+ %q3is %0,%1,%u2")
+
+(define_insn "*booldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*booldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split a logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "non_logical_cint_operand" "")]))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i3,i4;
+
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ {
+ HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]);
+ i3 = immed_double_const (value & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode);
+ i4 = GEN_INT (value & 0xffff);
+ }
+ else
+ {
+ i3 = GEN_INT (INTVAL (operands[2])
+ & (~ (HOST_WIDE_INT) 0xffff));
+ i4 = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ }
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[1], i3);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[0], i4);
+}")
+
+(define_insn "*boolcdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r")]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%2,%1")
+
+(define_insn "*boolcdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolcdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r"))]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%1,%2")
+
+(define_insn "*boolccdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Now define ways of moving data around.
+
+;; Set up a register with a value from the GOT table
+
+(define_expand "movsi_got"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_operand" "")
+ (match_dup 2)] UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "
+{
+ if (GET_CODE (operands[1]) == CONST)
+ {
+ rtx offset = const0_rtx;
+ HOST_WIDE_INT value;
+
+ operands[1] = eliminate_constant_term (XEXP (operands[1], 0), &offset);
+ value = INTVAL (offset);
+ if (value != 0)
+ {
+ rtx tmp = (no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode));
+ emit_insn (gen_movsi_got (tmp, operands[1]));
+ emit_insn (gen_addsi3 (operands[0], tmp, offset));
+ DONE;
+ }
+ }
+
+ operands[2] = rs6000_got_register (operands[1]);
+}")
+
+(define_insn "*movsi_got_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "b")]
+ UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "{l|lwz} %0,%a1@got(%2)"
+ [(set_attr "type" "load")])
+
+;; Used by sched, shorten_branches and final when the GOT pseudo reg
+;; didn't get allocated to a hard register.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "memory_operand" "")]
+ UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4
+ && flag_pic == 1
+ && (reload_in_progress || reload_completed)"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (unspec:SI [(match_dup 1)(match_dup 0)]
+ UNSPEC_MOVSI_GOT))]
+ "")
+
+;; For SI, we special-case integers that can't be loaded in one insn. We
+;; do the load 16 bits at a time. We could do this by loading from memory,

+;; and this is even supposed to be faster, but it is simpler not to get
+;; integers in the TOC.
+(define_insn "movsi_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "*movsi_internal1"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "=r,r,r,m,r,r,r,r,r,*q,*c*l,*h,*h")
+ (match_operand:SI 1 "input_operand" "r,U,m,r,I,L,n,R,*h,r,r,r,0"))]
+ "gpc_reg_operand (operands[0], SImode)
+ || gpc_reg_operand (operands[1], SImode)"
+ "@
+ mr %0,%1
+ {cal|la} %0,%a1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ #
+ {cal|la} %0,%a1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,*,load,store,*,*,*,*,mfjmpr,*,mtjmpr,*,*")
+ (set_attr "length" "4,4,4,4,4,4,8,4,4,4,4,4,4")])
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "(unsigned HOST_WIDE_INT) (INTVAL (operands[1]) + 0x8000) >= 0x10000
+ && (INTVAL (operands[1]) & 0xffff) != 0"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (ior:SI (match_dup 0)
+ (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], SImode, operands[1], 2);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*mov<mode>_internal2"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=y,x,?y")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "0,r,r")
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
+ ""
+ "@
+ {cmpi|cmp<wd>i} %2,%0,0
+ mr. %0,%1
+ #"
+ [(set_attr "type" "cmp,compare,cmp")
+ (set_attr "length" "4,4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "") (match_dup 1))]
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:HI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], HImode)
+ || gpc_reg_operand (operands[1], HImode)"
+ "@
+ mr %0,%1
+ lhz%U1%X1 %0,%1
+ sth%U0%X0 %1,%0
+ {lil|li} %0,%w1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,load,store,*,mfjmpr,*,mtjmpr,*")])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:INT 0 "general_operand" "")
+ (match_operand:INT 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], <MODE>mode); DONE; }")
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:QI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], QImode)
+ || gpc_reg_operand (operands[1], QImode)"
+ "@
+ mr %0,%1
+ lbz%U1%X1 %0,%1
+ stb%U0%X0 %1,%0
+ {lil|li} %0,%1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,load,store,*,mfjmpr,*,mtjmpr,*")])
+
+;; Here is how to move condition codes around. When we store CC data in
+;; an integer register or memory, we store just the high-order 4 bits.
+;; This lets us not shift in the most common case of CR0.
+(define_expand "movcc"
+ [(set (match_operand:CC 0 "nonimmediate_operand" "")
+ (match_operand:CC 1 "nonimmediate_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movcc_internal1"
+ [(set (match_operand:CC 0 "nonimmediate_operand" "=y,x,?y,y,r,r,r,r,r,q,cl,r,m")
+ (match_operand:CC 1 "general_operand" "y,r,r,O,x,y,r,I,h,r,r,m,r"))]
+ "register_operand (operands[0], CCmode)
+ || register_operand (operands[1], CCmode)"
+ "@
+ mcrf %0,%1
+ mtcrf 128,%1
+ {rlinm|rlwinm} %1,%1,%F0,0xffffffff\;mtcrf %R0,%1\;{rlinm|rlwinm} %1,%1,%f0,0xffffffff
+ crxor %0,%0,%0
+ mfcr %0%Q1
+ mfcr %0%Q1\;{rlinm|rlwinm} %0,%0,%f1,0xf0000000
+ mr %0,%1
+ {lil|li} %0,%1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%U1|stw%U0%U1} %1,%0"
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "0,3")
+ (const_string "cr_logical")
+ (eq_attr "alternative" "1,2")
+ (const_string "mtcr")
+ (eq_attr "alternative" "6,7,9")
+ (const_string "integer")
+ (eq_attr "alternative" "8")
+ (const_string "mfjmpr")
+ (eq_attr "alternative" "10")
+ (const_string "mtjmpr")
+ (eq_attr "alternative" "11")
+ (const_string "load")
+ (eq_attr "alternative" "12")
+ (const_string "store")
+ (ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "4,4,12,4,4,8,4,4,4,4,4,4,4")])
+
+;; For floating-point, we normally deal with the floating-point registers
+;; unless -msoft-float is used. The sole exception is that parameter passing
+;; can produce floating-point values in fixed-point registers. Unless the
+;; value is a simple constant or already in memory, we deal with this by
+;; allocating memory and copying the value explicitly via that memory location.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], SFmode); DONE; }")
+
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (! TARGET_POWERPC64)
+ operands[2] = operand_subword (operands[0], 0, 0, SFmode);
+ else
+ operands[2] = gen_lowpart (SImode, operands[0]);
+
+ operands[3] = gen_int_mode (l, SImode);
+}")
+
+(define_insn "*movsf_hardfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=!r,!r,m,f,f,m,*c*l,*q,!r,*h,!r,!r")
+ (match_operand:SF 1 "input_operand" "r,m,r,f,m,f,r,r,h,0,G,Fn"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode))
+ && (TARGET_HARD_FLOAT && TARGET_FPRS)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ fmr %0,%1
+ lfs%U1%X1 %0,%1
+ stfs%U0%X0 %1,%0
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #"
+ [(set_attr "type" "*,load,store,fp,fpload,fpstore,mtjmpr,*,mfjmpr,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,8")])
+
+(define_insn "*movsf_softfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,cl,q,r,r,m,r,r,r,r,r,*h")
+ (match_operand:SF 1 "input_operand" "r,r,r,h,m,r,I,L,R,G,Fn,0"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode))
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS)"
+ "@
+ mr %0,%1
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ {cal|la} %0,%a1
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,mtjmpr,*,mfjmpr,load,store,*,*,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,8,4")])
+
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], DFmode); DONE; }")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
+#endif
+}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+ operands[4] = gen_int_mode (l[endian], SImode);
+ operands[5] = gen_int_mode (l[1 - endian], SImode);
+}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ HOST_WIDE_INT val;
+#endif
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = gen_lowpart (DImode, operands[0]);
+ /* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */
+#if HOST_BITS_PER_WIDE_INT >= 64
+ val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32
+ | ((HOST_WIDE_INT)(unsigned long)l[1 - endian]));
+
+ operands[3] = gen_int_mode (val, DImode);
+#else
+ operands[3] = immed_double_const (l[1 - endian], l[endian], DImode);
+#endif
+}")
+
+;; Don't have reload use general registers to load a constant. First,
+;; it might not work if the output operand is the equivalent of
+;; a non-offsettable memref, but also it is less efficient than loading
+;; the constant into an FP register, since it will probably be used there.
+;; The "??" is a kludge until we can figure out a more reasonable way
+;; of handling these non-offsettable values.
+(define_insn "*movdf_hardfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=!r,??r,m,f,f,m,!r,!r,!r")
+ (match_operand:DF 1 "input_operand" "r,m,r,f,m,f,G,H,F"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+	/* We normally copy the low-numbered register first.  However, if
+	   the first register of operand 0 is the same as the second register
+	   of operand 1, we must copy in the opposite order.  */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ if (rs6000_offsettable_memref_p (operands[1])
+ || (GET_CODE (operands[1]) == MEM
+ && (GET_CODE (XEXP (operands[1], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)))
+ {
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[1], 0));
+ if (refers_to_regno_p (REGNO (operands[0]),
+ REGNO (operands[0]) + 1,
+ operands[1], 0))
+ {
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"{lx|lwzx} %0,%1\";
+ }
+ else
+ {
+ output_asm_insn (\"{lx|lwzx} %0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ }
+ case 2:
+ if (rs6000_offsettable_memref_p (operands[0])
+ || (GET_CODE (operands[0]) == MEM
+ && (GET_CODE (XEXP (operands[0], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)))
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[0], 0));
+ output_asm_insn (\"{stx|stwx} %1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{stx|stwx} %L1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ case 3:
+ return \"fmr %0,%1\";
+ case 4:
+ return \"lfd%U1%X1 %0,%1\";
+ case 5:
+ return \"stfd%U0%X0 %1,%0\";
+ case 6:
+ case 7:
+ case 8:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*")
+ (set_attr "length" "8,16,16,4,4,4,8,12,16")])
+
+(define_insn "*movdf_softfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F"))]
+ "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || TARGET_E500_SINGLE)
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+	/* We normally copy the low-numbered register first.  However, if
+	   the first register of operand 0 is the same as the second register
+	   of operand 1, we must copy in the opposite order.  */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ case 2:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,*,*,*")
+ (set_attr "length" "8,8,8,8,12,16")])
+
+; ld/std require word-aligned displacements -> 'Y' constraint.
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdf_hardfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r")
+ (match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ std%U0%X0 %1,%0
+ ld%U1%X1 %0,%1
+ mr %0,%1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #
+ #"
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")])
+
+(define_insn "*movdf_softfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Y,r,cl,r,r,r,r,*h")
+ (match_operand:DF 1 "input_operand" "Y,r,r,r,h,G,H,F,0"))]
+ "TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ mr %0,%1
+ mt%0 %1
+ mf%1 %0
+ #
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "load,store,*,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,8,12,16,4")])
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "any_operand" ""))]
+ "!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128"
+ "{ rs6000_emit_move (operands[0], operands[1], TFmode); DONE; }")
+
+; It's important to list the o->f and f->o moves before f->f because
+; otherwise reload, given m->f, will try to pick f->f and reload it,
+; which doesn't make progress. Likewise r->Y must be before r->r.
+(define_insn_and_split "*movtf_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,f,r,Y,r")
+ (match_operand:TF 1 "input_operand" "f,o,f,YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "8,8,8,20,20,16")])
+
+(define_insn_and_split "*movtf_softfloat"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,Y,r")
+ (match_operand:TF 1 "input_operand" "YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "20,20,16")])
+
+(define_expand "extenddftf2"
+ [(parallel [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "")))
+ (use (match_dup 2))])]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+{
+ operands[2] = CONST0_RTX (DFmode);
+ /* Generate GOT reference early for SVR4 PIC. */
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ operands[2] = validize_mem (force_const_mem (DFmode, operands[2]));
+})
+
+(define_insn_and_split "*extenddftf2_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,&f,r")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "fr,mf,mf,rmGHF")))
+ (use (match_operand:DF 2 "zero_reg_mem_operand" "rf,m,f,n"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word),
+ operands[1]);
+ emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word),
+ operands[2]);
+ DONE;
+})
+
+(define_expand "extendsftf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float_extend:TF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+{
+ rtx tmp = gen_reg_rtx (DFmode);
+ emit_insn (gen_extendsfdf2 (tmp, operands[1]));
+ emit_insn (gen_extenddftf2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "")
+
+(define_insn_and_split "trunctfdf2_internal1"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "0,f")))]
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "@
+ #
+ fmr %0,%1"
+ "&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfdf2_internal2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "fadd %0,%1,%L1"
+ [(set_attr "type" "fp")])
+
+(define_insn_and_split "trunctfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:DF 2 "=f"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (float_truncate:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "")
+
+(define_expand "floatsitf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (float:TF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+{
+ rtx tmp = gen_reg_rtx (DFmode);
+ expand_float (tmp, operands[1], false);
+ emit_insn (gen_extenddftf2 (operands[0], tmp));
+ DONE;
+})
+
+; fadd, but rounding towards zero.
+; This is probably not the optimal code sequence.
+(define_insn "fix_trunc_helper"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:TF 1 "gpc_reg_operand" "f")]
+ UNSPEC_FIX_TRUNC_TF))
+ (clobber (match_operand:DF 2 "gpc_reg_operand" "=&f"))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "mffs %2\n\tmtfsb1 31\n\tmtfsb0 30\n\tfadd %0,%1,%L1\n\tmtfsf 1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "length" "20")])
+
+(define_expand "fix_trunctfsi2"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))])]
+ "!TARGET_IEEEQUAD
+ && (TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+{
+ operands[2] = gen_reg_rtx (DFmode);
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+})
+
+(define_insn_and_split "*fix_trunctfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DF 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DF 3 "gpc_reg_operand" "=&f"))
+ (clobber (match_operand:DI 4 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 5 "memory_operand" "=o"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& (!no_new_pseudos || offsettable_nonstrict_memref_p (operands[5]))"
+ [(pc)]
+{
+ rtx lowword;
+ emit_insn (gen_fix_trunc_helper (operands[2], operands[1], operands[3]));
+
+ gcc_assert (MEM_P (operands[5]));
+ lowword = adjust_address (operands[5], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
+
+ emit_insn (gen_fctiwz (operands[4], operands[2]));
+ emit_move_insn (operands[5], operands[4]);
+ emit_move_insn (operands[0], lowword);
+ DONE;
+})
+
+(define_insn "negtf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (neg:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fneg %L0,%L1\;fneg %0,%1\";
+ else
+ return \"fneg %0,%1\;fneg %L0,%L1\";
+}"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+(define_expand "abstf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (abs:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "
+{
+ rtx label = gen_label_rtx ();
+ emit_insn (gen_abstf2_internal (operands[0], operands[1], label));
+ emit_label (label);
+ DONE;
+}")
+
+(define_expand "abstf2_internal"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (match_operand:TF 1 "gpc_reg_operand" "f"))
+ (set (match_dup 3) (match_dup 5))
+ (set (match_dup 5) (abs:DF (match_dup 5)))
+ (set (match_dup 4) (compare:CCFP (match_dup 3) (match_dup 5)))
+ (set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 6) (neg:DF (match_dup 6)))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "
+{
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (CCFPmode);
+ operands[5] = simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word);
+}")
+
+;; Next come the multi-word integer loads and stores, and the
+;; load- and store-multiple insns.
+
+; List r->r after r->"o<>", otherwise reload will try to reload a
+; non-offsettable address by using r->r which won't make progress.
+(define_insn "*movdi_internal32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=o<>,r,r,*f,*f,m,r")
+ (match_operand:DI 1 "input_operand" "r,r,m,f,m,f,IJKnGHF"))]
+ "! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ #
+ #
+ #
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ #"
+ [(set_attr "type" "load,*,store,fp,fpload,fpstore,*")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ DImode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
+#endif
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "input_operand" ""))]
+ "reload_completed && !TARGET_POWERPC64
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+;; APPLE LOCAL begin 4874526 5234419
+; ld/std require word-aligned displacements -> 'Y' constraint
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdi_internal64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,Y,r,r,r,r,r,*f,*f,Y,r,*h,*h")
+ (match_operand:DI 1 "input_operand" "Y,r,r,I,L,nF,R,f,Y,f,*h,r,0"))]
+ "TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ mr %0,%1
+ li %0,%1
+ lis %0,%v1
+ #
+ {cal|la} %0,%a1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mf%1 %0
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "load,store,*,*,*,*,*,fp,fpload,fpstore,mfjmpr,mtjmpr,*")
+ (set_attr "length" "4,4,4,4,4,20,4,4,4,4,4,4,4")])
+;; APPLE LOCAL end 4874526 5234419
+
+;; An immediate value valid for a single instruction, hiding in a const_double.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operand:DI 1 "const_double_operand" "F"))]
+ "HOST_BITS_PER_WIDE_INT == 32 && TARGET_POWERPC64
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && num_insns_constant (operands[1], DImode) == 1"
+ "*
+{
+ return ((unsigned HOST_WIDE_INT)
+ (CONST_DOUBLE_LOW (operands[1]) + 0x8000) < 0x10000)
+ ? \"li %0,%1\" : \"lis %0,%v1\";
+}")
+
+;; Generate all one-bits and clear left or right.
+;; Use (and:DI (rotate:DI ...)) to avoid the unnecessary clobber of anddi3.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "mask64_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (const_int -1))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (const_int 0))
+ (match_dup 1)))]
+ "")
+
+;; Split a load of a large constant into the appropriate five-instruction
+;; sequence. Handle anything in a constant number of insns.
+;; When non-easy constants can go in the TOC, this should use the
+;; easy_fp_constant predicate.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+;; TImode is similar, except that we usually want to compute the address into
+;; a register and use lsi/stsi (the exception is during reload). MQ is also
+;; clobbered in stsi for POWER, so we need a SCRATCH for it.
+
+;; We say that MQ is clobbered in the last alternative because the first
+;; alternative would never get used otherwise since it would need a reload
+;; while the 2nd alternative would not. We put memory cases first so they
+;; are preferred. Otherwise, we'd try to reload the output instead of
+;; giving the SCRATCH mq.
+
+(define_insn "*movti_power"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))
+ (clobber (match_scratch:SI 2 "=q,q#X,X,X,X,X"))]
+ "TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+
+ case 0:
+ if (TARGET_STRING)
+ return \"{stsi|stswi} %1,%P0,16\";
+ case 1:
+ case 2:
+ return \"#\";
+ case 3:
+ /* If the address is not used in the output, we can use lsi. Otherwise,
+ fall through to generating four loads. */
+ if (TARGET_STRING
+ && ! reg_overlap_mentioned_p (operands[0], operands[1]))
+ return \"{lsi|lswi} %0,%P1,16\";
+ /* ... fall through ... */
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "store,store,*,load,load,*")])
+
+(define_insn "*movti_string"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,o<>,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))]
+ "! TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ if (TARGET_STRING)
+ return \"{stsi|stswi} %1,%P0,16\";
+ case 1:
+ case 2:
+ return \"#\";
+ case 3:
+ /* If the address is not used in the output, we can use lsi. Otherwise,
+ fall through to generating four loads. */
+ if (TARGET_STRING
+ && ! reg_overlap_mentioned_p (operands[0], operands[1]))
+ return \"{lsi|lswi} %0,%P1,16\";
+ /* ... fall through ... */
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "store_ux,store_ux,*,load_ux,load_ux,*")])
+
+(define_insn "*movti_ppc64"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o<>,r")
+ (match_operand:TI 1 "input_operand" "r,r,m"))]
+ "TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
+ || gpc_reg_operand (operands[1], TImode))"
+ "#"
+ [(set_attr "type" "*,load,store")])
+
+(define_split
+ [(set (match_operand:TI 0 "gpc_reg_operand" "")
+ (match_operand:TI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ TImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ TImode);
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
+ operands[5] = operands[1];
+ }
+ else
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "input_operand" ""))]
+ "reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ "TARGET_STRING && !TARGET_POWERPC64"
+ "
+{
+ int regno;
+ int count;
+ rtx op1;
+ int i;
+
+ /* Support only loading a constant number of fixed-point registers from
+ memory and only bother with this if more than two; the machine
+ doesn't support more than eight. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 2
+ || INTVAL (operands[2]) > 8
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) >= 32)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[0]);
+
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+ op1 = replace_equiv_address (operands[1],
+ force_reg (SImode, XEXP (operands[1], 0)));
+
+ for (i = 0; i < count; i++)
+ XVECEXP (operands[3], 0, i)
+ = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno + i),
+ adjust_address_nv (op1, SImode, i * 4));
+}")
+
+(define_insn "*ldmsi8"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))
+ (set (match_operand:SI 8 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 24))))
+ (set (match_operand:SI 9 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 28))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 8"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi7"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))
+ (set (match_operand:SI 8 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 24))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 7"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi6"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 6"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi5"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 5"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi4"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 4"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi3"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 3"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (clobber (scratch:SI))
+ (use (match_operand:SI 2 "" ""))])]
+ "TARGET_STRING && !TARGET_POWERPC64"
+ "
+{
+ int regno;
+ int count;
+ rtx to;
+ rtx op0;
+ int i;
+
+ /* Support only storing a constant number of fixed-point registers to
+ memory and only bother with this if more than two; the machine
+ doesn't support more than eight. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 2
+ || INTVAL (operands[2]) > 8
+ || GET_CODE (operands[0]) != MEM
+ || GET_CODE (operands[1]) != REG
+ || REGNO (operands[1]) >= 32)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[1]);
+
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
+ to = force_reg (SImode, XEXP (operands[0], 0));
+ op0 = replace_equiv_address (operands[0], to);
+
+ XVECEXP (operands[3], 0, 0)
+ = gen_rtx_SET (VOIDmode, adjust_address_nv (op0, SImode, 0), operands[1]);
+ XVECEXP (operands[3], 0, 1) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_SCRATCH (SImode));
+
+ for (i = 1; i < count; i++)
+ XVECEXP (operands[3], 0, i + 1)
+ = gen_rtx_SET (VOIDmode,
+ adjust_address_nv (op0, SImode, i * 4),
+ gen_rtx_REG (SImode, regno + i));
+}")
+
+(define_insn "*stmsi8"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 28)))
+ (match_operand:SI 10 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 9"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi7"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 8"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi6"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 7"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi5"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 6"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi4"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 5"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi3"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 4"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi8_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 28)))
+ (match_operand:SI 10 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 9"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi7_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 8"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi6_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 7"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi5_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 6"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi4_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 5"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*stmsi3_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 4"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")])
+
+(define_expand "setmemsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 3 "" ""))])]
+ ""
+ "
+{
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (expand_block_clear (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; String/block move insn.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand:BLK 1 "" ""))
+ (use (match_operand:SI 2 "" ""))
+ (use (match_operand:SI 3 "" ""))])]
+ ""
+ "
+{
+ if (expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; Move up to 32 bytes at a time.  The fixed registers are needed because
+;; the register allocator cannot be relied on to allocate eight consecutive
+;; word registers on its own.
+;; rD/rS = r5 is the preferred, efficient form.
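+;; Note that a length operand of 0 in the patterns below encodes a 32-byte
+;; move: the string instructions {lsi|lswi}/{stsi|stswi} interpret an NB
+;; field of 0 as 32.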
+(define_expand "movmemsi_8reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+ || INTVAL (operands[2]) == 0)
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+ || INTVAL (operands[2]) == 0)
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+;; Move up to 24 bytes at a time. The fixed registers are needed because the
+;; register allocator doesn't have a clue about allocating 6 word registers.
+;; rD/rS = r5 is preferred, efficient form.
+(define_expand "movmemsi_6reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+;; Move up to 16 bytes at a time, using 4 fixed registers to avoid spill
+;; problems with TImode.
+;; rD/rS = r5 is preferred, efficient form.
+(define_expand "movmemsi_4reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+;; Move up to 8 bytes at a time.
+(define_expand "movmemsi_2reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (match_scratch:DI 4 ""))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_STRING && ! TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:DI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER && ! TARGET_POWERPC64
+ && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:DI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER && ! TARGET_POWERPC64
+ && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+;; Move up to 4 bytes at a time.
+(define_expand "movmemsi_1reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "length" "8")])
+
+;; Define insns that do load or store with update. Some of these we can
+;; get by using pre-decrement or pre-increment, but the hardware can also
+;; do cases where the increment is not the size of the object.
+;;
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
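+;;
+;; For example, "lwzu %3,4(%0)" loads the word at %0+4 into %3 and then
+;; writes the effective address %0+4 back into %0, so one instruction
+;; performs both the load and the pointer bump.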
+
+(define_insn "*movdi_update1"
+ [(set (match_operand:DI 3 "gpc_reg_operand" "=r,r")
+ (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
+ (match_operand:DI 2 "reg_or_aligned_short_operand" "r,I"))))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && TARGET_UPDATE"
+ "@
+ ldux %3,%0,%2
+ ldu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "movdi_<mode>_update"
+ [(set (mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
+ (match_operand:P 2 "reg_or_aligned_short_operand" "r,I")))
+ (match_operand:DI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && TARGET_UPDATE"
+ "@
+ stdux %3,%0,%2
+ stdu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movsi_update1"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ {lux|lwzux} %3,%0,%2
+ {lu|lwzu} %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movsi_update2"
+ [(set (match_operand:DI 3 "gpc_reg_operand" "=r")
+ (sign_extend:DI
+ (mem:SI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0")
+ (match_operand:DI 2 "gpc_reg_operand" "r")))))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "lwaux %3,%0,%2"
+ [(set_attr "type" "load_ext_ux")])
+
+(define_insn "movsi_update"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ {stux|stwux} %3,%0,%2
+ {stu|stwu} %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movhi_update1"
+ [(set (match_operand:HI 3 "gpc_reg_operand" "=r,r")
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ lhzux %3,%0,%2
+ lhzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movhi_update2"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ lhzux %3,%0,%2
+ lhzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movhi_update3"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ lhaux %3,%0,%2
+ lhau %3,%2(%0)"
+ [(set_attr "type" "load_ext_ux,load_ext_u")])
+
+(define_insn "*movhi_update4"
+ [(set (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:HI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ sthux %3,%0,%2
+ sthu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movqi_update1"
+ [(set (match_operand:QI 3 "gpc_reg_operand" "=r,r")
+ (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ lbzux %3,%0,%2
+ lbzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movqi_update2"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ lbzux %3,%0,%2
+ lbzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movqi_update3"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:QI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE"
+ "@
+ stbux %3,%0,%2
+ stbu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movsf_update1"
+ [(set (match_operand:SF 3 "gpc_reg_operand" "=f,f")
+ (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_UPDATE"
+ "@
+ lfsux %3,%0,%2
+ lfsu %3,%2(%0)"
+ [(set_attr "type" "fpload_ux,fpload_u")])
+
+(define_insn "*movsf_update2"
+ [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SF 3 "gpc_reg_operand" "f,f"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_UPDATE"
+ "@
+ stfsux %3,%0,%2
+ stfsu %3,%2(%0)"
+ [(set_attr "type" "fpstore_ux,fpstore_u")])
+
+(define_insn "*movsf_update3"
+ [(set (match_operand:SF 3 "gpc_reg_operand" "=r,r")
+ (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE"
+ "@
+ {lux|lwzux} %3,%0,%2
+ {lu|lwzu} %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movsf_update4"
+ [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SF 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE"
+ "@
+ {stux|stwux} %3,%0,%2
+ {stu|stwu} %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movdf_update1"
+ [(set (match_operand:DF 3 "gpc_reg_operand" "=f,f")
+ (mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_UPDATE"
+ "@
+ lfdux %3,%0,%2
+ lfdu %3,%2(%0)"
+ [(set_attr "type" "fpload_ux,fpload_u")])
+
+(define_insn "*movdf_update2"
+ [(set (mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:DF 3 "gpc_reg_operand" "f,f"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_UPDATE"
+ "@
+ stfdux %3,%0,%2
+ stfdu %3,%2(%0)"
+ [(set_attr "type" "fpstore_ux,fpstore_u")])
+
+;; Peephole to convert two consecutive FP loads or stores into lfq/stfq.
+
+(define_insn "*lfq_power2"
+ [(set (match_operand:V2DF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2DF 1 "memory_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "lfq%U1%X1 %0,%1")
+
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "memory_operand" ""))
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_operand:DF 3 "memory_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && registers_ok_for_quad_peep (operands[0], operands[2])
+ && mems_ok_for_quad_peep (operands[1], operands[3])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[1] = widen_memory_access (operands[1], V2DFmode, 0);
+ operands[0] = gen_rtx_REG (V2DFmode, REGNO (operands[0]));")
+
+(define_insn "*stfq_power2"
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (match_operand:V2DF 1 "gpc_reg_operand" "f"))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "stfq%U0%X0 %1,%0")
+
+
+(define_peephole2
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (match_operand:DF 1 "gpc_reg_operand" ""))
+ (set (match_operand:DF 2 "memory_operand" "")
+ (match_operand:DF 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && registers_ok_for_quad_peep (operands[1], operands[3])
+ && mems_ok_for_quad_peep (operands[0], operands[2])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[0] = widen_memory_access (operands[0], V2DFmode, 0);
+ operands[1] = gen_rtx_REG (V2DFmode, REGNO (operands[1]));")
+
+;; After inserting conditional returns we can sometimes have
+;; unnecessary register moves. Unfortunately we cannot have a
+;; modeless peephole here, because some single SImode sets have early
+;; clobber outputs. Although those sets expand to multi-ppc-insn
+;; sequences, using get_attr_length here will smash the operands
+;; array. Neither is there an early_clobber_p predicate.
+;; Disallow subregs for E500 so we don't munge frob_di_df_2.
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "any_operand" ""))
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "!(TARGET_E500_DOUBLE && GET_CODE (operands[2]) == SUBREG)
+ && peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
+
+(define_peephole2
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "any_operand" ""))
+ (set (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
+
+
+;; TLS support.
+
+;; "b" output constraint here and on tls_ld to support tls linker optimization.
+(define_insn "tls_gd_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGD))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%2@got@tlsgd")
+
+(define_insn "tls_gd_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGD))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%2@got@tlsgd")
+
+(define_insn "tls_ld_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")]
+ UNSPEC_TLSLD))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%&@got@tlsld")
+
+(define_insn "tls_ld_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")]
+ UNSPEC_TLSLD))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%&@got@tlsld")
+
+(define_insn "tls_dtprel_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPREL))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%2@dtprel")
+
+(define_insn "tls_dtprel_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPREL))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%2@dtprel")
+
+(define_insn "tls_dtprel_ha_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELHA))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addis %0,%1,%2@dtprel@ha")
+
+(define_insn "tls_dtprel_ha_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELHA))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addis %0,%1,%2@dtprel@ha")
+
+(define_insn "tls_dtprel_lo_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELLO))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%2@dtprel@l")
+
+(define_insn "tls_dtprel_lo_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELLO))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%2@dtprel@l")
+
+(define_insn "tls_got_dtprel_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTDTPREL))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "lwz %0,%2@got@dtprel(%1)")
+
+(define_insn "tls_got_dtprel_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTDTPREL))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "ld %0,%2@got@dtprel(%1)")
+
+(define_insn "tls_tprel_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPREL))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%2@tprel")
+
+(define_insn "tls_tprel_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPREL))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%2@tprel")
+
+(define_insn "tls_tprel_ha_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELHA))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addis %0,%1,%2@tprel@ha")
+
+(define_insn "tls_tprel_ha_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELHA))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addis %0,%1,%2@tprel@ha")
+
+(define_insn "tls_tprel_lo_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELLO))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "addi %0,%1,%2@tprel@l")
+
+(define_insn "tls_tprel_lo_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELLO))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "addi %0,%1,%2@tprel@l")
+
+;; "b" output constraint here and on tls_tls input to support linker tls
+;; optimization. The linker may edit the instructions emitted by a
+;; tls_got_tprel/tls_tls pair to addis,addi.
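+;; For illustration, a sketch of that edit for the 32-bit case, where
+;; r2 holds the thread pointer:
+;;      lwz  9,x@got@tprel(30)        addis 9,2,x@tprel@ha
+;;      add  3,9,x@tls          -->   addi  3,9,x@tprel@l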
+(define_insn "tls_got_tprel_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTTPREL))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "lwz %0,%2@got@tprel(%1)")
+
+(define_insn "tls_got_tprel_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTTPREL))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "ld %0,%2@got@tprel(%1)")
+
+(define_insn "tls_tls_32"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTLS))]
+ "HAVE_AS_TLS && !TARGET_64BIT"
+ "add %0,%1,%2@tls")
+
+(define_insn "tls_tls_64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand:DI 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTLS))]
+ "HAVE_AS_TLS && TARGET_64BIT"
+ "add %0,%1,%2@tls")
+
+;; Next come insns related to the calling sequence.
+;;
+;; First, an insn to allocate new stack space for dynamic use (e.g., alloca).
+;; We move the back-chain and decrement the stack pointer.
+
+(define_expand "allocate_stack"
+ [(set (match_operand 0 "gpc_reg_operand" "=r")
+ (minus (reg 1) (match_operand 1 "reg_or_short_operand" "")))
+ (set (reg 1)
+ (minus (reg 1) (match_dup 1)))]
+ ""
+ "
+{ rtx chain = gen_reg_rtx (Pmode);
+ rtx stack_bot = gen_rtx_MEM (Pmode, stack_pointer_rtx);
+ rtx neg_op0;
+
+ emit_move_insn (chain, stack_bot);
+
+ /* Check stack bounds if necessary. */
+ if (current_function_limit_stack)
+ {
+ rtx available;
+ available = expand_binop (Pmode, sub_optab,
+ stack_pointer_rtx, stack_limit_rtx,
+ NULL_RTX, 1, OPTAB_WIDEN);
+ emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
+ }
+
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -32767
+ || INTVAL (operands[1]) > 32768)
+ {
+ neg_op0 = gen_reg_rtx (Pmode);
+ if (TARGET_32BIT)
+ emit_insn (gen_negsi2 (neg_op0, operands[1]));
+ else
+ emit_insn (gen_negdi2 (neg_op0, operands[1]));
+ }
+ else
+ neg_op0 = GEN_INT (- INTVAL (operands[1]));
+
+ if (TARGET_UPDATE)
+ emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update : gen_movdi_di_update))
+ (stack_pointer_rtx, stack_pointer_rtx, neg_op0, chain));
+
+ else
+ {
+ emit_insn ((* ((TARGET_32BIT) ? gen_addsi3 : gen_adddi3))
+ (stack_pointer_rtx, stack_pointer_rtx, neg_op0));
+ emit_move_insn (gen_rtx_MEM (Pmode, stack_pointer_rtx), chain);
+ }
+
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+}")
+
+;; These patterns say how to save and restore the stack pointer. We need not
+;; save the stack pointer at function level since we are careful to
+;; preserve the backchain. At block level, we have to restore the backchain
+;; when we restore the stack pointer.
+;;
+;; For nonlocal gotos, we must save both the stack pointer and its
+;; backchain and restore both. Note that in the nonlocal case, the
+;; save area is a memory location.
+
+(define_expand "save_stack_function"
+ [(match_operand 0 "any_operand" "")
+ (match_operand 1 "any_operand" "")]
+ ""
+ "DONE;")
+
+(define_expand "restore_stack_function"
+ [(match_operand 0 "any_operand" "")
+ (match_operand 1 "any_operand" "")]
+ ""
+ "DONE;")
+
+;; Adjust stack pointer (op0) to a new value (op1).
+;; First copy old stack backchain to new location, and ensure that the
+;; scheduler won't reorder the sp assignment before the backchain write.
+(define_expand "restore_stack_block"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 2))
+ (set (match_dup 5) (unspec:BLK [(match_dup 5)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 1 "register_operand" ""))]
+ ""
+ "
+{
+ operands[2] = gen_reg_rtx (Pmode);
+ operands[3] = gen_frame_mem (Pmode, operands[0]);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
+ operands[5] = gen_frame_mem (BLKmode, operands[0]);
+}")
+
+(define_expand "save_stack_nonlocal"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_operand 0 "memory_operand" "") (match_dup 3))
+ (set (match_dup 2) (match_operand 1 "register_operand" ""))]
+ ""
+ "
+{
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
+
+ /* Copy the backchain to the first word, sp to the second. */
+ operands[0] = adjust_address_nv (operands[0], Pmode, 0);
+ operands[2] = adjust_address_nv (operands[0], Pmode, units_per_word);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
+}")
+
+(define_expand "restore_stack_nonlocal"
+ [(set (match_dup 2) (match_operand 1 "memory_operand" ""))
+ (set (match_dup 3) (match_dup 4))
+ (set (match_dup 5) (match_dup 2))
+ (set (match_dup 6) (unspec:BLK [(match_dup 6)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "") (match_dup 3))]
+ ""
+ "
+{
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
+
+ /* Restore the backchain from the first word, sp from the second. */
+ operands[2] = gen_reg_rtx (Pmode);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[1] = adjust_address_nv (operands[1], Pmode, 0);
+ operands[4] = adjust_address_nv (operands[1], Pmode, units_per_word);
+ operands[5] = gen_frame_mem (Pmode, operands[3]);
+ operands[6] = gen_frame_mem (BLKmode, operands[0]);
+}")
+
+;; TOC register handling.
+
+;; Code to initialize the TOC register...
+
+(define_insn "load_toc_aix_si"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_TOC))
+ (use (reg:SI 2))])]
+ "DEFAULT_ABI == ABI_AIX && TARGET_32BIT"
+ "*
+{
+ char buf[30];
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+ operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ operands[2] = gen_rtx_REG (Pmode, 2);
+ return \"{l|lwz} %0,%1(%2)\";
+}"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_aix_di"
+ [(parallel [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_TOC))
+ (use (reg:DI 2))])]
+ "DEFAULT_ABI == ABI_AIX && TARGET_64BIT"
+ "*
+{
+ char buf[30];
+#ifdef TARGET_RELOCATABLE
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\",
+ !TARGET_MINIMAL_TOC || TARGET_RELOCATABLE);
+#else
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+#endif
+ if (TARGET_ELF)
+ strcat (buf, \"@toc\");
+ operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ operands[2] = gen_rtx_REG (Pmode, 2);
+ return \"ld %0,%1(%2)\";
+}"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_v4_pic_si"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(const_int 0)] UNSPEC_TOC))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1 && TARGET_32BIT"
+ "bl _GLOBAL_OFFSET_TABLE_@local-4"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "immediate_operand" "s"))
+ (use (unspec [(match_dup 1)] UNSPEC_TOC))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX
+ && (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
+ "bcl 20,31,%1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1b"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")]
+ UNSPEC_TOCPTR))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
+ "bcl 20,31,$+8\\n\\t.long %1-$"
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "load_toc_v4_PIC_2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (minus:SI (match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")))))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
+ "{l|lwz} %0,%2-%3(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_v4_PIC_3b"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (high:SI
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s")))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cau|addis} %0,%1,%2-%3@ha")
+
+(define_insn "load_toc_v4_PIC_3c"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s"))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cal|addi} %0,%1,%2-%3@l")
+
+;; If the TOC is shared across a whole translation unit, as happens with
+;; all the kinds of PIC that we support, we need to restore the TOC
+;; pointer only when jumping across translation units.
+;; On Darwin, we need to reload the picbase.
+
+(define_expand "builtin_setjmp_receiver"
+ [(use (label_ref (match_operand 0 "" "")))]
+ "(DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)"
+ "
+{
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ const char *picbase = machopic_function_base_name ();
+ rtx picrtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (picbase));
+ rtx picreg = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+ rtx tmplabrtx;
+ char tmplab[20];
+
+ ASM_GENERATE_INTERNAL_LABEL (tmplab, \"LSJR\",
+ CODE_LABEL_NUMBER (operands[0]));
+ tmplabrtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tmplab));
+
+ emit_insn (gen_load_macho_picbase (picreg, tmplabrtx));
+ emit_insn (gen_macho_correct_pic (picreg, picreg, picrtx, tmplabrtx));
+ }
+ else
+#endif
+ rs6000_emit_load_toc_table (FALSE);
+ DONE;
+}")
+
+;; Elf specific ways of loading addresses for non-PIC code.
+;; The output of this could be r0, but we make a very strong
+;; preference for a base register because it will usually
+;; be needed there.
+(define_insn "elf_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "{liu|lis} %0,%1@ha")
+
+(define_insn "elf_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "@
+ {cal|la} %0,%2@l(%1)
+ {ai|addic} %0,%1,%K2")
+
+;; A function pointer under AIX is a pointer to a data area whose first word
+;; contains the actual address of the function, whose second word contains a
+;; pointer to its TOC, and whose third word contains a value to place in the
+;; static chain register (r11). Note that if we load the static chain, our
+;; "trampoline" need not have any executable code.
+
+(define_expand "call_indirect_aix32"
+ [(set (match_dup 2)
+ (mem:SI (match_operand:SI 0 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 0)
+ (const_int 4))))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 0)
+ (const_int 8))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (scratch:SI))])]
+ "TARGET_32BIT"
+ "
+{ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "call_indirect_aix64"
+ [(set (match_dup 2)
+ (mem:DI (match_operand:DI 0 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 0)
+ (const_int 8))))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 0)
+ (const_int 16))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (scratch:SI))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = gen_reg_rtx (DImode); }")
+
+(define_expand "call_value_indirect_aix32"
+ [(set (match_dup 3)
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 1)
+ (const_int 4))))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 1)
+ (const_int 8))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (scratch:SI))])]
+ "TARGET_32BIT"
+ "
+{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "call_value_indirect_aix64"
+ [(set (match_dup 3)
+ (mem:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 1)
+ (const_int 8))))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 1)
+ (const_int 16))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (scratch:SI))])]
+ "TARGET_64BIT"
+ "
+{ operands[3] = gen_reg_rtx (DImode); }")
+
+;; Now the definitions for the call and call_value insns
+(define_expand "call"
+ [(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (scratch:SI))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
+
+ operands[0] = XEXP (operands[0], 0);
+
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
+ && flag_pic
+ && GET_CODE (operands[0]) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (operands[0]))
+ {
+ rtx call;
+ rtvec tmp;
+
+ tmp = gen_rtvec (3,
+ gen_rtx_CALL (VOIDmode,
+ gen_rtx_MEM (SImode, operands[0]),
+ operands[1]),
+ gen_rtx_USE (VOIDmode, operands[2]),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ call = emit_call_insn (gen_rtx_PARALLEL (VOIDmode, tmp));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call), pic_offset_table_rtx);
+ DONE;
+ }
+
+ if (GET_CODE (operands[0]) != SYMBOL_REF
+ || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[0]))
+ || (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[2]) & CALL_LONG) != 0))
+ {
+ if (INTVAL (operands[2]) & CALL_LONG)
+ operands[0] = rs6000_longcall_ref (operands[0]);
+
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[0] = force_reg (Pmode, operands[0]);
+ break;
+
+ case ABI_AIX:
+ /* AIX function pointers are really pointers to a three-word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_indirect_aix32 (force_reg (SImode,
+ operands[0]),
+ operands[1])
+ : gen_call_indirect_aix64 (force_reg (DImode,
+ operands[0]),
+ operands[1]));
+ DONE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}")
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand 1 "address_operand" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (scratch:SI))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+
+ operands[1] = XEXP (operands[1], 0);
+
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
+ && flag_pic
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (operands[1]))
+ {
+ rtx call;
+ rtvec tmp;
+
+ tmp = gen_rtvec (3,
+ gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_CALL (VOIDmode,
+ gen_rtx_MEM (SImode,
+ operands[1]),
+ operands[2])),
+ gen_rtx_USE (VOIDmode, operands[3]),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ call = emit_call_insn (gen_rtx_PARALLEL (VOIDmode, tmp));
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call), pic_offset_table_rtx);
+ DONE;
+ }
+
+ if (GET_CODE (operands[1]) != SYMBOL_REF
+ || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[1]))
+ || (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[3]) & CALL_LONG) != 0))
+ {
+ if (INTVAL (operands[3]) & CALL_LONG)
+ operands[1] = rs6000_longcall_ref (operands[1]);
+
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[1] = force_reg (Pmode, operands[1]);
+ break;
+
+ case ABI_AIX:
+ /* AIX function pointers are really pointers to a three-word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_value_indirect_aix32 (operands[0],
+ force_reg (SImode,
+ operands[1]),
+ operands[2])
+ : gen_call_value_indirect_aix64 (operands[0],
+ force_reg (DImode,
+ operands[1]),
+ operands[2]));
+ DONE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}")
+
+;; Call to function in current module. No TOC pointer reload needed.
+;; Operand2 is nonzero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
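+;;
+;; For illustration: under the V.4 ABI, condition-register bit 6 tells
+;; a varargs callee whether any arguments arrived in FP registers, so
+;; the call insns below emit "creqv 6,6,6" to set that bit or
+;; "crxor 6,6,6" to clear it before the branch.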
+
+(define_insn "*call_local32"
+ [(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "(INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+/* APPLE LOCAL begin -mlongcall */
+#ifdef RS6000_LONG_BRANCH
+ if (!flag_pic)
+ return output_call (insn, operands, 0, 0);
+ else
+#endif
+/* APPLE LOCAL end -mlongcall */
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_local64"
+ [(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_local32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "(INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*call_value_local64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+;; Call to function which may be in another module. Restore the TOC
+;; pointer (r2) after the call unless this is System V.
+;; Operand2 is nonzero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
+
+(define_insn "*call_indirect_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (match_scratch:SI 2 "=l,l"))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T0l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "*call_indirect_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (match_scratch:SI 2 "=l,l"))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T0l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_indirect_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T1l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 4 "=l"))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_indirect_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T1l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 4 "=l"))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+;; A function pointer under System V is just a normal pointer.
+;; operands[0] is the function pointer
+;; operands[1] is the stack size to clean up
+;; operands[2] is the value FUNCTION_ARG returns for the VOID argument
+;; which indicates how to set cr1
+
+(define_insn "*call_indirect_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ return "b%T0l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || (DEFAULT_ABI == ABI_V4
+ && (INTVAL (operands[2]) & CALL_LONG) == 0))"
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ /* APPLE LOCAL begin ObjC direct dispatch. */
+ /* Generate 'bla' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ sprintf (buf, "bla " HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch. */
+
+#if TARGET_MACHO
+ return output_call (insn, operands, 0, 2);
+#else
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ /* The magic 32768 offset here and in the other sysv call insns
+ corresponds to the offset of r30 in .got2, as given by LCTOC1.
+ See sysv4.h:toc_section. */
+ return "bl %z0+32768@plt";
+ else
+ return "bl %z0@plt";
+ }
+ else
+ return "bl %z0";
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_indirect_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ return "b%T1l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_value_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || (DEFAULT_ABI == ABI_V4
+ && (INTVAL (operands[3]) & CALL_LONG) == 0))"
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ /* APPLE LOCAL begin ObjC direct dispatch. */
+ /* Generate 'bla' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ sprintf (buf, "bla " HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch. */
+
+#if TARGET_MACHO
+ return output_call (insn, operands, 1, 3);
+#else
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return "bl %z1+32768@plt";
+ else
+ return "bl %z1@plt";
+ }
+ else
+ return "bl %z1";
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+;; Call subroutine returning any type.
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, const0_rtx, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; APPLE LOCAL begin sibcall patterns
+;; APPLE MERGE modify FSF patterns below instead?
+;; This and similar patterns must be marked as using LR; otherwise
+;; dataflow will try to delete the store into it. This is true
+;; even when the actual reg to jump to is in CTR, when LR was
+;; saved and restored around the PIC-setting BCL.
+(define_insn "*sibcall_symbolic"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "s,c"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ /* APPLE LOCAL begin ObjC direct dispatch */
+ /* Generate 'ba' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ if (which_alternative != 0)
+ abort ();
+ sprintf (buf, \"ba \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch */
+
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: abort ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ /* APPLE LOCAL begin ObjC direct dispatch */
+ /* Generate 'ba' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ if (which_alternative != 0)
+ abort ();
+ sprintf (buf, \"ba \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch */
+
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: abort ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "longjump"
+ [(parallel
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))
+ (use (match_operand 1 "" ""))])]
+ ""
+ "jmp %z1,%l0"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+;; APPLE LOCAL end sibcall patterns
+
+;; sibling call patterns
+(define_expand "sibcall"
+ [(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (return)])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
+
+ operands[0] = XEXP (operands[0], 0);
+ operands[3] = gen_reg_rtx (SImode);
+
+}")
+
+;; This and similar patterns must be marked as using LR; otherwise
+;; dataflow will try to delete the store into it. This is true
+;; even when the actual reg to jump to is in CTR, when LR was
+;; saved and restored around the PIC-setting BCL.
+(define_insn "*sibcall_local32"
+ [(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "(INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z0@local\" : \"b %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_local64"
+ [(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z0@local\" : \"b %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_value_local32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "(INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z1@local\" : \"b %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*sibcall_value_local64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z1@local\" : \"b %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (use (match_operand:SI 3 "register_operand" "l"))
+ (return)]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "b %z0"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (use (match_operand:SI 3 "register_operand" "l"))
+ (return)]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "b %z0"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (use (match_operand:SI 4 "register_operand" "l"))
+ (return)]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "b %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (use (match_operand:SI 4 "register_operand" "l"))
+ (return)]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "b %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || DEFAULT_ABI == ABI_V4)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return \"b %z0+32768@plt\";
+ else
+ return \"b %z0@plt\";
+ }
+ else
+ return \"b %z0\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "address_operand" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (use (match_operand 4 "" ""))
+ (return)])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+
+ operands[1] = XEXP (operands[1], 0);
+ operands[4] = gen_reg_rtx (SImode);
+
+}")
+
+(define_insn "*sibcall_value_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || DEFAULT_ABI == ABI_V4)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return \"b %z1+32768@plt\";
+ else
+ return \"b %z1@plt\";
+ }
+ else
+ return \"b %z1\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_expand "sibcall_epilogue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_epilogue (TRUE);
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCK)]
+ ""
+ "")
+
+;; Compare insns are next. Note that the RS/6000 has two types of compares,
+;; signed & unsigned, and one type of branch.
+;;
+;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
+;; insns, and branches.  We store the operands of compares until we see
+;; how the comparison is going to be used.
+(define_expand "cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:GPR 0 "gpc_reg_operand" "")
+ (match_operand:GPR 1 "reg_or_short_operand" "")))]
+ ""
+ "
+{
+ /* Take care of the possibility that operands[1] is negative but this
+ might end up used as a logical (unsigned) comparison, for which an
+ immediate insn doesn't exist; force the constant into a register.  */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "cmp<mode>"
+ [(set (cc0) (compare (match_operand:FP 0 "gpc_reg_operand" "")
+ (match_operand:FP 1 "gpc_reg_operand" "")))]
+ ""
+ "
+{
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 1;
+ DONE;
+}")
+
+(define_expand "beq"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (EQ, operands[0]); DONE; }")
+
+(define_expand "bne"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (NE, operands[0]); DONE; }")
+
+(define_expand "bge"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GE, operands[0]); DONE; }")
+
+(define_expand "bgt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GT, operands[0]); DONE; }")
+
+(define_expand "ble"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LE, operands[0]); DONE; }")
+
+(define_expand "blt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LT, operands[0]); DONE; }")
+
+(define_expand "bgeu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GEU, operands[0]); DONE; }")
+
+(define_expand "bgtu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GTU, operands[0]); DONE; }")
+
+(define_expand "bleu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LEU, operands[0]); DONE; }")
+
+(define_expand "bltu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LTU, operands[0]); DONE; }")
+
+(define_expand "bunordered"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNORDERED, operands[0]); DONE; }")
+
+(define_expand "bordered"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (ORDERED, operands[0]); DONE; }")
+
+(define_expand "buneq"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNEQ, operands[0]); DONE; }")
+
+(define_expand "bunge"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNGE, operands[0]); DONE; }")
+
+(define_expand "bungt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNGT, operands[0]); DONE; }")
+
+(define_expand "bunle"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNLE, operands[0]); DONE; }")
+
+(define_expand "bunlt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNLT, operands[0]); DONE; }")
+
+(define_expand "bltgt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LTGT, operands[0]); DONE; }")
+
+;; For SNE, we would prefer that the xor/abs sequence be used for integers.
+;; For SEQ, likewise, except that comparisons with zero should be done
+;; with an scc insn.  However, due to the order in which combine sees the
+;; resulting insns, we must, in fact, allow SEQ for integers.  Fail in
+;; the cases we don't want to handle.
+(define_expand "seq"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (EQ, operands[0]); DONE; }")
+
+(define_expand "sne"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p)
+ FAIL;
+
+ rs6000_emit_sCOND (NE, operands[0]);
+ DONE;
+}")
+
+;; A >= 0 is best done the portable way for A an integer.
+(define_expand "sge"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (GE, operands[0]);
+ DONE;
+}")
+
+;; A > 0 is best done using the portable sequence, so fail in that case.
+(define_expand "sgt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (GT, operands[0]);
+ DONE;
+}")
+
+;; A <= 0 is best done the portable way for A an integer.
+(define_expand "sle"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (LE, operands[0]);
+ DONE;
+}")
+
+;; A < 0 is best done in the portable way for A an integer.
+(define_expand "slt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (LT, operands[0]);
+ DONE;
+}")
+
+(define_expand "sgeu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GEU, operands[0]); DONE; }")
+
+(define_expand "sgtu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GTU, operands[0]); DONE; }")
+
+(define_expand "sleu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LEU, operands[0]); DONE; }")
+
+(define_expand "sltu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LTU, operands[0]); DONE; }")
+
+(define_expand "sunordered"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNORDERED, operands[0]); DONE; }")
+
+(define_expand "sordered"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && TARGET_E500 && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (ORDERED, operands[0]); DONE; }")
+
+(define_expand "suneq"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (UNEQ, operands[0]); DONE; }")
+
+(define_expand "sunge"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (UNGE, operands[0]); DONE; }")
+
+(define_expand "sungt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (UNGT, operands[0]); DONE; }")
+
+(define_expand "sunle"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (UNLE, operands[0]); DONE; }")
+
+(define_expand "sunlt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (UNLT, operands[0]); DONE; }")
+
+(define_expand "sltgt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LTGT, operands[0]); DONE; }")
+
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
+ else
+ emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn "stack_protect_setsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:SI 2 "=&r") (const_int 0))]
+ "TARGET_32BIT"
+ "{l%U1%X1|lwz%U1%X1} %2,%1\;{st%U0%X0|stw%U0%X0} %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "stack_protect_setdi"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:DI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:DI 2 "=&r") (const_int 0))]
+ "TARGET_64BIT"
+ "ld%U1%X1 %2,%1\;std%U0%X0 %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]),
+ UNSPEC_SP_TEST);
+ rs6000_compare_fp_p = 0;
+ emit_jump_insn (gen_beq (operands[2]));
+ DONE;
+})
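+
+;; Roughly: the expander above routes the canary slot (operand 0) and
+;; the guard word (operand 1, redirected to a TLS slot off r2/r13 when
+;; TARGET_THREAD_SSP_OFFSET is defined) into an UNSPEC_SP_TEST compare,
+;; and gen_beq then branches to operand 2, the all-is-well label, on
+;; equality.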
+
+(define_insn "stack_protect_testsi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:SI 1 "memory_operand" "m,m")
+ (match_operand:SI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:SI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;{cmpl|cmplw} %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
+(define_insn "stack_protect_testdi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:DI 1 "memory_operand" "m,m")
+ (match_operand:DI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:DI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;cmpld %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
+
+;; Here are the actual compare insns.
+(define_insn "*cmp<mode>_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")))]
+ ""
+ "{cmp%I2|cmp<wd>%I2} %0,%1,%2"
+ [(set_attr "type" "cmp")])
+
+;; If we are comparing a register for equality with a large constant,
+;; we can do this with an XOR followed by a compare. But this is profitable
+;; only if the large constant is only used for the comparison (and in this
+;; case we already have a register to reuse as scratch).
+;;
+;; For 64-bit registers, we could only do so if the constant's bit 15 is clear:
+;; otherwise we'd need to XOR with FFFFFFFF????0000 which is not available.
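+;;
+;; For example (with illustrative register numbers): to test
+;; X == 0x12345678, the low half sign-extends to SEXTC = 0x5678, so
+;; XORV = C ^ SEXTC = 0x12340000, and the peephole below turns the
+;; lis/ori constant load plus cmpw into roughly
+;;	xoris r10,r9,0x1234
+;;	cmpwi cr0,r10,0x5678
+;; since (X ^ XORV) == SEXTC exactly when X == C.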
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "logical_const_operand" ""))
+ (set (match_dup 0) (match_operator:SI 3 "boolean_or_operator"
+ [(match_dup 0)
+ (match_operand:SI 2 "logical_const_operand" "")]))
+ (set (match_operand:CC 4 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 5 "gpc_reg_operand" "")
+ (match_dup 0)))
+ (set (pc)
+ (if_then_else (match_operator 6 "equality_operator"
+ [(match_dup 4) (const_int 0)])
+ (match_operand 7 "" "")
+ (match_operand 8 "" "")))]
+ "peep2_reg_dead_p (3, operands[0])
+ && peep2_reg_dead_p (4, operands[4])"
+ [(set (match_dup 0) (xor:SI (match_dup 5) (match_dup 9)))
+ (set (match_dup 4) (compare:CC (match_dup 0) (match_dup 10)))
+ (set (pc) (if_then_else (match_dup 6) (match_dup 7) (match_dup 8)))]
+
+{
+ /* Get the constant we are comparing against, and see what it looks like
+ when sign-extended from 16 to 32 bits.  Then see what constant we could
+ XOR with C to get that sign-extended value; XORing the register with the
+ same constant preserves the equality.  */
+ rtx cnst = simplify_const_binary_operation (GET_CODE (operands[3]),
+ SImode,
+ operands[1], operands[2]);
+ HOST_WIDE_INT c = INTVAL (cnst);
+ HOST_WIDE_INT sextc = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT xorv = c ^ sextc;
+
+ operands[9] = GEN_INT (xorv);
+ operands[10] = GEN_INT (sextc);
+})
+
+(define_insn "*cmpsi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_u_short_operand" "rK")))]
+ ""
+ "{cmpl%I2|cmplw%I2} %0,%1,%b2"
+ [(set_attr "type" "cmp")])
+
+(define_insn "*cmpdi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_u_short_operand" "rK")))]
+ ""
+ "cmpld%I2 %0,%1,%b2"
+ [(set_attr "type" "cmp")])
+
+;; The following two insns don't exist as single insns, but if we provide
+;; them, we can swap an add and compare, which will enable us to overlap more
+;; of the required delay between a compare and branch. We generate code for
+;; them by splitting.
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "u_short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+(define_split
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "u_short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CCUNS (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+(define_insn "*cmpsf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "*cmpdf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+;; Only need to compare second words if first words equal
+(define_insn "*cmptf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "fcmpu %0,%1,%2\;bne %0,$+8\;fcmpu %0,%L1,%L2"
+ [(set_attr "type" "fpcompare")
+ (set_attr "length" "12")])
+
+(define_insn_and_split "*cmptf_internal2"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:DF 3 "=f"))
+ (clobber (match_scratch:DF 4 "=f"))
+ (clobber (match_scratch:DF 5 "=f"))
+ (clobber (match_scratch:DF 6 "=f"))
+ (clobber (match_scratch:DF 7 "=f"))
+ (clobber (match_scratch:DF 8 "=f"))
+ (clobber (match_scratch:DF 9 "=f"))
+ (clobber (match_scratch:DF 10 "=f"))]
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 13))
+ (set (match_dup 4) (match_dup 14))
+ (set (match_dup 9) (abs:DF (match_dup 5)))
+ (set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 3)))
+ (set (pc) (if_then_else (ne (match_dup 0) (const_int 0))
+ (label_ref (match_dup 11))
+ (pc)))
+ (set (match_dup 0) (compare:CCFP (match_dup 5) (match_dup 7)))
+ (set (pc) (label_ref (match_dup 12)))
+ (match_dup 11)
+ (set (match_dup 10) (minus:DF (match_dup 5) (match_dup 7)))
+ (set (match_dup 9) (minus:DF (match_dup 6) (match_dup 8)))
+ (set (match_dup 9) (plus:DF (match_dup 10) (match_dup 9)))
+ (set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 4)))
+ (match_dup 12)]
+{
+ REAL_VALUE_TYPE rv;
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+
+ operands[5] = simplify_gen_subreg (DFmode, operands[1], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[1], TFmode, lo_word);
+ operands[7] = simplify_gen_subreg (DFmode, operands[2], TFmode, hi_word);
+ operands[8] = simplify_gen_subreg (DFmode, operands[2], TFmode, lo_word);
+ operands[11] = gen_label_rtx ();
+ operands[12] = gen_label_rtx ();
+ real_inf (&rv);
+ operands[13] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (rv, DFmode));
+ operands[14] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (dconst0,
+ DFmode));
+ if (TARGET_TOC)
+ {
+ operands[13] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[13], 0)));
+ operands[14] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[14], 0)));
+ set_mem_alias_set (operands[13], get_TOC_alias_set ());
+ set_mem_alias_set (operands[14], get_TOC_alias_set ());
+ }
+})
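+
+;; A sketch of the split above: it first compares |hi1| against
+;; infinity; finite values take the label-11 path, which compares
+;; (hi1 - hi2) + (lo1 - lo2) against 0.0, while infinite high parts
+;; are compared directly.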
+
+;; Now we have the scc insns.  Several surrounding operations can be
+;; folded into them, since the mfcr/rlwinm pair that extracts a CR bit
+;; can rotate the bit to any position and can set CR0 as it does so.
+;;
+;; Note that the sequence would probably be faster if we could schedule
+;; an insn between the mfcr and the rlwinm, but that is tricky; leave it
+;; for now.  In most cases the insns below which don't go through an
+;; intermediate CR field will be used instead.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ ""
+ "mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+;; Same as above, but get the GT bit.
+(define_insn "move_from_CR_gt_bit"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))]
+ "TARGET_E500"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,31,31"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "8")])
+
+;; Same as above, but get the OV/ORDERED bit.
+(define_insn "move_from_CR_ov_bit"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_OV))]
+ "TARGET_ISEL"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%t1,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64"
+ "mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "TARGET_32BIT"
+ "@
+ mfcr %3%Q2\;{rlinm.|rlwinm.} %3,%3,%J1,1
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "8,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[4] = GEN_INT (count);
+ operands[5] = GEN_INT (put_bit);
+
+ return \"mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%4,%5,%5\";
+}"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ ""
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ /* Force split for non-cc0 compare. */
+ if (which_alternative == 1)
+ return \"#\";
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[5] = GEN_INT (count);
+ operands[6] = GEN_INT (put_bit);
+
+ return \"mfcr %4%Q2\;{rlinm.|rlwinm.} %4,%4,%5,%6,%6\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "8,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; There is a 3-cycle delay between consecutive mfcr instructions,
+;; so it is useful to combine two scc instructions into one mfcr.
+
+(define_peephole
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r")
+ (match_operator:SI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "REGNO (operands[2]) != REGNO (operands[5])"
+ "mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "12")])
+
+(define_peephole
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:DI 3 "gpc_reg_operand" "=r")
+ (match_operator:DI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64 && REGNO (operands[2]) != REGNO (operands[5])"
+ "mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "12")])
+
+;; There are some scc insns that can be done directly, without a compare.
+;; These are faster because they avoid the communication between the
+;; fixed-point and branch units.  In fact, we end up handling all of the
+;; integer scc insns either here or via the portable methods in
+;; emit_store_flag.
+;;
+;; Also support (neg (scc ..)) since that construct is used to replace
+;; branches, (plus (scc ..) ..) since that construct is common and
+;; takes no more insns than scc, and (and (neg (scc ..)) ..) in the
+;; cases where it is no more expensive than (neg (scc ..)).
+
+;; Have reload force a constant into a register for the simple insns that
+;; otherwise won't accept constants. We do this because it is faster than
+;; the cmp/mfcr sequence we would otherwise generate.
+
+(define_mode_attr scc_eq_op2 [(SI "rKLI")
+ (DI "rKJI")])
+
+(define_insn_and_split "*eq<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))]
+ "!TARGET_POWER"
+ "#"
+ "!TARGET_POWER"
+ [(set (match_dup 0)
+ (clz:GPR (match_dup 3)))
+ (set (match_dup 0)
+ (lshiftrt:GPR (match_dup 0) (match_dup 4)))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+
+ operands[4] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
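+
+;; A sketch of the split above, for SImode: cntlzw returns 32 for a
+;; zero input and at most 31 otherwise, so (cntlzw X) >> 5 is exactly
+;; (X == 0).  When comparing against a nonzero C, the XOR (or add of
+;; -C) emitted first reduces the problem to a compare against zero.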
+
+(define_insn_and_split "*eq<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC
+ (eq:P (match_operand:P 1 "gpc_reg_operand" "=r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (eq:P (match_dup 1) (match_dup 2)))]
+ "!TARGET_POWER && optimize_size"
+ "#"
+ "!TARGET_POWER && optimize_size"
+ [(set (match_dup 0)
+ (clz:P (match_dup 4)))
+ (parallel [(set (match_dup 3)
+ (compare:CC (lshiftrt:P (match_dup 0) (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 0)
+ (lshiftrt:P (match_dup 0) (match_dup 5)))])]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[4] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[4] = operands[1];
+
+ operands[5] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
+
+(define_insn "*eqsi_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I")))
+ (clobber (match_scratch:SI 3 "=r,&r,r,r,r"))]
+ "TARGET_POWER"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %3,%1,0\;{ae|adde} %0,%3,%1
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0"
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
+
+;; We have insns of the form shown by the first define_insn below. If
+;; there is something inside the comparison operation, we must split it.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "" "")
+ (match_operand:SI 3
+ "reg_or_cint_operand" "")])
+ (match_operand:SI 4 "gpc_reg_operand" "")))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "! gpc_reg_operand (operands[2], SImode)"
+ [(set (match_dup 5) (match_dup 2))
+ (set (match_dup 2) (plus:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])
+ (match_dup 4)))])
+
+(define_insn "*plus_eqsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r")
+ (plus:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))]
+ "TARGET_32BIT"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze|addze} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
+
+(define_insn "*compare_plus_eqsi"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
+ "TARGET_32BIT && optimize_size"
+ "@
+ xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,0\;{aze.|addze.} %4,%3
+ {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "scc_eq_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && optimize_size && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (eq:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_eqsi_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && optimize_size"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze.|addze.} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "scc_eq_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && optimize_size && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_eq0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))))]
+ ""
+ "{ai|addic} %0,%1,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*neg_eq<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "%r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (eq:P (match_dup 3) (const_int 0))))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+ })
+
+;; Simplify (ne X (const_int 0)) on the PowerPC.  No need to on the Power,
+;; since its nabs/sr sequence is just as fast.
+(define_insn "*ne0si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (lshiftrt:SI (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31)))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "! TARGET_POWER && TARGET_32BIT && !TARGET_ISEL"
+ "{ai|addic} %2,%1,-1\;{sfe|subfe} %0,%2,%1"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*ne0di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (lshiftrt:DI (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63)))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_64BIT"
+ "addic %2,%1,-1\;subfe %0,%2,%1"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+;; This is what (plus (ne X (const_int 0)) Y) looks like.
+(define_insn "*plus_ne0si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "TARGET_32BIT"
+ "{ai|addic} %3,%1,-1\;{aze|addze} %0,%2"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*plus_ne0di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ "TARGET_64BIT"
+ "addic %3,%1,-1\;addze %0,%2"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*compare_plus_ne0si"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))
+ (clobber (match_scratch:SI 4 "=X,&r"))]
+ "TARGET_32BIT"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1)))
+ (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*compare_plus_ne0di"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addic %3,%1,-1\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1)))
+ (const_int 63))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_ne0si_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_ne0di_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addic %3,%1,-1\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O")))
+ (clobber (match_scratch:SI 3 "=r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,X,r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri.|srwi.} %0,%0,31
+ #
+ #"
+ [(set_attr "type" "compare,delayed_compare,compare,delayed_compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {srai|srawi} %0,%1,31\;{sf|subfc} %0,%1,%0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {srai|srawi} %4,%1,31\;{sf|subfc} %4,%1,%4\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {srai|srawi} %0,%1,31\;{sf|subfc} %0,%1,%0\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn "*leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*leu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (leu:P (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{aze|addze} %0,%3"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*and_neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (and:P (neg:P
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (lt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn_and_split "*ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*ltu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (ltu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
+ "")
+
+(define_insn_and_split "*plus_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,r")
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "reg_or_short_operand" "rI,rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*plus_ltu<mode>_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (ltu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
+ "")
+
+(define_insn "*neg_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
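+
+;; An aside on the carry trick above (an illustrative derivation, not
+;; from the original source): "subfc %0,%2,%1" computes %1 - %2 and sets
+;; CA to the no-borrow flag, i.e. CA = (%1 >=u %2); "subfe %0,%0,%0"
+;; then computes %0 + ~%0 + CA = CA - 1, which is 0 when %1 >=u %2 and
+;; -1 when %1 <u %2, i.e. exactly -(ltu %1 %2).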
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))
+ (clobber (match_scratch:SI 3 "=r"))]
+ "TARGET_POWER"
+ "doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+(define_insn "*geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*geu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_neg_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (geu:P (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (plus:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{aze|addze} %0,%3
+ {ai|addic} %0,%1,%n2\;{aze|addze} %0,%3"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %4,%2,%1\;{aze.|addze.} %4,%3
+ {ai|addic} %4,%1,%n2\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %0,%2,%1\;{aze.|addze.} %0,%3
+ {ai|addic} %0,%1,%n2\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I"))))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0
+ {sfi|subfic} %0,%1,-1\;{a%I2|add%I2c} %0,%0,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*and_neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (and:P (neg:P
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r")))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (gt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_gt0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gt:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))
+ (match_operand:P 2 "gpc_reg_operand" "r")))]
+ ""
+ "{a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze|addze} %0,%2"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addc %3,%1,%1\;subfe %3,%1,%3\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_32BIT"
+ "@
+ {a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ addc %0,%1,%1\;subfe %0,%1,%0\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn_and_split "*gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*gtu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (gtu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
+ "")
+
+(define_insn_and_split "*plus_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*plus_gtu<mode>_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (gtu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
+ "")
+
+(define_insn "*neg_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+
+;; Define both directions of branch and return. If we need a reload
+;; register, we'd rather use CR0, since it is much easier to copy a
+;; CC register value there.
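+;;
+;; For illustration (a sketch, not from the original source): for a
+;; short branch output_cbranch emits the conditional branch directly,
+;; e.g. "blt %l0" on an integer CR0 "less than", while an out-of-range
+;; branch becomes the reversed condition around an unconditional one,
+;; e.g. "bge $+8" followed by "b %l0".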
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 0, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (return)
+ (pc)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 0, insn);
+}"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 1, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (pc)
+ (return)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 1, insn);
+}"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "4")])
+
+;; Logic on condition register values.
+
+; This pattern matches things like
+; (set (reg:CCEQ 68) (compare:CCEQ (ior:SI (gt:SI (reg:CCFP 68) (const_int 0))
+; (eq:SI (reg:CCFP 68) (const_int 0)))
+; (const_int 1)))
+; which are generated by the branch logic.
+; Prefer destructive operations where BT = BB (for crXX BT,BA,BB)
+
+(define_insn "*cceq_ior_compare"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_operator"
+ [(match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "0,y")
+ (const_int 0)])])
+ (const_int 1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+; Why is the constant -1 here, but 1 in the previous pattern?
+; Because ~1 has all but the low bit set.
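+; A quick truth-table check (illustrative; x and y are the SImode 0/1
+; values of the two inner comparisons):
+;   (ior x y)         lies in {0,1} and equals  1  iff  x | y;
+;   (ior (not x) y)   is -1 when x = 0 or y = 1, and ~1 = -2 when
+;                     x = 1 and y = 0, so it equals -1  iff  ~x | y.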
+(define_insn ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_or_operator"
+ [(not:SI (match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y,y")
+ (const_int 0)]))
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "0,y")
+ (const_int 0)])])
+ (const_int -1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+(define_insn "*cceq_rev_compare"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1
+ "branch_positive_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "0,y")
+ (const_int 0)])
+ (const_int 0)))]
+ ""
+ "{crnor %E0,%j1,%j1|crnot %E0,%j1}"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+;; If we are comparing the result of two comparisons, this can be done
+;; using creqv or crxor.
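+;; Roughly (a sketch of the split below, not an authoritative reading):
+;; when both inner comparisons have the same polarity, one of them is
+;; negated and the XOR of the two CR bits is compared against -1, which
+;; assembles to a single creqv (result bit set iff the bits are equal);
+;; with opposite polarities the XOR is compared against 1 and assembles
+;; to crxor.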
+
+(define_insn_and_split ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operator 3 "branch_comparison_operator"
+ [(match_operand 4 "cc_reg_operand" "y")
+ (const_int 0)])))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (compare:CCEQ (xor:SI (match_dup 1) (match_dup 3))
+ (match_dup 5)))]
+ "
+{
+ int positive_1, positive_2;
+
+ positive_1 = branch_positive_comparison_operator (operands[1],
+ GET_MODE (operands[1]));
+ positive_2 = branch_positive_comparison_operator (operands[3],
+ GET_MODE (operands[3]));
+
+ if (! positive_1)
+ operands[1] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[2]),
+ GET_CODE (operands[1])),
+ SImode,
+ operands[2], const0_rtx);
+ else if (GET_MODE (operands[1]) != SImode)
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode,
+ operands[2], const0_rtx);
+
+ if (! positive_2)
+ operands[3] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[4]),
+ GET_CODE (operands[3])),
+ SImode,
+ operands[4], const0_rtx);
+ else if (GET_MODE (operands[3]) != SImode)
+ operands[3] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[4], const0_rtx);
+
+ if (positive_1 == positive_2)
+ {
+ operands[1] = gen_rtx_NOT (SImode, operands[1]);
+ operands[5] = constm1_rtx;
+ }
+ else
+ {
+ operands[5] = const1_rtx;
+ }
+}")
+
+;; Unconditional branch and return.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "b %l0"
+ [(set_attr "type" "branch")])
+
+/* APPLE LOCAL begin CW asm blocks */
+(define_insn "return"
+ [(return)]
+ "direct_return ()"
+ "*
+{
+ if (cfun->iasm_noreturn)
+ return \";{br|blr}\";
+ else
+ return \"{br|blr}\";
+}"
+ [(set_attr "type" "jmpreg")])
+/* APPLE LOCAL end CW asm blocks */
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand" ""))])
+
+(define_insn "*indirect_jump<mode>"
+ [(set (pc) (match_operand:P 0 "register_operand" "c,*l"))]
+ ""
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+;; Table jump for switch statements:
+(define_expand "tablejump"
+ [(use (match_operand 0 "" ""))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
+ else
+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "tablejumpsi"
+ [(set (match_dup 3)
+ (plus:SI (match_operand:SI 0 "" "")
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_32BIT"
+ "
+{ operands[0] = force_reg (SImode, operands[0]);
+ operands[2] = force_reg (SImode, gen_rtx_LABEL_REF (SImode, operands[1]));
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "tablejumpdi"
+ [(set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 0 "lwa_operand" "rm")))
+ (set (match_dup 3)
+ (plus:DI (match_dup 4)
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (DImode, operands[1]));
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+}")
+
+(define_insn "*tablejump<mode>_internal1"
+ [(set (pc)
+ (match_operand:P 0 "register_operand" "c,*l"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "{cror 0,0,0|nop}")
+
+;; Define the subtract-one-and-jump insns, starting with the template
+;; so loop.c knows what to generate.
+
+(define_expand "doloop_end"
+ [(use (match_operand 0 "" "")) ; loop pseudo
+ (use (match_operand 1 "" "")) ; iterations; zero if unknown
+ (use (match_operand 2 "" "")) ; max iterations
+ (use (match_operand 3 "" "")) ; loop level
+ (use (match_operand 4 "" ""))] ; label
+ ""
+ "
+{
+ /* Only use this on innermost loops. */
+ if (INTVAL (operands[3]) > 1)
+ FAIL;
+ if (TARGET_64BIT)
+ {
+ if (GET_MODE (operands[0]) != DImode)
+ FAIL;
+ emit_jump_insn (gen_ctrdi (operands[0], operands[4]));
+ }
+ else
+ {
+ if (GET_MODE (operands[0]) != SImode)
+ FAIL;
+ emit_jump_insn (gen_ctrsi (operands[0], operands[4]));
+ }
+ DONE;
+}")
+
+(define_expand "ctr<mode>"
+ [(parallel [(set (pc)
+ (if_then_else (ne (match_operand:P 0 "register_operand" "")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:P (match_dup 0)
+ (const_int -1)))
+ (clobber (match_scratch:CC 2 ""))
+ (clobber (match_scratch:P 3 ""))])]
+ ""
+ "")
+
+;; We need to be able to do this for any operand, including MEM, or we
+;; will cause reload to blow up since we don't allow output reloads on
+;; JUMP_INSNs.
+;; For the length attribute to be calculated correctly, the
+;; label MUST be operand 0.
+
+(define_insn "*ctr<mode>_internal1"
+ [(set (pc)
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+(define_insn "*ctr<mode>_internal2"
+ [(set (pc)
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+;; Similar but use EQ
+
+(define_insn "*ctr<mode>_internal5"
+ [(set (pc)
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+(define_insn "*ctr<mode>_internal6"
+ [(set (pc)
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+;; Now the splitters, used when the CTR register could not be allocated
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:P (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:P (match_dup 1)
+ (const_int -1)))])
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:P 0 "nonimmediate_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed && ! gpc_reg_operand (operands[0], SImode)"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:P (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 4)
+ (plus:P (match_dup 1)
+ (const_int -1)))])
+ (set (match_dup 0)
+ (match_dup 4))
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
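+
+;; Illustratively, when CTR could not be allocated the count stays in a
+;; GPR and the loop tail becomes an explicit decrement-and-test; with
+;; CR0 as the scratch condition register this is simply
+;;     addic. rN,rN,-1
+;;     bne    L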
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "{t 31,0,0|trap}")
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand 1 "const_int_operand" ""))]
+ ""
+ "if (rs6000_compare_fp_p || operands[1] != const0_rtx) FAIL;
+ operands[2] = rs6000_compare_op0;
+ operands[3] = rs6000_compare_op1;")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_operand:GPR 1 "register_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")])
+ (const_int 0))]
+ ""
+ "{t|t<wd>}%V0%I2 %1,%2")
+
+;; Insns related to generating the function prologue and epilogue.
+
+(define_expand "prologue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_prologue ();
+ DONE;
+}")
+
+(define_insn "*movesi_from_cr_one"
+ [(match_parallel 0 "mfcr_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:CC 2 "cc_reg_operand" "y")
+ (match_operand 3 "immediate_operand" "n")]
+ UNSPEC_MOVESI_FROM_CR))])]
+ "TARGET_MFCRF"
+ "*
+{
+ int mask = 0;
+ int i;
+ for (i = 0; i < XVECLEN (operands[0], 0); i++)
+ {
+ mask = INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
+ operands[4] = GEN_INT (mask);
+ output_asm_insn (\"mfcr %1,%4\", operands);
+ }
+ return \"\";
+}"
+ [(set_attr "type" "mfcrf")])
+
+(define_insn "movesi_from_cr"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
+ (reg:CC 72) (reg:CC 73) (reg:CC 74) (reg:CC 75)]
+ UNSPEC_MOVESI_FROM_CR))]
+ ""
+ "mfcr %0"
+ [(set_attr "type" "mfcr")])
+
+(define_insn "*stmw"
+ [(match_parallel 0 "stmw_operation"
+ [(set (match_operand:SI 1 "memory_operand" "=m")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))])]
+ "TARGET_MULTIPLE"
+ "{stm|stmw} %2,%1"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*save_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:P 1 "register_operand" "=l"))
+ (use (match_operand:P 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "memory_operand" "=m")
+ (match_operand:DF 4 "gpc_reg_operand" "f"))])]
+ ""
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+/* APPLE LOCAL begin long-branch */
+(define_insn "*save_fpregs_with_label_si"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (use (match_operand:SI 3 "" ""))
+ (set (match_operand:DF 4 "memory_operand" "=m")
+ (match_operand:DF 5 "gpc_reg_operand" "f"))])]
+ "TARGET_32BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ char *tmp;
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+ if (rs6000_default_long_calls)
+ {
+ tmp = ggc_alloc (strlen (XSTR (operands[2], 0)) + strlen (XSTR (operands[3], 0)) + 2);
+ strcpy (tmp, output_call(insn, operands, 2, 2));
+ strcat (tmp, \"\\n%3:\");
+ return tmp;
+ }
+ else
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+/* APPLE LOCAL end long-branch */
+
+; These are to explain that changes to the stack pointer should
+; not be moved over stores to stack memory.
+(define_insn "stack_tie"
+ [(set (match_operand:BLK 0 "memory_operand" "+m")
+ (unspec:BLK [(match_dup 0)] UNSPEC_TIE))]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+
+(define_expand "epilogue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_epilogue (FALSE);
+ DONE;
+}")
+
+; On some processors, doing the mtcrf one CC register at a time is
+; faster (like on the 604e). On others, doing them all at once is
+; faster; for instance, on the 601 and 750.
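+;
+; For example (illustrative), updating only the CR2 field from r3 uses
+; the single-field form
+;     mtcrf 0x20,r3
+; while updating all eight fields at once is
+;     mtcrf 0xff,r3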
+
+(define_expand "movsi_to_cr_one"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_dup 2)] UNSPEC_MOVESI_TO_CR))]
+ ""
+ "operands[2] = GEN_INT (1 << (75 - REGNO (operands[0])));")
+
+(define_insn "*movsi_to_cr"
+ [(match_parallel 0 "mtcrf_operation"
+ [(set (match_operand:CC 1 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand 3 "immediate_operand" "n")]
+ UNSPEC_MOVESI_TO_CR))])]
+ ""
+ "*
+{
+ int mask = 0;
+ int i;
+ for (i = 0; i < XVECLEN (operands[0], 0); i++)
+ mask |= INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
+ operands[4] = GEN_INT (mask);
+ return \"mtcrf %4,%2\";
+}"
+ [(set_attr "type" "mtcr")])
+
+(define_insn "*mtcrfsi"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand 2 "immediate_operand" "n")]
+ UNSPEC_MOVESI_TO_CR))]
+ "GET_CODE (operands[0]) == REG
+ && CR_REGNO_P (REGNO (operands[0]))
+ && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))"
+ "mtcrf %R0,%1"
+ [(set_attr "type" "mtcr")])
+
+; The load-multiple instructions have similar properties.
+; Note that "load_multiple" is a name known to the machine-independent
+; code; on PowerPC it corresponds to the load-multiple (lmw) instruction.
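+;
+; e.g. (illustrative) "{lm|lmw} 25,-28(1)" reloads r25 through r31 from
+; seven consecutive words ending just below the frame base.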
+
+(define_insn "*lmw"
+ [(match_parallel 0 "lmw_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))])]
+ "TARGET_MULTIPLE"
+ "{lm|lmw} %1,%2"
+ [(set_attr "type" "load_ux")])
+
+/* APPLE LOCAL begin CW asm blocks */
+(define_insn "*return_internal_<mode>"
+ [(return)
+ (use (match_operand:P 0 "register_operand" "lc"))]
+ ""
+ "*
+{
+ if (cfun->iasm_noreturn)
+ return \"; b%T0\";
+ else
+ return \"b%T0\";
+}"
+ [(set_attr "type" "jmpreg")])
+/* APPLE LOCAL end CW asm blocks */
+
+; FIXME: This would probably be somewhat simpler if the Cygnus sibcall
+; stuff was in GCC. Oh, and "any_parallel_operand" is a bit flexible...
+
+(define_insn "*return_and_restore_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(return)
+ (use (match_operand:P 1 "register_operand" "l"))
+ (use (match_operand:P 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "gpc_reg_operand" "=f")
+ (match_operand:DF 4 "memory_operand" "m"))])]
+ ""
+; APPLE LOCAL begin -mlongcall
+ {
+#if TARGET_MACHO
+ if (rs6000_default_long_calls)
+ return output_call(insn, operands, 2, 2);
+ else
+#endif
+ return "b %z2";
+ })
+; APPLE LOCAL end -mlongcall
+
+; This is used in compiling the unwind routines.
+(define_expand "eh_return"
+ [(use (match_operand 0 "general_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_eh_set_lr_si (operands[0]));
+ else
+ emit_insn (gen_eh_set_lr_di (operands[0]));
+ DONE;
+}")
+
+; We can't expand this before we know where the link register is stored.
+(define_insn "eh_set_lr_<mode>"
+ [(unspec_volatile [(match_operand:P 0 "register_operand" "r")]
+ UNSPECV_EH_RR)
+ (clobber (match_scratch:P 1 "=&b"))]
+ ""
+ "#")
+
+(define_split
+ [(unspec_volatile [(match_operand 0 "register_operand" "")] UNSPECV_EH_RR)
+ (clobber (match_scratch 1 ""))]
+ "reload_completed"
+ [(const_int 0)]
+ "
+{
+ rs6000_emit_eh_reg_restore (operands[0], operands[1]);
+ DONE;
+}")
+
+(define_insn "prefetch"
+ [(prefetch (match_operand 0 "indexed_or_indirect_address" "a")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))]
+ "TARGET_POWERPC"
+ "*
+{
+ if (GET_CODE (operands[0]) == REG)
+ return INTVAL (operands[1]) ? \"dcbtst 0,%0\" : \"dcbt 0,%0\";
+ return INTVAL (operands[1]) ? \"dcbtst %a0\" : \"dcbt %a0\";
+}"
+ [(set_attr "type" "load")])
+
+; APPLE LOCAL begin 3399553
+
+; Load FPSCR into bits 32:63 of a floating point register.
+
+(define_insn "mffs"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(reg:SI 114)] UNSPEC_MFFS))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "mffs %0"
+)
+
+; Expand __builtin_flt_rounds by reading the FPSCR rounding-mode bits.
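+;
+; For reference (our gloss, not from the original source): the FPSCR RN
+; field and the C99 FLT_ROUNDS encoding differ, so the two bits must be
+; remapped rather than copied:
+;   RN 00 = to nearest  -> FLT_ROUNDS 1      RN 01 = toward 0    -> 0
+;   RN 10 = toward +inf -> FLT_ROUNDS 2      RN 11 = toward -inf -> 3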
+
+(define_expand "flt_rounds"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(reg:SI 114)] UNSPEC_FLT_ROUNDS))]
+ ""
+ "
+ {
+ rs6000_expand_flt_rounds (operands[0]);
+ DONE;
+ }
+ "
+)
+; APPLE LOCAL end 3399553
+
+
+(include "sync.md")
+(include "altivec.md")
+(include "spe.md")
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.opt b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.opt
new file mode 100644
index 000000000..76c66eda4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs6000.opt
@@ -0,0 +1,262 @@
+; Options for the rs6000 port of the compiler
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mpower
+Target Report RejectNegative Mask(POWER)
+Use POWER instruction set
+
+mno-power
+Target Report RejectNegative
+Do not use POWER instruction set
+
+mpower2
+Target Report Mask(POWER2)
+Use POWER2 instruction set
+
+mpowerpc
+Target Report RejectNegative Mask(POWERPC)
+Use PowerPC instruction set
+
+mno-powerpc
+Target Report RejectNegative
+Do not use PowerPC instruction set
+
+mpowerpc64
+Target Report Mask(POWERPC64)
+Use PowerPC-64 instruction set
+
+mpowerpc-gpopt
+Target Report Mask(PPC_GPOPT)
+Use PowerPC General Purpose group optional instructions
+
+mpowerpc-gfxopt
+Target Report Mask(PPC_GFXOPT)
+Use PowerPC Graphics group optional instructions
+
+mmfcrf
+Target Report Mask(MFCRF)
+Use PowerPC V2.01 single field mfcr instruction
+
+mpopcntb
+Target Report Mask(POPCNTB)
+Use PowerPC V2.02 popcntb instruction
+
+mfprnd
+Target Report Mask(FPRND)
+Use PowerPC V2.02 floating point rounding instructions
+
+maltivec
+Target Report Mask(ALTIVEC)
+Use AltiVec instructions
+
+; APPLE LOCAL begin AltiVec
+mpim-altivec
+Target Report Var(rs6000_altivec_pim)
+Enable use of Motorola AltiVec PIM operations and predicates
+
+mmask-pim-altivec
+Target Undocumented Mask(PIM_ALTIVEC)
+; APPLE LOCAL end AltiVec
+
+mmulhw
+Target Report Mask(MULHW)
+Use 4xx half-word multiply instructions
+
+mdlmzb
+Target Report Mask(DLMZB)
+Use 4xx string-search dlmzb instruction
+
+mmultiple
+Target Report Mask(MULTIPLE)
+Generate load/store multiple instructions
+
+mstring
+Target Report Mask(STRING)
+Generate string instructions for block moves
+
+mnew-mnemonics
+Target Report RejectNegative Mask(NEW_MNEMONICS)
+Use new mnemonics for PowerPC architecture
+
+mold-mnemonics
+Target Report RejectNegative InverseMask(NEW_MNEMONICS)
+Use old mnemonics for PowerPC architecture
+
+msoft-float
+Target Report RejectNegative Mask(SOFT_FLOAT)
+Do not use hardware floating point
+
+mhard-float
+Target Report RejectNegative InverseMask(SOFT_FLOAT, HARD_FLOAT)
+Use hardware floating point
+
+mno-update
+Target Report RejectNegative Mask(NO_UPDATE)
+Do not generate load/store with update instructions
+
+mupdate
+Target Report RejectNegative InverseMask(NO_UPDATE, UPDATE)
+Generate load/store with update instructions
+
+mno-fused-madd
+Target Report RejectNegative Mask(NO_FUSED_MADD)
+Do not generate fused multiply/add instructions
+
+mfused-madd
+Target Report RejectNegative InverseMask(NO_FUSED_MADD, FUSED_MADD)
+Generate fused multiply/add instructions
+
+msched-prolog
+Target Report Var(TARGET_SCHED_PROLOG) Init(1)
+Schedule the start and end of the procedure
+
+msched-epilog
+Target Undocumented Var(TARGET_SCHED_PROLOG) VarExists
+
+maix-struct-return
+Target Report RejectNegative Var(aix_struct_return)
+Return all structures in memory (AIX default)
+
+msvr4-struct-return
+Target Report RejectNegative Var(aix_struct_return,0) VarExists
+Return small structures in registers (SVR4 default)
+
+mxl-compat
+Target Report Var(TARGET_XL_COMPAT)
+Conform more closely to IBM XLC semantics
+
+mswdiv
+Target Report Var(swdiv)
+Generate software floating point divide for better throughput
+
+mno-fp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC)
+Do not place floating point constants in TOC
+
+mfp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC,0)
+Place floating point constants in TOC
+
+mno-sum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC)
+Do not place symbol+offset constants in TOC
+
+msum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC,0) VarExists
+Place symbol+offset constants in TOC
+
+; Output only one TOC entry per module. Normally linking fails if
+; there are more than 16K unique variables/constants in an executable. With
+; this option, linking fails only if there are more than 16K modules, or
+; if there are more than 16K unique variables/constants in a single module.
+;
+; This is at the cost of having 2 extra loads and one extra store per
+; function, and one less allocable register.
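+;
+; (An illustrative sketch of the mechanism, not from the original
+; source: the module's entries live in a private table reached through
+; the single TOC slot, so each access goes indirectly through that
+; table, and a register is tied up holding its address.)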
+mminimal-toc
+Target Report Mask(MINIMAL_TOC)
+Use only one TOC entry per procedure
+
+mfull-toc
+Target Report
+Put everything in the regular TOC
+
+mvrsave
+Target Report Var(TARGET_ALTIVEC_VRSAVE)
+Generate VRSAVE instructions when generating AltiVec code
+
+mvrsave=
+Target RejectNegative Joined
+-mvrsave=yes/no Deprecated option. Use -mvrsave/-mno-vrsave instead
+
+misel
+Target Var(rs6000_isel)
+Generate isel instructions
+
+misel=
+Target RejectNegative Joined
+-misel=yes/no Deprecated option. Use -misel/-mno-isel instead
+
+mspe
+Target Var(rs6000_spe)
+Generate SPE SIMD instructions on E500
+
+mspe=
+Target RejectNegative Joined
+-mspe=yes/no Deprecated option. Use -mspe/-mno-spe instead
+
+mdebug=
+Target RejectNegative Joined
+-mdebug= Enable debug output
+
+mabi=
+Target RejectNegative Joined
+-mabi= Specify ABI to use
+
+mcpu=
+Target RejectNegative Joined
+-mcpu= Use features of and schedule code for given CPU
+
+mtune=
+Target RejectNegative Joined
+-mtune= Schedule code for given CPU
+
+mtraceback=
+Target RejectNegative Joined
+-mtraceback= Select full, part, or no traceback table
+
+mlongcall
+Target Report Var(rs6000_default_long_calls)
+Avoid all range limits on call instructions
+
+; APPLE LOCAL begin long-branch
+mlong-branch
+Target Report Var(rs6000_default_long_calls)
+Deprecated option. Use -mlongcall instead
+; APPLE LOCAL end long-branch
+
+mwarn-altivec-long
+Target Var(rs6000_warn_altivec_long) Init(1)
+Warn about deprecated 'vector long ...' AltiVec type usage
+
+mfloat-gprs=
+Target RejectNegative Joined
+-mfloat-gprs= Select GPR floating point method
+
+mlong-double-
+Target RejectNegative Joined UInteger
+-mlong-double-<n> Specify size of long double (64 or 128 bits)
+
+msched-costly-dep=
+Target RejectNegative Joined
+Determine which dependences between insns are considered costly
+
+minsert-sched-nops=
+Target RejectNegative Joined
+Specify which post scheduling nop insertion scheme to apply
+
+malign-
+Target RejectNegative Joined
+-malign-<XXX> Specify alignment of structure fields: default or natural
+
+mprioritize-restricted-insns=
+Target RejectNegative Joined UInteger Var(rs6000_sched_restricted_insns_priority)
+Specify scheduling priority for dispatch slot restricted insns
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/rs64.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs64.md
new file mode 100644
index 000000000..71ec61de5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/rs64.md
@@ -0,0 +1,153 @@
+;; Scheduling description for IBM RS64 processors.
+;; Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_automaton "rs64,rs64fp")
+(define_cpu_unit "iu_rs64" "rs64")
+(define_cpu_unit "mciu_rs64" "rs64")
+(define_cpu_unit "fpu_rs64" "rs64fp")
+(define_cpu_unit "lsu_rs64,bpu_rs64" "rs64")
+
+;; RS64a 64-bit IU, LSU, FPU, BPU
+
+(define_insn_reservation "rs64a-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-llsc" 2
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-integer" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64")
+
+(define_insn_reservation "rs64a-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64")
+
+(define_insn_reservation "rs64a-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64,iu_rs64")
+
+(define_insn_reservation "rs64a-imul" 20
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*13")
+
+(define_insn_reservation "rs64a-imul2" 12
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*5")
+
+(define_insn_reservation "rs64a-imul3" 8
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*2")
+
+(define_insn_reservation "rs64a-lmul" 34
+ (and (eq_attr "type" "lmul,lmul_compare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*34")
+
+(define_insn_reservation "rs64a-idiv" 66
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*66")
+
+(define_insn_reservation "rs64a-ldiv" 66
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*66")
+
+(define_insn_reservation "rs64a-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,nothing,bpu_rs64")
+
+(define_insn_reservation "rs64a-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64,bpu_rs64")
+
+(define_insn_reservation "rs64a-fp" 4
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64")
+
+(define_insn_reservation "rs64a-sdiv" 31
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64*31")
+
+(define_insn_reservation "rs64a-sqrt" 49
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64*49")
+
+(define_insn_reservation "rs64a-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr")
+ (eq_attr "cpu" "rs64a"))
+ "bpu_rs64")
+
+(define_insn_reservation "rs64a-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "rs64a"))
+ "bpu_rs64")
+
+(define_insn_reservation "rs64a-sync" 1
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/spe.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/spe.md
new file mode 100644
index 000000000..7d05e0882
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/spe.md
@@ -0,0 +1,2814 @@
+;; e500 SPE description
+;; Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_constants
+ [(SPE_ACC_REGNO 111)
+ (SPEFSCR_REGNO 112)
+
+ (CMPDFEQ_GPR 1006)
+ (TSTDFEQ_GPR 1007)
+ (CMPDFGT_GPR 1008)
+ (TSTDFGT_GPR 1009)
+ (CMPDFLT_GPR 1010)
+ (TSTDFLT_GPR 1011)
+ (E500_CR_IOR_COMPARE 1012)
+ ])
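+
+;; SPE_ACC_REGNO and SPEFSCR_REGNO are the hard register numbers of
+;; the accumulator and the SPE status and control register; the
+;; remaining constants are unspec numbers used by the e500 GPR
+;; double-precision comparison patterns.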
+
+;; Modes using a 64-bit register.
+(define_mode_macro SPE64 [DF V4HI V2SF V1DI V2SI])
+
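+;; A pattern written in terms of :SPE64 is expanded once for each of
+;; the modes listed above, so a single template can serve every type
+;; that lives in a 64-bit register.
+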
+(define_insn "*negsf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsneg %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*abssf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsabs %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*nabssf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (neg:SF (abs:SF (match_operand:SF 1 "gpc_reg_operand" "r"))))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsnabs %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*addsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*subsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efssub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*mulsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsmul %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*divsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsdiv %0,%1,%2"
+ [(set_attr "type" "vecfdiv")])
+
+;; Floating point conversion instructions.
+
+(define_insn "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctuiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsctuiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fix_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsctsiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fix_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctsiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatunssisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (unsigned_float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efscfui %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatunssidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfui %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatsisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efscfsi %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatsidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfsi %0,%1"
+ [(set_attr "type" "fp")])
+
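+;; At the C level these patterns implement the ordinary conversions;
+;; a cast such as (int)x on a single-precision x becomes efsctsiz,
+;; whose trailing "z" is the round-toward-zero behavior that RTL's
+;; fix operation requires.
+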
+;; SPE SIMD instructions
+
+(define_insn "spe_evabs"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (abs:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evandc"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (not:V2SI (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evand"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evand %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; Vector compare instructions
+
+(define_insn "spe_evcmpeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 500))]
+ "TARGET_SPE"
+ "evcmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpgts"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 501))]
+ "TARGET_SPE"
+ "evcmpgts %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpgtu"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 502))]
+ "TARGET_SPE"
+ "evcmpgtu %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmplts"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 503))]
+ "TARGET_SPE"
+ "evcmplts %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpltu"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 504))]
+ "TARGET_SPE"
+ "evcmpltu %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+;; Floating point vector compare instructions
+
+(define_insn "spe_evfscmpeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 538))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscmpgt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 539))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscmplt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 540))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 541))]
+ "TARGET_SPE"
+ "evfststeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststgt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 542))]
+ "TARGET_SPE"
+ "evfststgt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststlt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 543))]
+ "TARGET_SPE"
+ "evfststlt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+;; End of vector compare instructions
+
+(define_insn "spe_evcntlsw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 505))]
+ "TARGET_SPE"
+ "evcntlsw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcntlzw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 506))]
+ "TARGET_SPE"
+ "evcntlzw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_eveqv"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (xor:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "eveqv %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evextsb"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 507))]
+ "TARGET_SPE"
+ "evextsb %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evextsh"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 508))]
+ "TARGET_SPE"
+ "evextsh %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
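+;; The immediate splat loads take a 5-bit unsigned index, hence the
+;; INTVAL range checks on operand 2.  The hardware scales the index by
+;; the element size, which the output templates spell out: %2*2 for
+;; half words and %2*4 for words (the doubleword loads further down
+;; use %2*8).
+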
+(define_insn "spe_evlhhesplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 509)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhesplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhesplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 510)]
+ "TARGET_SPE"
+ "evlhhesplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhossplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 511)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhossplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhossplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 512)]
+ "TARGET_SPE"
+ "evlhhossplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhousplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 513)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhousplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhousplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 514)]
+ "TARGET_SPE"
+ "evlhhousplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhsplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 515)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhsplat %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhsplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 516)]
+ "TARGET_SPE"
+ "evlwhsplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwwsplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 517)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwwsplat %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwwsplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 518)]
+ "TARGET_SPE"
+ "evlwwsplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergehi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (vec_select:V2SI
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergehi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergehilo"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergehilo %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergelo"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (vec_select:V2SI
+ (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergelo %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergelohi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (vec_select:V2SI
+ (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (vec_select:V2SI
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergelohi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evnand"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evnand %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "negv2si2"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (neg:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evneg %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evnor"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evorc"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (not:V2SI (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evorc %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evor"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrlwi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 519))]
+ "TARGET_SPE"
+ "evrlwi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrlw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 520))]
+ "TARGET_SPE"
+ "evrlw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrndw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 521))]
+ "TARGET_SPE"
+ "evrndw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsel"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (match_operand:CC 3 "cc_reg_operand" "y")] 522))]
+ "TARGET_SPE"
+ "evsel %0,%1,%2,%3"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsel_fs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")
+ (match_operand:CC 3 "cc_reg_operand" "y")] 725))]
+ "TARGET_SPE"
+ "evsel %0,%1,%2,%3"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evslwi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 523))]
+ "TARGET_SPE"
+ "evslwi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evslw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 524))]
+ "TARGET_SPE"
+ "evslw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwis"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 525))]
+ "TARGET_SPE"
+ "evsrwis %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwiu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 526))]
+ "TARGET_SPE"
+ "evsrwiu %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrws"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 527))]
+ "TARGET_SPE"
+ "evsrws %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 528))]
+ "TARGET_SPE"
+ "evsrwu %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; vector xors
+
+(define_insn "xorv2si3"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (xor:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "xorv4hi3"
+ [(set (match_operand:V4HI 0 "gpc_reg_operand" "=r")
+ (xor:V4HI (match_operand:V4HI 1 "gpc_reg_operand" "r")
+ (match_operand:V4HI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "xorv1di3"
+ [(set (match_operand:V1DI 0 "gpc_reg_operand" "=r")
+ (xor:V1DI (match_operand:V1DI 1 "gpc_reg_operand" "r")
+ (match_operand:V1DI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; end of vector xors
+
+(define_insn "spe_evfsabs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfsabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsadd"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsadd %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfsf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 529))]
+ "TARGET_SPE"
+ "evfscfsf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfsi"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (float:V2SF (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfscfsi %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfuf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 530))]
+ "TARGET_SPE"
+ "evfscfuf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfui"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 701))]
+ "TARGET_SPE"
+ "evfscfui %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 531))]
+ "TARGET_SPE"
+ "evfsctsf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 532))]
+ "TARGET_SPE"
+ "evfsctsi %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsiz"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 533))]
+ "TARGET_SPE"
+ "evfsctsiz %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctuf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 534))]
+ "TARGET_SPE"
+ "evfsctuf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctui"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 535))]
+ "TARGET_SPE"
+ "evfsctui %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctuiz"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 536))]
+ "TARGET_SPE"
+ "evfsctuiz %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsdiv"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsdiv %0,%1,%2"
+ [(set_attr "type" "vecfdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsmul"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsmul %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsnabs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 537))]
+ "TARGET_SPE"
+ "evfsnabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsneg"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfsneg %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfssub"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfssub %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+;; SPE SIMD load instructions.
+
+;; Only the hardware engineer who designed the SPE understands the
+;; plethora of load and store instructions ;-). We have no way of
+;; differentiating between them in RTL, so each pattern wraps a
+;; (const_int 0) in an unspec with its own number, keeping otherwise
+;; identical RTL distinct.
+
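+;; For example, spe_evldd and spe_evldw below would otherwise be
+;; identical: both match the same (mem:V2SI (plus ...)) form and are
+;; kept apart only by the constants 544 and 548 in their unspecs.
+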
+(define_insn "spe_evldd"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 544)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldd %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlddx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 545)]
+ "TARGET_SPE"
+ "evlddx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldh"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 546)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldh %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldhx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 547)]
+ "TARGET_SPE"
+ "evldhx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 548)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldw %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldwx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 549)]
+ "TARGET_SPE"
+ "evldwx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhe"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 550)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhe %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhex"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 551)]
+ "TARGET_SPE"
+ "evlwhex %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhos"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 552)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhos %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhosx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 553)]
+ "TARGET_SPE"
+ "evlwhosx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhou"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 554)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhou %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhoux"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 555)]
+ "TARGET_SPE"
+ "evlwhoux %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
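+;; brinc computes a bit-reversed address increment (an addressing aid
+;; for FFT-style access patterns); it has no natural RTL equivalent,
+;; so it too is described with an unspec.
+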
+(define_insn "spe_brinc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")] 556))]
+ "TARGET_SPE"
+ "brinc %0,%1,%2"
+ [(set_attr "type" "brinc")
+ (set_attr "length" "4")])
+
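+;; The accumulating multiply patterns below additionally set
+;; SPE_ACC_REGNO through a dummy unspec.  The value is irrelevant: the
+;; set simply records that the 64-bit accumulator is written, keeping
+;; dependent accumulate chains in order.
+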
+(define_insn "spe_evmhegsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 557))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 558))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 559))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 560))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 561))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 562))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 563))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 564))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 565))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 566))]
+ "TARGET_SPE"
+ "evmhesmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 567))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 568))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 569))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 570))]
+ "TARGET_SPE"
+ "evmhesmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 571))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 572))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 573))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 574))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmhessf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 575))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 576))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 577))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 578))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 579))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 580))]
+ "TARGET_SPE"
+ "evmheumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 581))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheusiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheusianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 582))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheusianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 583))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 584))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 585))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 586))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 587))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 588))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 589))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 590))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 591))]
+ "TARGET_SPE"
+ "evmhosmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 592))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 593))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 594))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 595))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 596))]
+ "TARGET_SPE"
+ "evmhosmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 597))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 598))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 599))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 600))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmhossf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 601))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 602))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 603))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 604))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 605))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 606))]
+ "TARGET_SPE"
+ "evmhoumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhousiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 607))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhousiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhousianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 608))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhousianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmmlssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 609))]
+ "TARGET_SPE"
+ "evmmlssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmmlssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 610))]
+ "TARGET_SPE"
+ "evmmlssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 611))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 612))]
+ "TARGET_SPE"
+ "evmwhsmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 613))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 614))]
+ "TARGET_SPE"
+ "evmwhsmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 615))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhusian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 626))]
+ "TARGET_SPE"
+ "evmwhusian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 628))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmwhssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 629))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 630))]
+ "TARGET_SPE"
+ "evmwhumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 635))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlsmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlsmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 636))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlsmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 641))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlssiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlssianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 642))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlssianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 643))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 644))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 645))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 646))]
+ "TARGET_SPE"
+ "evmwlumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 647))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlusiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlusianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 648))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlusianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 649))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 650))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 651))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 652))]
+ "TARGET_SPE"
+ "evmwsmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 653))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 654))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 655))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 656))]
+ "TARGET_SPE"
+ "evmwsmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 657))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 658))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 659))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 660))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmwssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 661))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 662))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 663))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 664))]
+ "TARGET_SPE"
+ "evmwumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (plus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evaddw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 673))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddusiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 674))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddumiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 675))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddssiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 676))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddsmiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddiw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 677))]
+ "TARGET_SPE"
+ "evaddiw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubifw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 678))]
+ "TARGET_SPE"
+ "evsubifw %0,%2,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (minus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evsubfw %0,%2,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 679))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfusiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 680))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfumiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 681))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfssiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 682))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfsmiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmra"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (match_operand:V2SI 1 "gpc_reg_operand" "r"))
+ (set (reg:V2SI SPE_ACC_REGNO)
+ (unspec:V2SI [(match_dup 1)] 726))]
+ "TARGET_SPE"
+ "evmra %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evdivws"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (div:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evdivws %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evdivwu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (udiv:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evdivwu %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsplatfi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:QI 1 "immediate_operand" "i")] 684))]
+ "TARGET_SPE"
+ "evsplatfi %0,%1"
+ [(set_attr "type" "vecperm")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsplati"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:QI 1 "immediate_operand" "i")] 685))]
+ "TARGET_SPE"
+ "evsplati %0,%1"
+ [(set_attr "type" "vecperm")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdd"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 686)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdd %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstddx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 687)]
+ "TARGET_SPE"
+ "evstddx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdh"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 688)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdh %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdhx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 689)]
+ "TARGET_SPE"
+ "evstdhx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdw"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 690)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdw %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdwx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 691)]
+ "TARGET_SPE"
+ "evstdwx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhe"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 692)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwhe %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhex"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 693)]
+ "TARGET_SPE"
+ "evstwhex %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwho"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 694)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwho %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhox"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 695)]
+ "TARGET_SPE"
+ "evstwhox %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwe"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 696)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwwe %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwex"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 697)]
+ "TARGET_SPE"
+ "evstwwex %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwo"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 698)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwwo %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwox"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 699)]
+ "TARGET_SPE"
+ "evstwwox %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+;; Double-precision floating point instructions.
+
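+;; A hedged illustration (not part of the original patch): when
+;; TARGET_E500_DOUBLE holds, doubles live in 64-bit GPRs rather than
+;; FPRs, so plain C such as
+;;
+;;   double scale (double x, double y) { return x * y; }
+;;
+;; is expected to match spe_muldf3 below and assemble to a single
+;; "efdmul", with the surrounding moves handled by the evmergelo /
+;; evldd / evstdd alternatives of the frob and mov patterns here.
+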
+;; FIXME: Add o=r option.
+(define_insn "*frob_df_di"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r")
+ (subreg:DF (match_operand:DI 1 "input_operand" "r,m") 0))]
+ "TARGET_E500_DOUBLE"
+ "@
+ evmergelo %0,%1,%L1
+ evldd%X1 %0,%y1")
+
+(define_insn "*frob_di_df"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=&r")
+ (subreg:DI (match_operand:DF 1 "input_operand" "r") 0))]
+ "TARGET_E500_DOUBLE"
+ "evmergehi %0,%1,%1\;mr %L0,%1"
+ [(set_attr "length" "8")])
+
+(define_insn "*frob_di_df_2"
+ [(set (subreg:DF (match_operand:DI 0 "register_operand" "=&r,r") 0)
+ (match_operand:DF 1 "input_operand" "r,m"))]
+ "TARGET_E500_DOUBLE"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ return \"evmergehi %0,%1,%1\;mr %L0,%1\";
+ case 1:
+ /* If the address is not offsettable, we need to load the whole
+ doubleword into a 64-bit register and then copy the high word
+ to form the correct output layout. */
+ if (!offsettable_nonstrict_memref_p (operands[1]))
+ return \"evldd%X1 %L0,%y1\;evmergehi %0,%L0,%L0\";
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+}"
+ [(set_attr "length" "8,8")])
+
+(define_insn "*mov_si<mode>_e500_subreg0"
+ [(set (subreg:SI (match_operand:SPE64 0 "register_operand" "+r,&r") 0)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ evmergelo %0,%1,%0
+ evmergelohi %0,%0,%0\;{l%U1%X1|lwz%U1%X1} %0,%1\;evmergelohi %0,%0,%0")
+
+;; ??? Could use evstwwe for memory stores in some cases, depending on
+;; the offset.
+(define_insn "*mov_si<mode>_e500_subreg0_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64 1 "register_operand" "+r,&r") 0))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ evmergehi %0,%0,%1
+ evmergelohi %1,%1,%1\;{st%U0%X0|stw%U0%X0} %1,%0")
+
+(define_insn "*mov_si<mode>_e500_subreg4"
+ [(set (subreg:SI (match_operand:SPE64 0 "register_operand" "+r,r") 4)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1")
+
+(define_insn "*mov_si<mode>_e500_subreg4_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64 1 "register_operand" "r,r") 4))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode) || (TARGET_SPE && <MODE>mode != DFmode)"
+ "@
+ mr %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0")
+
+;; FIXME: Allow r=CONST0.
+(define_insn "*movdf_e500_double"
+ [(set (match_operand:DF 0 "rs6000_nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "r,m,r"))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"evor %0,%1,%1\";
+ case 1:
+ return \"evldd%X1 %0,%y1\";
+ case 2:
+ return \"evstdd%X0 %1,%y0\";
+ default:
+ gcc_unreachable ();
+ }
+ }"
+ [(set_attr "type" "*,vecload,vecstore")
+ (set_attr "length" "*,*,*")])
+
+(define_insn "spe_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efscfd %0,%1")
+
+(define_insn "spe_absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdabs %0,%1")
+
+(define_insn "spe_nabsdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r"))))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdnabs %0,%1")
+
+(define_insn "spe_negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdneg %0,%1")
+
+(define_insn "spe_adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdadd %0,%1,%2")
+
+(define_insn "spe_subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdsub %0,%1,%2")
+
+(define_insn "spe_muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdmul %0,%1,%2")
+
+(define_insn "spe_divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efddiv %0,%1,%2")
+
+;; Vector move instructions.
+
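+;; A hedged sketch of what these expanders service (illustrative, not
+;; from the original patch): with SPE enabled, a generic 8-byte vector
+;; such as
+;;
+;;   typedef int v2si __attribute__ ((vector_size (8)));
+;;   void copy (v2si *dst, v2si *src) { *dst = *src; }
+;;
+;; goes through movv2si and rs6000_emit_move and ends up using the
+;; evldd / evstdd / evor alternatives of *movv2si_internal below.
+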
+(define_expand "movv2si"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "")
+ (match_operand:V2SI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V2SImode); DONE; }")
+
+(define_insn "*movv2si_internal"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V2SI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V2SImode)
+ || gpc_reg_operand (operands[1], V2SImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"evstdd%X0 %1,%y0\";
+ case 1: return \"evldd%X1 %0,%y1\";
+ case 2: return \"evor %0,%1,%1\";
+ case 3: return output_vec_const_move (operands);
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,12")])
+
+(define_split
+ [(set (match_operand:V2SI 0 "register_operand" "")
+ (match_operand:V2SI 1 "zero_constant" ""))]
+ "TARGET_SPE && reload_completed"
+ [(set (match_dup 0)
+ (xor:V2SI (match_dup 0) (match_dup 0)))]
+ "")
+
+(define_expand "movv1di"
+ [(set (match_operand:V1DI 0 "nonimmediate_operand" "")
+ (match_operand:V1DI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V1DImode); DONE; }")
+
+(define_insn "*movv1di_internal"
+ [(set (match_operand:V1DI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V1DI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V1DImode)
+ || gpc_reg_operand (operands[1], V1DImode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,*")])
+
+(define_expand "movv4hi"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "")
+ (match_operand:V4HI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V4HImode); DONE; }")
+
+(define_insn "*movv4hi_internal"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V4HI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V4HImode)
+ || gpc_reg_operand (operands[1], V4HImode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload")])
+
+(define_expand "movv2sf"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "")
+ (match_operand:V2SF 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V2SFmode); DONE; }")
+
+(define_insn "*movv2sf_internal"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V2SF 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V2SFmode)
+ || gpc_reg_operand (operands[1], V2SFmode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,*")])
+
+;; End of vector move instructions.
+
+(define_insn "spe_evmwhssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 702))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssmaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 703))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssmaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 704))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 705))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhusiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 706))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhusiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 707))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 708))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 709))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 710))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 711))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 713))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 714))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 715))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 716))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 717))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 718))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 719))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 720))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 721))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_mtspefscr"
+ [(set (reg:SI SPEFSCR_REGNO)
+ (unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ 722))]
+ "TARGET_SPE"
+ "mtspefscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "spe_mfspefscr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(reg:SI SPEFSCR_REGNO)] 723))]
+ "TARGET_SPE"
+ "mfspefscr %0"
+ [(set_attr "type" "vecsimple")])
+
+;; Floating-point comparison patterns.
+
+;; Flip the GT bit.
+(define_insn "e500_flip_gt_bit"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
+ "!TARGET_FPRS && TARGET_HARD_FLOAT"
+ "*
+{
+ return output_e500_flip_gt_bit (operands[0], operands[1]);
+}"
+ [(set_attr "type" "cr_logical")])
+
+;; MPC8540 single-precision FP instructions on GPRs.
+;; We have two variants of each: one for IEEE-compliant math and one
+;; for non-IEEE-compliant math.
+
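+;; Which variant is chosen follows directly from the conditions below:
+;; the efscmp* forms require !flag_unsafe_math_optimizations, the
+;; efstst* forms require it.  A hedged example (not from the original
+;; patch):
+;;
+;;   int gt (float a, float b) { return a > b; }
+;;
+;; should use cmpsfgt_gpr (efscmpgt) by default and tstsfgt_gpr
+;; (efststgt) under -funsafe-math-optimizations / -ffast-math.
+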
+(define_insn "cmpsfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1000))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
+ "efscmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1001))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
+ "efststeq %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpsfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1002))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
+ "efscmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1003))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
+ "efststgt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpsflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1004))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && !flag_unsafe_math_optimizations"
+ "efscmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1005))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && flag_unsafe_math_optimizations"
+ "efststlt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+;; Same thing, but for double-precision.
+
+(define_insn "cmpdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtsteq %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtstgt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && !flag_unsafe_math_optimizations"
+ "efdcmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
+ "efdtstlt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+;; Like cceq_ior_compare, but compare the GT bits.
+(define_insn "e500_cr_ior_compare"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP [(match_operand 1 "cc_reg_operand" "y")
+ (match_operand 2 "cc_reg_operand" "y")]
+ E500_CR_IOR_COMPARE))]
+ "TARGET_E500"
+ "cror 4*%0+gt,4*%1+gt,4*%2+gt"
+ [(set_attr "type" "cr_logical")])
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/sync.md b/gcc-4.2.1-5666.3/gcc/config/rs6000/sync.md
new file mode 100644
index 000000000..b7166473e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/sync.md
@@ -0,0 +1,625 @@
+;; Machine description for PowerPC synchronization instructions.
+;; Copyright (C) 2005 Free Software Foundation, Inc.
+;; Contributed by Geoffrey Keating.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 2, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the
+;; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+;; MA 02110-1301, USA.
+
+(define_mode_attr larx [(SI "lwarx") (DI "ldarx")])
+(define_mode_attr stcx [(SI "stwcx.") (DI "stdcx.")])
+
+(define_code_macro FETCHOP [plus minus ior xor and])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_code_attr fetchop_pred
+ [(plus "add_operand") (minus "gpc_reg_operand")
+ (ior "logical_operand") (xor "logical_operand") (and "and_operand")])
+(define_code_attr fetchopsi_constr
+ [(plus "rIL") (minus "r") (ior "rKL") (xor "rKL") (and "rTKL")])
+(define_code_attr fetchopdi_constr
+ [(plus "rIL") (minus "r") (ior "rKJF") (xor "rKJF") (and "rSTKJ")])
+
+(define_expand "memory_barrier"
+ [(set (mem:BLK (match_dup 0))
+ (unspec:BLK [(mem:BLK (match_dup 0))] UNSPEC_SYNC))]
+ ""
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
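+
+;; A hedged usage note: the __sync_synchronize () builtin expands
+;; through this named pattern, so
+;;
+;;   void fence (void) { __sync_synchronize (); }
+;;
+;; should emit a plain "sync" via *sync_internal below.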
+
+(define_insn "*sync_internal"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_operand:BLK 1 "" "")] UNSPEC_SYNC))]
+ ""
+ "{dcs|sync}"
+ [(set_attr "type" "sync")])
+
+(define_insn "load_locked_<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec_volatile:GPR
+ [(match_operand:GPR 1 "memory_operand" "Z")] UNSPECV_LL))]
+ "TARGET_POWERPC"
+ "<larx> %0,%y1"
+ [(set_attr "type" "load_l")])
+
+(define_insn "store_conditional_<mode>"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_SC))
+ (set (match_operand:GPR 1 "memory_operand" "=Z")
+ (match_operand:GPR 2 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC"
+ "<stcx> %2,%y1"
+ [(set_attr "type" "store_c")])
+
+(define_insn_and_split "sync_compare_and_swap<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rI")
+ (match_operand:GPR 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:GPR 4 "=&r"))
+ (clobber (match_scratch:CC 5 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swap (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+})
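+
+;; A hedged usage sketch (function and variable names are illustrative):
+;; the __sync_val_compare_and_swap builtin maps onto this named pattern,
+;; e.g.
+;;
+;;   int cas (int *p, int expected, int desired)
+;;   { return __sync_val_compare_and_swap (p, expected, desired); }
+;;
+;; which rs6000_split_compare_and_swap turns into the usual lwarx /
+;; compare / stwcx. / branch-on-failure retry loop after reload.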
+
+(define_expand "sync_compare_and_swaphi"
+ [(match_operand:HI 0 "gpc_reg_operand" "")
+ (match_operand:HI 1 "memory_operand" "")
+ (match_operand:HI 2 "gpc_reg_operand" "")
+ (match_operand:HI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_compare_and_swapqi"
+ [(match_operand:QI 0 "gpc_reg_operand" "")
+ (match_operand:QI 1 "memory_operand" "")
+ (match_operand:QI 2 "gpc_reg_operand" "")
+ (match_operand:QI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "sync_compare_and_swapqhi_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 4 "memory_operand" "+Z"))
+ (set (match_dup 4)
+ (unspec:SI
+ [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:SI 5 "=&r"))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3], operands[4],
+ operands[5]);
+ DONE;
+})
+
+(define_insn_and_split "sync_lock_test_and_set<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rL")]
+ UNSPEC_XCHG))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_lock_test_and_set (operands[0], operands[1], operands[2],
+ operands[3]);
+ DONE;
+})
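+
+;; A hedged usage sketch: __sync_lock_test_and_set maps onto this
+;; pattern; GCC documents it as an atomic exchange with acquire
+;; semantics, so a minimal spinlock acquire looks like
+;;
+;;   static volatile int lock;
+;;   void take (void) { while (__sync_lock_test_and_set (&lock, 1)) ; }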
+
+(define_expand "sync_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 0)
+ (match_operand:INT1 1 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "memory_operand" "+Z")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "memory_operand" "+Z")
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 0)
+ (match_operand:DI 1 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 0))
+ (match_operand:INT1 1 "gpc_reg_operand" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[0]),
+ operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "memory_operand" "+Z")
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 0))
+ (match_operand:GPR 1 "gpc_reg_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 2 "=&r"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_old_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1)
+ (match_operand:INT1 2 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_old_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (match_operand:DI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1)
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_old_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 1))
+ (match_operand:INT1 2 "gpc_reg_operand" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[1]),
+ operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 1))
+ (match_operand:GPR 2 "gpc_reg_operand" "r"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (FETCHOP:INT1
+ (match_operand:INT1 1 "memory_operand" "")
+ (match_operand:INT1 2 "<fetchop_pred>" "")))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:SI
+ (match_operand:SI 1 "memory_operand" "+Z")
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>")))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_new_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:DI
+ (match_operand:DI 1 "memory_operand" "+Z")
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>")))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (and:INT1
+ (not:INT1 (match_operand:INT1 1 "memory_operand" ""))
+ (match_operand:INT1 2 "gpc_reg_operand" "")))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(and:INT1 (not:INT1 (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (AND, <MODE>mode,
+ gen_rtx_NOT (<MODE>mode, operands[1]),
+ operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (and:GPR
+ (not:GPR (match_operand:GPR 1 "memory_operand" "+Z"))
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(and:GPR (not:GPR (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+; and<mode> without a cr0 clobber, to avoid generating an additional
+; clobber in the atomic splitters and triggering an internal consistency
+; failure.  cr0 is already clobbered by larx/stcx.
+(define_insn "*atomic_andsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")]
+ UNSPEC_AND))]
+ ""
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
+
+(define_insn "*atomic_anddi"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:DI 2 "and_operand" "?r,S,T,K,J")]
+ UNSPEC_AND))]
+ "TARGET_POWERPC64"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2"
+ [(set_attr "type" "*,*,*,compare,compare")
+ (set_attr "length" "4,4,4,4,4")])
+
+; The sync_*_internal patterns below all have these operands:
+; 0 - memory location
+; 1 - second operand of the operation
+; 2 - value in memory after the operation
+; 3 - value in memory immediately before the operation
+
+(define_insn "*sync_addshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (plus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (plus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tadd%I1 %2,%3,%1\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_subshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (minus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (minus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tsubf %2,%1,%3\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_andsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (and:SI (match_operand:SI 0 "memory_operand" "+Z,Z,Z,Z")
+ (match_operand:SI 1 "and_operand" "r,T,K,L")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b,&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(and:SI (match_dup 0) (match_dup 1))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 4 "=&x,&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\tand %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\trlwinm %2,%3,0,%m1,%M1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandi. %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandis. %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16,16")])
+
+(define_insn "*sync_boolsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r")
+ (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 0 "memory_operand" "+Z,Z,Z")
+ (match_operand:SI 1 "logical_operand" "r,K,L")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\t%q4 %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4i %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4is %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16")])
+
+; This pattern could also accept immediate values for operand 1, since the
+; non-NOT version of the operator is used; but that is not very useful,
+; since in practice operand 1 is a full 32-bit value.
+; Likewise, operand 5 is in practice either <= 2^16 or a register.
+(define_insn "*sync_boolcshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (match_operator:SI 4 "boolean_operator"
+ [(xor:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 5 "logical_operand" "rK"))
+ (match_operand:SI 1 "gpc_reg_operand" "r")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\txor%I2 %2,%3,%5\n\t%q4 %2,%2,%1\n\tstwcx. %2,%y0\n\tbne- $-16"
+ [(set_attr "length" "20")])
+
+(define_insn "isync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_ISYNC))]
+ ""
+ "{ics|isync}"
+ [(set_attr "type" "isync")])
+
+(define_expand "sync_lock_release<mode>"
+ [(set (match_operand:INT 0 "memory_operand")
+ (match_operand:INT 1 "any_operand"))]
+ ""
+ "
+{
+ emit_insn (gen_lwsync ());
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+}")
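+
+; As a sketch of what the expander above produces (register names are
+; illustrative only): a release store becomes a barrier followed by a
+; plain store, e.g. for SImode
+;
+;   lwsync           ; order all prior accesses before the store
+;   stw %r4,0(%r3)   ; ordinary store of the release value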
+
+; Some AIX assemblers don't accept lwsync, so we use a .long.
+(define_insn "lwsync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_LWSYNC))]
+ ""
+{
+ if (TARGET_NO_LWSYNC)
+ return "sync";
+ else
+ return ".long 0x7c2004ac";
+}
+ [(set_attr "type" "sync")])
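+
+; For reference: 0x7c2004ac is the encoding of "sync 1" (lwsync), i.e. the
+; base sync opcode 0x7c0004ac with the L field set to 1, so assemblers
+; that lack the lwsync mnemonic still assemble the correct instruction.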
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin
new file mode 100644
index 000000000..563ccc561
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin
@@ -0,0 +1,46 @@
+# APPLE LOCAL begin fpsave.asm moved from _STATIC_EXTRA to _EXTRA --dbj
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \
+ $(srcdir)/config/rs6000/darwin-fpsave.asm \
+ $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/darwin-64.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c \
+ $(srcdir)/config/rs6000/darwin-world.asm
+
+LIB2FUNCS_STATIC_EXTRA = \
+ $(srcdir)/config/rs6000/darwin-vecsave.asm
+# APPLE LOCAL end fpsave.asm moved from _STATIC_EXTRA to _EXTRA --dbj
+
+DARWIN_EXTRA_CRT_BUILD_CFLAGS = -mlongcall
+
+# The .asm files above are designed to run on all processors,
+# even though they use AltiVec instructions. -Wa is used because
+# -force_cpusubtype_ALL doesn't work with -dynamiclib.
+#
+# -pipe is used because of an assembler bug, 4077127, which makes the
+# assembler mishandle the first # directive; temporary file names then
+# leak into stabs and the bootstrap fails. -pipe works around this by
+# avoiding temporary files altogether.
+# APPLE LOCAL begin gcov 5573505
+TARGET_LIBGCC2_CFLAGS = -Wa,-force_cpusubtype_ALL -pipe
+TARGET_LIBGCC2_STATIC_CFLAGS = -mmacosx-version-min=10.4
+# APPLE LOCAL end gcov 5573505
+
+# APPLE LOCAL 5901604 6499452
+MULTILIB_EXTRA_OPTS = isysroot/Developer/SDKs/MacOSX10.5.sdk mmacosx-version-min=10.5
+
+# Export the _xlq* symbols from darwin-ldouble.c.
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
+
+SHLIB_VERPFX = $(srcdir)/config/rs6000/darwin-libgcc
+
+LIB2ADDEH += $(srcdir)/config/rs6000/darwin-fallback.c
+
+darwin-fpsave.o: $(srcdir)/config/rs6000/darwin-asm.h
+darwin-tramp.o: $(srcdir)/config/rs6000/darwin-asm.h
+
+# Explain how to build crt2.o
+$(T)crt2$(objext): $(srcdir)/config/darwin-crt2.c $(GCC_PASSES) \
+ $(TCONFIG_H) stmp-int-hdrs tsystem.h
+ # APPLE LOCAL use -mlongcall for large text support
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -mlongcall \
+ -c $(srcdir)/config/darwin-crt2.c -o $(T)crt2$(objext)
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin8 b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin8
new file mode 100644
index 000000000..2f3bb32f8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-darwin8
@@ -0,0 +1,3 @@
+# 64-bit libraries can only be built on Darwin 8.x or later.
+MULTILIB_OPTIONS = m64
+MULTILIB_DIRNAMES = ppc64
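+# (A brief note on the multilib mechanics: with the two options above,
+# the target libraries are built twice, once with the default 32-bit
+# flags and once with -m64, the latter installed under ppc64/.)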
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/t-rs6000 b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-rs6000
new file mode 100644
index 000000000..3eb099cc7
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/t-rs6000
@@ -0,0 +1,22 @@
+# General rules that all rs6000/ targets must have.
+
+gt-rs6000.h: s-gtype ; @true
+
+# APPLE LOCAL -Os 4178595
+rs6000.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(PARAMS_H) \
+ $(RTL_H) $(REGS_H) hard-reg-set.h \
+ real.h insn-config.h conditions.h insn-attr.h flags.h $(RECOG_H) \
+ $(OBSTACK_H) $(TREE_H) $(EXPR_H) $(OPTABS_H) except.h function.h \
+ output.h $(BASIC_BLOCK_H) $(INTEGRATE_H) toplev.h $(GGC_H) $(HASHTAB_H) \
+ $(TM_P_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h reload.h gt-rs6000.h \
+ cfglayout.h
+
+# APPLE LOCAL AltiVec
+rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c options.h \
+ $(srcdir)/config/rs6000/rs6000-protos.h \
+ $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(CPPLIB_H) \
+ $(TM_P_H) c-pragma.h errors.h coretypes.h $(TM_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/rs6000/rs6000-c.c
+
+# The rs6000 backend doesn't cause warnings in these files.
+insn-conditions.o-warn =
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.h b/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.h
new file mode 100644
index 000000000..56e8786f2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.h
@@ -0,0 +1,4515 @@
+/* APPLE LOCAL file AltiVec */
+/* This file is generated by ops-to-gp. Do not edit. */
+
+/* To regenerate, execute:
+ ops-to-gp -gcc vec.ops builtin.ops
+ with the current directory being gcc/config/rs6000. */
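+
+/* A rough reading of each entry below, inferred from the initializers
+ rather than from ops-to-gp documentation: the argument types, an
+ argument-kind string (one "x" per operand), the result type, the
+ arity, two boolean flags, a small internal code, the overloaded
+ builtin name (with a ":N" variant suffix), the underlying instruction
+ or special-case name, the matching insn code, and a unique builtin
+ ID. */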
+
+static const struct builtin B1_vec_abs = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 11, "vec_abs:1", "4", CODE_FOR_xfx_perm, B_UID(0) };
+static const struct builtin B2_vec_abs = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abs:2", "2", CODE_FOR_xfx_perm, B_UID(1) };
+static const struct builtin B3_vec_abs = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abs:3", "3", CODE_FOR_xfx_perm, B_UID(2) };
+static const struct builtin B4_vec_abs = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abs:4", "1", CODE_FOR_xfx_perm, B_UID(3) };
+static const struct builtin B1_vec_abss = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abss:1", "6", CODE_FOR_xfx_perm, B_UID(4) };
+static const struct builtin B2_vec_abss = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abss:2", "7", CODE_FOR_xfx_perm, B_UID(5) };
+static const struct builtin B3_vec_abss = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abss:3", "5", CODE_FOR_xfx_perm, B_UID(6) };
+static const struct builtin B1_vec_vadduhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:1", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(7) };
+static const struct builtin B2_vec_vadduhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:2", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(8) };
+static const struct builtin B1_vec_vadduwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:1", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(9) };
+static const struct builtin B2_vec_vadduwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:2", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(10) };
+static const struct builtin B1_vec_vaddubm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:1", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(11) };
+static const struct builtin B2_vec_vaddubm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:2", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(12) };
+static const struct builtin B_vec_vaddfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vaddfp", "*vaddfp", CODE_FOR_xfxx_fp, B_UID(13) };
+static const struct builtin B3_vec_vadduhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:3", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(14) };
+static const struct builtin B4_vec_vadduhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:4", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(15) };
+static const struct builtin B3_vec_vadduwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:3", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(16) };
+static const struct builtin B4_vec_vadduwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:4", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(17) };
+static const struct builtin B3_vec_vaddubm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:3", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(18) };
+static const struct builtin B4_vec_vaddubm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:4", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(19) };
+static const struct builtin B5_vec_vadduhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:5", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(20) };
+static const struct builtin B6_vec_vadduhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:6", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(21) };
+static const struct builtin B5_vec_vadduwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:5", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(22) };
+static const struct builtin B6_vec_vadduwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:6", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(23) };
+static const struct builtin B5_vec_vaddubm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:5", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(24) };
+static const struct builtin B6_vec_vaddubm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:6", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(25) };
+static const struct builtin B_vec_vaddcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vaddcuw", "*vaddcuw", CODE_FOR_xfxx_simple, B_UID(26) };
+static const struct builtin B1_vec_vaddshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:1", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(27) };
+static const struct builtin B1_vec_vadduhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:1", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(28) };
+static const struct builtin B1_vec_vaddsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:1", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(29) };
+static const struct builtin B1_vec_vadduws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:1", "*vadduws", CODE_FOR_xfxx_simple, B_UID(30) };
+static const struct builtin B1_vec_vaddsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:1", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(31) };
+static const struct builtin B1_vec_vaddubs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:1", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(32) };
+static const struct builtin B2_vec_vaddshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:2", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(33) };
+static const struct builtin B3_vec_vaddshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:3", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(34) };
+static const struct builtin B2_vec_vaddsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:2", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(35) };
+static const struct builtin B3_vec_vaddsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:3", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(36) };
+static const struct builtin B2_vec_vaddsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:2", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(37) };
+static const struct builtin B3_vec_vaddsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:3", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(38) };
+static const struct builtin B2_vec_vadduhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:2", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(39) };
+static const struct builtin B3_vec_vadduhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:3", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(40) };
+static const struct builtin B2_vec_vadduws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:2", "*vadduws", CODE_FOR_xfxx_simple, B_UID(41) };
+static const struct builtin B3_vec_vadduws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:3", "*vadduws", CODE_FOR_xfxx_simple, B_UID(42) };
+static const struct builtin B2_vec_vaddubs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:2", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(43) };
+static const struct builtin B3_vec_vaddubs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:3", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(44) };
+static const struct builtin B1_vec_all_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:1", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(45) };
+static const struct builtin B2_vec_all_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:2", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(46) };
+static const struct builtin B3_vec_all_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:3", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(47) };
+static const struct builtin B4_vec_all_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:4", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(48) };
+static const struct builtin B5_vec_all_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:5", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(49) };
+static const struct builtin B6_vec_all_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:6", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(50) };
+static const struct builtin B7_vec_all_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:7", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(51) };
+static const struct builtin B8_vec_all_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:8", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(52) };
+static const struct builtin B9_vec_all_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:9", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(53) };
+static const struct builtin B10_vec_all_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:10", "*vcmpeqfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(54) };
+static const struct builtin B11_vec_all_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:11", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(55) };
+static const struct builtin B12_vec_all_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:12", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(56) };
+static const struct builtin B13_vec_all_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:13", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(57) };
+static const struct builtin B14_vec_all_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:14", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(58) };
+static const struct builtin B15_vec_all_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:15", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(59) };
+static const struct builtin B16_vec_all_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:16", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(60) };
+static const struct builtin B17_vec_all_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:17", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(61) };
+static const struct builtin B18_vec_all_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:18", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(62) };
+static const struct builtin B19_vec_all_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:19", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(63) };
+static const struct builtin B20_vec_all_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:20", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(64) };
+static const struct builtin B21_vec_all_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:21", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(65) };
+static const struct builtin B22_vec_all_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:22", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(66) };
+static const struct builtin B23_vec_all_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:23", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(67) };
+static const struct builtin B1_vec_all_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:1", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(68) };
+static const struct builtin B2_vec_all_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:2", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(69) };
+static const struct builtin B3_vec_all_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:3", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(70) };
+static const struct builtin B4_vec_all_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:4", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(71) };
+static const struct builtin B5_vec_all_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:5", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(72) };
+static const struct builtin B6_vec_all_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:6", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(73) };
+static const struct builtin B7_vec_all_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_ge:7", "*vcmpgefp.", CODE_FOR_j_24_t_fxx_simple, B_UID(74) };
+static const struct builtin B8_vec_all_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:8", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(75) };
+static const struct builtin B9_vec_all_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:9", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(76) };
+static const struct builtin B10_vec_all_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:10", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(77) };
+static const struct builtin B11_vec_all_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:11", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(78) };
+static const struct builtin B12_vec_all_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:12", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(79) };
+static const struct builtin B13_vec_all_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:13", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(80) };
+static const struct builtin B14_vec_all_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:14", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(81) };
+static const struct builtin B15_vec_all_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:15", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(82) };
+static const struct builtin B16_vec_all_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:16", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(83) };
+static const struct builtin B17_vec_all_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:17", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(84) };
+static const struct builtin B18_vec_all_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:18", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(85) };
+static const struct builtin B19_vec_all_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:19", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(86) };
+static const struct builtin B1_vec_all_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(87) };
+static const struct builtin B2_vec_all_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(88) };
+static const struct builtin B3_vec_all_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(89) };
+static const struct builtin B4_vec_all_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(90) };
+static const struct builtin B5_vec_all_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(91) };
+static const struct builtin B6_vec_all_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:6", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(92) };
+static const struct builtin B7_vec_all_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(93) };
+static const struct builtin B8_vec_all_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(94) };
+static const struct builtin B9_vec_all_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(95) };
+static const struct builtin B10_vec_all_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(96) };
+static const struct builtin B11_vec_all_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(97) };
+static const struct builtin B12_vec_all_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(98) };
+static const struct builtin B13_vec_all_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(99) };
+static const struct builtin B14_vec_all_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(100) };
+static const struct builtin B15_vec_all_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(101) };
+static const struct builtin B16_vec_all_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(102) };
+static const struct builtin B17_vec_all_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(103) };
+static const struct builtin B18_vec_all_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:18", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(104) };
+static const struct builtin B19_vec_all_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:19", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(105) };
+static const struct builtin B_vec_all_in = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_in", "*vcmpbfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(106) };
+static const struct builtin B1_vec_all_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:1", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(107) };
+static const struct builtin B2_vec_all_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:2", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(108) };
+static const struct builtin B3_vec_all_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:3", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(109) };
+static const struct builtin B4_vec_all_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:4", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(110) };
+static const struct builtin B5_vec_all_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:5", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(111) };
+static const struct builtin B6_vec_all_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:6", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(112) };
+static const struct builtin B7_vec_all_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_le:7", "*vcmpgefp.", CODE_FOR_j_24_t_frxx_simple, B_UID(113) };
+static const struct builtin B8_vec_all_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:8", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(114) };
+static const struct builtin B9_vec_all_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:9", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(115) };
+static const struct builtin B10_vec_all_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:10", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(116) };
+static const struct builtin B11_vec_all_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:11", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(117) };
+static const struct builtin B12_vec_all_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:12", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(118) };
+static const struct builtin B13_vec_all_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:13", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(119) };
+static const struct builtin B14_vec_all_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:14", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(120) };
+static const struct builtin B15_vec_all_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:15", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(121) };
+static const struct builtin B16_vec_all_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:16", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(122) };
+static const struct builtin B17_vec_all_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:17", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(123) };
+static const struct builtin B18_vec_all_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:18", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(124) };
+static const struct builtin B19_vec_all_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:19", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(125) };
+static const struct builtin B1_vec_all_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(126) };
+static const struct builtin B2_vec_all_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(127) };
+static const struct builtin B3_vec_all_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(128) };
+static const struct builtin B4_vec_all_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(129) };
+static const struct builtin B5_vec_all_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(130) };
+static const struct builtin B6_vec_all_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:6", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(131) };
+static const struct builtin B7_vec_all_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_frxx_simple, B_UID(132) };
+static const struct builtin B8_vec_all_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(133) };
+static const struct builtin B9_vec_all_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(134) };
+static const struct builtin B10_vec_all_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(135) };
+static const struct builtin B11_vec_all_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(136) };
+static const struct builtin B12_vec_all_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(137) };
+static const struct builtin B13_vec_all_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(138) };
+static const struct builtin B14_vec_all_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(139) };
+static const struct builtin B15_vec_all_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(140) };
+static const struct builtin B16_vec_all_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(141) };
+static const struct builtin B17_vec_all_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(142) };
+static const struct builtin B18_vec_all_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:18", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(143) };
+static const struct builtin B19_vec_all_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:19", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(144) };
+static const struct builtin B_vec_all_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26td, 1, FALSE, FALSE, 0, "vec_all_nan", "*vcmpeqfp.", CODE_FOR_j_26_t_fx_simple, B_UID(145) };
+static const struct builtin B1_vec_all_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:1", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(146) };
+static const struct builtin B2_vec_all_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:2", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(147) };
+static const struct builtin B3_vec_all_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:3", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(148) };
+static const struct builtin B4_vec_all_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:4", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(149) };
+static const struct builtin B5_vec_all_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:5", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(150) };
+static const struct builtin B6_vec_all_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:6", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(151) };
+static const struct builtin B7_vec_all_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:7", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(152) };
+static const struct builtin B8_vec_all_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:8", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(153) };
+static const struct builtin B9_vec_all_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:9", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(154) };
+static const struct builtin B10_vec_all_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:10", "*vcmpeqfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(155) };
+static const struct builtin B11_vec_all_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:11", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(156) };
+static const struct builtin B12_vec_all_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:12", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(157) };
+static const struct builtin B13_vec_all_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:13", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(158) };
+static const struct builtin B14_vec_all_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:14", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(159) };
+static const struct builtin B15_vec_all_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:15", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(160) };
+static const struct builtin B16_vec_all_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:16", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(161) };
+static const struct builtin B17_vec_all_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:17", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(162) };
+static const struct builtin B18_vec_all_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:18", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(163) };
+static const struct builtin B19_vec_all_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:19", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(164) };
+static const struct builtin B20_vec_all_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:20", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(165) };
+static const struct builtin B21_vec_all_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:21", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(166) };
+static const struct builtin B22_vec_all_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:22", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(167) };
+static const struct builtin B23_vec_all_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:23", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(168) };
+static const struct builtin B_vec_all_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_nge", "*vcmpgefp.", CODE_FOR_j_26_t_fxx_simple, B_UID(169) };
+static const struct builtin B_vec_all_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ngt", "*vcmpgtfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(170) };
+static const struct builtin B_vec_all_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nle", "*vcmpgefp.", CODE_FOR_j_26_t_frxx_simple, B_UID(171) };
+static const struct builtin B_vec_all_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nlt", "*vcmpgtfp.", CODE_FOR_j_26_t_frxx_simple, B_UID(172) };
+static const struct builtin B_vec_all_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24td, 1, FALSE, FALSE, 0, "vec_all_numeric", "*vcmpeqfp.", CODE_FOR_j_24_t_fx_simple, B_UID(173) };
+static const struct builtin B1_vec_vand = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vand:1", "*vand", CODE_FOR_xfxx_simple, B_UID(174) };
+static const struct builtin B2_vec_vand = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:2", "*vand", CODE_FOR_xfxx_simple, B_UID(175) };
+static const struct builtin B3_vec_vand = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:3", "*vand", CODE_FOR_xfxx_simple, B_UID(176) };
+static const struct builtin B4_vec_vand = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vand:4", "*vand", CODE_FOR_xfxx_simple, B_UID(177) };
+static const struct builtin B5_vec_vand = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:5", "*vand", CODE_FOR_xfxx_simple, B_UID(178) };
+static const struct builtin B6_vec_vand = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:6", "*vand", CODE_FOR_xfxx_simple, B_UID(179) };
+static const struct builtin B7_vec_vand = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:7", "*vand", CODE_FOR_xfxx_simple, B_UID(180) };
+static const struct builtin B8_vec_vand = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vand:8", "*vand", CODE_FOR_xfxx_simple, B_UID(181) };
+static const struct builtin B9_vec_vand = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:9", "*vand", CODE_FOR_xfxx_simple, B_UID(182) };
+static const struct builtin B10_vec_vand = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:10", "*vand", CODE_FOR_xfxx_simple, B_UID(183) };
+static const struct builtin B11_vec_vand = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:11", "*vand", CODE_FOR_xfxx_simple, B_UID(184) };
+static const struct builtin B12_vec_vand = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:12", "*vand", CODE_FOR_xfxx_simple, B_UID(185) };
+static const struct builtin B13_vec_vand = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:13", "*vand", CODE_FOR_xfxx_simple, B_UID(186) };
+static const struct builtin B14_vec_vand = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:14", "*vand", CODE_FOR_xfxx_simple, B_UID(187) };
+static const struct builtin B15_vec_vand = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:15", "*vand", CODE_FOR_xfxx_simple, B_UID(188) };
+static const struct builtin B16_vec_vand = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:16", "*vand", CODE_FOR_xfxx_simple, B_UID(189) };
+static const struct builtin B17_vec_vand = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:17", "*vand", CODE_FOR_xfxx_simple, B_UID(190) };
+static const struct builtin B18_vec_vand = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:18", "*vand", CODE_FOR_xfxx_simple, B_UID(191) };
+static const struct builtin B19_vec_vand = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:19", "*vand", CODE_FOR_xfxx_simple, B_UID(192) };
+static const struct builtin B20_vec_vand = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:20", "*vand", CODE_FOR_xfxx_simple, B_UID(193) };
+static const struct builtin B21_vec_vand = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:21", "*vand", CODE_FOR_xfxx_simple, B_UID(194) };
+static const struct builtin B22_vec_vand = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:22", "*vand", CODE_FOR_xfxx_simple, B_UID(195) };
+static const struct builtin B23_vec_vand = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:23", "*vand", CODE_FOR_xfxx_simple, B_UID(196) };
+static const struct builtin B24_vec_vand = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:24", "*vand", CODE_FOR_xfxx_simple, B_UID(197) };
+static const struct builtin B1_vec_vandc = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vandc:1", "*vandc", CODE_FOR_xfxx_simple, B_UID(198) };
+static const struct builtin B2_vec_vandc = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:2", "*vandc", CODE_FOR_xfxx_simple, B_UID(199) };
+static const struct builtin B3_vec_vandc = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:3", "*vandc", CODE_FOR_xfxx_simple, B_UID(200) };
+static const struct builtin B4_vec_vandc = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vandc:4", "*vandc", CODE_FOR_xfxx_simple, B_UID(201) };
+static const struct builtin B5_vec_vandc = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:5", "*vandc", CODE_FOR_xfxx_simple, B_UID(202) };
+static const struct builtin B6_vec_vandc = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:6", "*vandc", CODE_FOR_xfxx_simple, B_UID(203) };
+static const struct builtin B7_vec_vandc = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:7", "*vandc", CODE_FOR_xfxx_simple, B_UID(204) };
+static const struct builtin B8_vec_vandc = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vandc:8", "*vandc", CODE_FOR_xfxx_simple, B_UID(205) };
+static const struct builtin B9_vec_vandc = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:9", "*vandc", CODE_FOR_xfxx_simple, B_UID(206) };
+static const struct builtin B10_vec_vandc = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:10", "*vandc", CODE_FOR_xfxx_simple, B_UID(207) };
+static const struct builtin B11_vec_vandc = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:11", "*vandc", CODE_FOR_xfxx_simple, B_UID(208) };
+static const struct builtin B12_vec_vandc = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:12", "*vandc", CODE_FOR_xfxx_simple, B_UID(209) };
+static const struct builtin B13_vec_vandc = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:13", "*vandc", CODE_FOR_xfxx_simple, B_UID(210) };
+static const struct builtin B14_vec_vandc = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:14", "*vandc", CODE_FOR_xfxx_simple, B_UID(211) };
+static const struct builtin B15_vec_vandc = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:15", "*vandc", CODE_FOR_xfxx_simple, B_UID(212) };
+static const struct builtin B16_vec_vandc = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:16", "*vandc", CODE_FOR_xfxx_simple, B_UID(213) };
+static const struct builtin B17_vec_vandc = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:17", "*vandc", CODE_FOR_xfxx_simple, B_UID(214) };
+static const struct builtin B18_vec_vandc = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:18", "*vandc", CODE_FOR_xfxx_simple, B_UID(215) };
+static const struct builtin B19_vec_vandc = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:19", "*vandc", CODE_FOR_xfxx_simple, B_UID(216) };
+static const struct builtin B20_vec_vandc = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:20", "*vandc", CODE_FOR_xfxx_simple, B_UID(217) };
+static const struct builtin B21_vec_vandc = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:21", "*vandc", CODE_FOR_xfxx_simple, B_UID(218) };
+static const struct builtin B22_vec_vandc = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:22", "*vandc", CODE_FOR_xfxx_simple, B_UID(219) };
+static const struct builtin B23_vec_vandc = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:23", "*vandc", CODE_FOR_xfxx_simple, B_UID(220) };
+static const struct builtin B24_vec_vandc = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:24", "*vandc", CODE_FOR_xfxx_simple, B_UID(221) };
+static const struct builtin B1_vec_any_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:1", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(222) };
+static const struct builtin B2_vec_any_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:2", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(223) };
+static const struct builtin B3_vec_any_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:3", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(224) };
+static const struct builtin B4_vec_any_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:4", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(225) };
+static const struct builtin B5_vec_any_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:5", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(226) };
+static const struct builtin B6_vec_any_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:6", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(227) };
+static const struct builtin B7_vec_any_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:7", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(228) };
+static const struct builtin B8_vec_any_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:8", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(229) };
+static const struct builtin B9_vec_any_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:9", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(230) };
+static const struct builtin B10_vec_any_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:10", "*vcmpeqfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(231) };
+static const struct builtin B11_vec_any_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:11", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(232) };
+static const struct builtin B12_vec_any_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:12", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(233) };
+static const struct builtin B13_vec_any_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:13", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(234) };
+static const struct builtin B14_vec_any_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:14", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(235) };
+static const struct builtin B15_vec_any_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:15", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(236) };
+static const struct builtin B16_vec_any_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:16", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(237) };
+static const struct builtin B17_vec_any_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:17", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(238) };
+static const struct builtin B18_vec_any_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:18", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(239) };
+static const struct builtin B19_vec_any_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:19", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(240) };
+static const struct builtin B20_vec_any_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:20", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(241) };
+static const struct builtin B21_vec_any_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:21", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(242) };
+static const struct builtin B22_vec_any_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:22", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(243) };
+static const struct builtin B23_vec_any_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:23", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(244) };
+static const struct builtin B1_vec_any_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:1", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(245) };
+static const struct builtin B2_vec_any_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:2", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(246) };
+static const struct builtin B3_vec_any_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:3", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(247) };
+static const struct builtin B4_vec_any_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:4", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(248) };
+static const struct builtin B5_vec_any_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:5", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(249) };
+static const struct builtin B6_vec_any_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:6", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(250) };
+static const struct builtin B7_vec_any_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_ge:7", "*vcmpgefp.", CODE_FOR_j_26_f_fxx_simple, B_UID(251) };
+static const struct builtin B8_vec_any_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:8", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(252) };
+static const struct builtin B9_vec_any_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:9", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(253) };
+static const struct builtin B10_vec_any_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:10", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(254) };
+static const struct builtin B11_vec_any_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:11", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(255) };
+static const struct builtin B12_vec_any_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:12", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(256) };
+static const struct builtin B13_vec_any_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:13", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(257) };
+static const struct builtin B14_vec_any_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:14", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(258) };
+static const struct builtin B15_vec_any_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:15", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(259) };
+static const struct builtin B16_vec_any_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:16", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(260) };
+static const struct builtin B17_vec_any_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:17", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(261) };
+static const struct builtin B18_vec_any_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:18", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(262) };
+static const struct builtin B19_vec_any_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:19", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(263) };
+static const struct builtin B1_vec_any_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(264) };
+static const struct builtin B2_vec_any_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(265) };
+static const struct builtin B3_vec_any_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(266) };
+static const struct builtin B4_vec_any_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(267) };
+static const struct builtin B5_vec_any_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(268) };
+static const struct builtin B6_vec_any_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:6", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(269) };
+static const struct builtin B7_vec_any_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(270) };
+static const struct builtin B8_vec_any_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(271) };
+static const struct builtin B9_vec_any_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(272) };
+static const struct builtin B10_vec_any_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(273) };
+static const struct builtin B11_vec_any_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(274) };
+static const struct builtin B12_vec_any_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(275) };
+static const struct builtin B13_vec_any_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(276) };
+static const struct builtin B14_vec_any_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(277) };
+static const struct builtin B15_vec_any_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(278) };
+static const struct builtin B16_vec_any_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(279) };
+static const struct builtin B17_vec_any_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(280) };
+static const struct builtin B18_vec_any_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:18", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(281) };
+static const struct builtin B19_vec_any_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:19", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(282) };
+static const struct builtin B1_vec_any_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:1", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(283) };
+static const struct builtin B2_vec_any_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:2", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(284) };
+static const struct builtin B3_vec_any_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:3", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(285) };
+static const struct builtin B4_vec_any_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:4", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(286) };
+static const struct builtin B5_vec_any_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:5", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(287) };
+static const struct builtin B6_vec_any_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:6", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(288) };
+static const struct builtin B7_vec_any_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_le:7", "*vcmpgefp.", CODE_FOR_j_26_f_frxx_simple, B_UID(289) };
+static const struct builtin B8_vec_any_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:8", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(290) };
+static const struct builtin B9_vec_any_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:9", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(291) };
+static const struct builtin B10_vec_any_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:10", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(292) };
+static const struct builtin B11_vec_any_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:11", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(293) };
+static const struct builtin B12_vec_any_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:12", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(294) };
+static const struct builtin B13_vec_any_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:13", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(295) };
+static const struct builtin B14_vec_any_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:14", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(296) };
+static const struct builtin B15_vec_any_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:15", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(297) };
+static const struct builtin B16_vec_any_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:16", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(298) };
+static const struct builtin B17_vec_any_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:17", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(299) };
+static const struct builtin B18_vec_any_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:18", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(300) };
+static const struct builtin B19_vec_any_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:19", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(301) };
+static const struct builtin B1_vec_any_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(302) };
+static const struct builtin B2_vec_any_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(303) };
+static const struct builtin B3_vec_any_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(304) };
+static const struct builtin B4_vec_any_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(305) };
+static const struct builtin B5_vec_any_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(306) };
+static const struct builtin B6_vec_any_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:6", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(307) };
+static const struct builtin B7_vec_any_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_frxx_simple, B_UID(308) };
+static const struct builtin B8_vec_any_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(309) };
+static const struct builtin B9_vec_any_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(310) };
+static const struct builtin B10_vec_any_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(311) };
+static const struct builtin B11_vec_any_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(312) };
+static const struct builtin B12_vec_any_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(313) };
+static const struct builtin B13_vec_any_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(314) };
+static const struct builtin B14_vec_any_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(315) };
+static const struct builtin B15_vec_any_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(316) };
+static const struct builtin B16_vec_any_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(317) };
+static const struct builtin B17_vec_any_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(318) };
+static const struct builtin B18_vec_any_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:18", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(319) };
+static const struct builtin B19_vec_any_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:19", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(320) };
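+/* vec_any_nan takes one operand; the single-"x" prototype and the "d" in
+   T_cc24fd suggest the generator duplicates it, emitting vcmpeqfp. v,v.  A
+   NaN element compares unequal to itself, so "any NaN" is simply "not all
+   elements equal".  Sketch:
+
+       vector float v;
+       if (vec_any_nan(v))       // vcmpeqfp. v,v; CR6 "all true" is clear
+           ...;
+*/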
+static const struct builtin B_vec_any_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24fd, 1, FALSE, FALSE, 0, "vec_any_nan", "*vcmpeqfp.", CODE_FOR_j_24_f_fx_simple, B_UID(321) };
+static const struct builtin B1_vec_any_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:1", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(322) };
+static const struct builtin B2_vec_any_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:2", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(323) };
+static const struct builtin B3_vec_any_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:3", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(324) };
+static const struct builtin B4_vec_any_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:4", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(325) };
+static const struct builtin B5_vec_any_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:5", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(326) };
+static const struct builtin B6_vec_any_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:6", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(327) };
+static const struct builtin B7_vec_any_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:7", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(328) };
+static const struct builtin B8_vec_any_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:8", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(329) };
+static const struct builtin B9_vec_any_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:9", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(330) };
+static const struct builtin B10_vec_any_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:10", "*vcmpeqfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(331) };
+static const struct builtin B11_vec_any_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:11", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(332) };
+static const struct builtin B12_vec_any_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:12", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(333) };
+static const struct builtin B13_vec_any_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:13", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(334) };
+static const struct builtin B14_vec_any_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:14", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(335) };
+static const struct builtin B15_vec_any_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:15", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(336) };
+static const struct builtin B16_vec_any_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:16", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(337) };
+static const struct builtin B17_vec_any_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:17", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(338) };
+static const struct builtin B18_vec_any_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:18", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(339) };
+static const struct builtin B19_vec_any_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:19", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(340) };
+static const struct builtin B20_vec_any_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:20", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(341) };
+static const struct builtin B21_vec_any_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:21", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(342) };
+static const struct builtin B22_vec_any_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:22", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(343) };
+static const struct builtin B23_vec_any_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:23", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(344) };
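+/* The negated float predicates vec_any_nge/ngt/nle/nlt reuse vcmpgefp. and
+   vcmpgtfp. with the opposite CR6 test.  Unlike vec_any_lt and friends they
+   are also true when an element is NaN, because NaN makes every ordered
+   comparison false.  */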
+static const struct builtin B_vec_any_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_nge", "*vcmpgefp.", CODE_FOR_j_24_f_fxx_simple, B_UID(345) };
+static const struct builtin B_vec_any_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ngt", "*vcmpgtfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(346) };
+static const struct builtin B_vec_any_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nle", "*vcmpgefp.", CODE_FOR_j_24_f_frxx_simple, B_UID(347) };
+static const struct builtin B_vec_any_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nlt", "*vcmpgtfp.", CODE_FOR_j_24_f_frxx_simple, B_UID(348) };
+static const struct builtin B_vec_any_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26fd, 1, FALSE, FALSE, 0, "vec_any_numeric", "*vcmpeqfp.", CODE_FOR_j_26_f_fx_simple, B_UID(349) };
+static const struct builtin B_vec_any_out = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_out", "*vcmpbfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(350) };
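+/* vec_any_numeric is true when at least one element is a number, i.e. when
+   vcmpeqfp. v,v is not all-false, and vec_any_out uses the bounds compare
+   vcmpbfp. to ask whether any element of a lies outside [-b, b].  The vavg*
+   entries that follow are element-wise rounded averages,
+   (a[i] + b[i] + 1) >> 1, in the width named by the mnemonic.  For example:
+
+       vector unsigned char a, b;
+       vector unsigned char m = vec_vavgub(a, b);  // per-byte (a+b+1)>>1
+*/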
+static const struct builtin B_vec_vavgsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vavgsh", "*vavgsh", CODE_FOR_xfxx_simple, B_UID(351) };
+static const struct builtin B_vec_vavgsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vavgsw", "*vavgsw", CODE_FOR_xfxx_simple, B_UID(352) };
+static const struct builtin B_vec_vavgsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vavgsb", "*vavgsb", CODE_FOR_xfxx_simple, B_UID(353) };
+static const struct builtin B_vec_vavguh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vavguh", "*vavguh", CODE_FOR_xfxx_simple, B_UID(354) };
+static const struct builtin B_vec_vavguw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vavguw", "*vavguw", CODE_FOR_xfxx_simple, B_UID(355) };
+static const struct builtin B_vec_vavgub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vavgub", "*vavgub", CODE_FOR_xfxx_simple, B_UID(356) };
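+/* vec_vrfip rounds each float element toward +infinity (vec_ceil).  The
+   non-dot compares below (vcmpbfp through vcmpgtub) return element masks,
+   all-ones where the relation holds and all-zeros elsewhere, rather than a
+   CR6 predicate.  vec_cmple and vec_cmplt have no instruction of their own;
+   the "10" flag apparently tells the generator to emit the matching ge/gt
+   compare with the operands swapped.  Sketch:
+
+       vector float a, b;
+       vector bool int lt = vec_cmplt(a, b);   // vcmpgtfp b,a
+*/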
+static const struct builtin B_vec_vrfip = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfip", "*vrfip", CODE_FOR_xfx_fp, B_UID(357) };
+static const struct builtin B_vec_vcmpbfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vcmpbfp", "*vcmpbfp", CODE_FOR_xfxx_simple, B_UID(358) };
+static const struct builtin B_vec_vcmpeqfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpeqfp", "*vcmpeqfp", CODE_FOR_xfxx_simple, B_UID(359) };
+static const struct builtin B1_vec_vcmpequh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:1", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(360) };
+static const struct builtin B1_vec_vcmpequw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:1", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(361) };
+static const struct builtin B1_vec_vcmpequb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:1", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(362) };
+static const struct builtin B2_vec_vcmpequh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:2", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(363) };
+static const struct builtin B2_vec_vcmpequw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:2", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(364) };
+static const struct builtin B2_vec_vcmpequb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:2", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(365) };
+static const struct builtin B_vec_vcmpgefp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgefp", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(366) };
+static const struct builtin B_vec_vcmpgtfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtfp", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(367) };
+static const struct builtin B_vec_vcmpgtsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtsh", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(368) };
+static const struct builtin B_vec_vcmpgtsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtsw", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(369) };
+static const struct builtin B_vec_vcmpgtsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtsb", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(370) };
+static const struct builtin B_vec_vcmpgtuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtuh", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(371) };
+static const struct builtin B_vec_vcmpgtuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtuw", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(372) };
+static const struct builtin B_vec_vcmpgtub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtub", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(373) };
+static const struct builtin B_vec_cmple = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmple", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(374) };
+static const struct builtin B1_vec_cmplt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:1", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(375) };
+static const struct builtin B2_vec_cmplt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:2", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(376) };
+static const struct builtin B3_vec_cmplt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:3", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(377) };
+static const struct builtin B4_vec_cmplt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:4", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(378) };
+static const struct builtin B5_vec_cmplt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:5", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(379) };
+static const struct builtin B6_vec_cmplt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:6", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(380) };
+static const struct builtin B7_vec_cmplt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:7", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(381) };
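+/* Fixed-point conversions.  vec_vcfsx/vec_vcfux turn signed or unsigned
+   32-bit elements into floats scaled by 2^-B, and vec_vctsxs/vec_vctuxs go
+   the other way, scaling by 2^B with saturation; B is the 5-bit immediate
+   (T_immed_u5).  Illustrative use:
+
+       vector signed int q;
+       vector float f = vec_vcfsx(q, 16);   // f[i] = (float)q[i] / 65536.0f
+*/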
+static const struct builtin B_vec_vcfsx = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfsx", "*vcfsx", CODE_FOR_xfxB_fp, B_UID(382) };
+static const struct builtin B_vec_vcfux = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfux", "*vcfux", CODE_FOR_xfxB_fp, B_UID(383) };
+static const struct builtin B_vec_vctsxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vctsxs", "*vctsxs", CODE_FOR_xfxB_fp, B_UID(384) };
+static const struct builtin B_vec_vctuxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vctuxs", "*vctuxs", CODE_FOR_xfxB_fp, B_UID(385) };
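+/* Data-stream prefetch builtins.  vec_dst programs one of four hardware
+   prefetch streams: the first argument is the start address, the int packs
+   block size, count and stride, and the 2-bit immediate is the stream tag.
+   dstt/dststt are the transient forms, dstst/dststt hint at a later store,
+   vec_dss stops one stream and vec_dssall stops all four.  The twenty
+   overloads per builtin differ only in pointer type.  Sketch (control_word
+   is a placeholder, not defined here):
+
+       vec_dst(src, control_word, 0);   // start prefetching ahead of a loop
+       ...                              // consume *src
+       vec_dss(0);                      // shut stream 0 down afterwards
+*/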
+static const struct builtin B_vec_dss = { { &T_immed_u2, NULL, NULL, }, "D", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_dss", "*dss", CODE_FOR_vlfD_load, B_UID(386) };
+static const struct builtin B_vec_dssall = { { NULL, NULL, NULL, }, "", &T_volatile_void, 0, FALSE, FALSE, 0, "vec_dssall", "*dssall", CODE_FOR_vlf_load, B_UID(387) };
+static const struct builtin B1_vec_dst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:1", "*dst", CODE_FOR_vlfiiD_load, B_UID(388) };
+static const struct builtin B2_vec_dst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:2", "*dst", CODE_FOR_vlfiiD_load, B_UID(389) };
+static const struct builtin B3_vec_dst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:3", "*dst", CODE_FOR_vlfiiD_load, B_UID(390) };
+static const struct builtin B4_vec_dst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:4", "*dst", CODE_FOR_vlfiiD_load, B_UID(391) };
+static const struct builtin B5_vec_dst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:5", "*dst", CODE_FOR_vlfiiD_load, B_UID(392) };
+static const struct builtin B6_vec_dst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:6", "*dst", CODE_FOR_vlfiiD_load, B_UID(393) };
+static const struct builtin B7_vec_dst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:7", "*dst", CODE_FOR_vlfiiD_load, B_UID(394) };
+static const struct builtin B8_vec_dst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:8", "*dst", CODE_FOR_vlfiiD_load, B_UID(395) };
+static const struct builtin B9_vec_dst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:9", "*dst", CODE_FOR_vlfiiD_load, B_UID(396) };
+static const struct builtin B10_vec_dst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:10", "*dst", CODE_FOR_vlfiiD_load, B_UID(397) };
+static const struct builtin B11_vec_dst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:11", "*dst", CODE_FOR_vlfiiD_load, B_UID(398) };
+static const struct builtin B12_vec_dst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:12", "*dst", CODE_FOR_vlfiiD_load, B_UID(399) };
+static const struct builtin B13_vec_dst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:13", "*dst", CODE_FOR_vlfiiD_load, B_UID(400) };
+static const struct builtin B14_vec_dst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:14", "*dst", CODE_FOR_vlfiiD_load, B_UID(401) };
+static const struct builtin B15_vec_dst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:15", "*dst", CODE_FOR_vlfiiD_load, B_UID(402) };
+static const struct builtin B16_vec_dst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:16", "*dst", CODE_FOR_vlfiiD_load, B_UID(403) };
+static const struct builtin B17_vec_dst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:17", "*dst", CODE_FOR_vlfiiD_load, B_UID(404) };
+static const struct builtin B18_vec_dst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:18", "*dst", CODE_FOR_vlfiiD_load, B_UID(405) };
+static const struct builtin B19_vec_dst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:19", "*dst", CODE_FOR_vlfiiD_load, B_UID(406) };
+static const struct builtin B20_vec_dst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:20", "*dst", CODE_FOR_vlfiiD_load, B_UID(407) };
+static const struct builtin B1_vec_dstst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:1", "*dstst", CODE_FOR_vlfiiD_load, B_UID(408) };
+static const struct builtin B2_vec_dstst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:2", "*dstst", CODE_FOR_vlfiiD_load, B_UID(409) };
+static const struct builtin B3_vec_dstst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:3", "*dstst", CODE_FOR_vlfiiD_load, B_UID(410) };
+static const struct builtin B4_vec_dstst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:4", "*dstst", CODE_FOR_vlfiiD_load, B_UID(411) };
+static const struct builtin B5_vec_dstst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:5", "*dstst", CODE_FOR_vlfiiD_load, B_UID(412) };
+static const struct builtin B6_vec_dstst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:6", "*dstst", CODE_FOR_vlfiiD_load, B_UID(413) };
+static const struct builtin B7_vec_dstst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:7", "*dstst", CODE_FOR_vlfiiD_load, B_UID(414) };
+static const struct builtin B8_vec_dstst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:8", "*dstst", CODE_FOR_vlfiiD_load, B_UID(415) };
+static const struct builtin B9_vec_dstst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:9", "*dstst", CODE_FOR_vlfiiD_load, B_UID(416) };
+static const struct builtin B10_vec_dstst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:10", "*dstst", CODE_FOR_vlfiiD_load, B_UID(417) };
+static const struct builtin B11_vec_dstst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:11", "*dstst", CODE_FOR_vlfiiD_load, B_UID(418) };
+static const struct builtin B12_vec_dstst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:12", "*dstst", CODE_FOR_vlfiiD_load, B_UID(419) };
+static const struct builtin B13_vec_dstst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:13", "*dstst", CODE_FOR_vlfiiD_load, B_UID(420) };
+static const struct builtin B14_vec_dstst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:14", "*dstst", CODE_FOR_vlfiiD_load, B_UID(421) };
+static const struct builtin B15_vec_dstst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:15", "*dstst", CODE_FOR_vlfiiD_load, B_UID(422) };
+static const struct builtin B16_vec_dstst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:16", "*dstst", CODE_FOR_vlfiiD_load, B_UID(423) };
+static const struct builtin B17_vec_dstst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:17", "*dstst", CODE_FOR_vlfiiD_load, B_UID(424) };
+static const struct builtin B18_vec_dstst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:18", "*dstst", CODE_FOR_vlfiiD_load, B_UID(425) };
+static const struct builtin B19_vec_dstst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:19", "*dstst", CODE_FOR_vlfiiD_load, B_UID(426) };
+static const struct builtin B20_vec_dstst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:20", "*dstst", CODE_FOR_vlfiiD_load, B_UID(427) };
+static const struct builtin B1_vec_dststt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:1", "*dststt", CODE_FOR_vlfiiD_load, B_UID(428) };
+static const struct builtin B2_vec_dststt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:2", "*dststt", CODE_FOR_vlfiiD_load, B_UID(429) };
+static const struct builtin B3_vec_dststt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:3", "*dststt", CODE_FOR_vlfiiD_load, B_UID(430) };
+static const struct builtin B4_vec_dststt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:4", "*dststt", CODE_FOR_vlfiiD_load, B_UID(431) };
+static const struct builtin B5_vec_dststt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:5", "*dststt", CODE_FOR_vlfiiD_load, B_UID(432) };
+static const struct builtin B6_vec_dststt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:6", "*dststt", CODE_FOR_vlfiiD_load, B_UID(433) };
+static const struct builtin B7_vec_dststt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:7", "*dststt", CODE_FOR_vlfiiD_load, B_UID(434) };
+static const struct builtin B8_vec_dststt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:8", "*dststt", CODE_FOR_vlfiiD_load, B_UID(435) };
+static const struct builtin B9_vec_dststt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:9", "*dststt", CODE_FOR_vlfiiD_load, B_UID(436) };
+static const struct builtin B10_vec_dststt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:10", "*dststt", CODE_FOR_vlfiiD_load, B_UID(437) };
+static const struct builtin B11_vec_dststt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:11", "*dststt", CODE_FOR_vlfiiD_load, B_UID(438) };
+static const struct builtin B12_vec_dststt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:12", "*dststt", CODE_FOR_vlfiiD_load, B_UID(439) };
+static const struct builtin B13_vec_dststt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:13", "*dststt", CODE_FOR_vlfiiD_load, B_UID(440) };
+static const struct builtin B14_vec_dststt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:14", "*dststt", CODE_FOR_vlfiiD_load, B_UID(441) };
+static const struct builtin B15_vec_dststt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:15", "*dststt", CODE_FOR_vlfiiD_load, B_UID(442) };
+static const struct builtin B16_vec_dststt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:16", "*dststt", CODE_FOR_vlfiiD_load, B_UID(443) };
+static const struct builtin B17_vec_dststt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:17", "*dststt", CODE_FOR_vlfiiD_load, B_UID(444) };
+static const struct builtin B18_vec_dststt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:18", "*dststt", CODE_FOR_vlfiiD_load, B_UID(445) };
+static const struct builtin B19_vec_dststt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:19", "*dststt", CODE_FOR_vlfiiD_load, B_UID(446) };
+static const struct builtin B20_vec_dststt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:20", "*dststt", CODE_FOR_vlfiiD_load, B_UID(447) };
+static const struct builtin B1_vec_dstt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:1", "*dstt", CODE_FOR_vlfiiD_load, B_UID(448) };
+static const struct builtin B2_vec_dstt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:2", "*dstt", CODE_FOR_vlfiiD_load, B_UID(449) };
+static const struct builtin B3_vec_dstt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:3", "*dstt", CODE_FOR_vlfiiD_load, B_UID(450) };
+static const struct builtin B4_vec_dstt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:4", "*dstt", CODE_FOR_vlfiiD_load, B_UID(451) };
+static const struct builtin B5_vec_dstt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:5", "*dstt", CODE_FOR_vlfiiD_load, B_UID(452) };
+static const struct builtin B6_vec_dstt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:6", "*dstt", CODE_FOR_vlfiiD_load, B_UID(453) };
+static const struct builtin B7_vec_dstt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:7", "*dstt", CODE_FOR_vlfiiD_load, B_UID(454) };
+static const struct builtin B8_vec_dstt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:8", "*dstt", CODE_FOR_vlfiiD_load, B_UID(455) };
+static const struct builtin B9_vec_dstt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:9", "*dstt", CODE_FOR_vlfiiD_load, B_UID(456) };
+static const struct builtin B10_vec_dstt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:10", "*dstt", CODE_FOR_vlfiiD_load, B_UID(457) };
+static const struct builtin B11_vec_dstt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:11", "*dstt", CODE_FOR_vlfiiD_load, B_UID(458) };
+static const struct builtin B12_vec_dstt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:12", "*dstt", CODE_FOR_vlfiiD_load, B_UID(459) };
+static const struct builtin B13_vec_dstt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:13", "*dstt", CODE_FOR_vlfiiD_load, B_UID(460) };
+static const struct builtin B14_vec_dstt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:14", "*dstt", CODE_FOR_vlfiiD_load, B_UID(461) };
+static const struct builtin B15_vec_dstt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:15", "*dstt", CODE_FOR_vlfiiD_load, B_UID(462) };
+static const struct builtin B16_vec_dstt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:16", "*dstt", CODE_FOR_vlfiiD_load, B_UID(463) };
+static const struct builtin B17_vec_dstt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:17", "*dstt", CODE_FOR_vlfiiD_load, B_UID(464) };
+static const struct builtin B18_vec_dstt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:18", "*dstt", CODE_FOR_vlfiiD_load, B_UID(465) };
+static const struct builtin B19_vec_dstt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:19", "*dstt", CODE_FOR_vlfiiD_load, B_UID(466) };
+static const struct builtin B20_vec_dstt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:20", "*dstt", CODE_FOR_vlfiiD_load, B_UID(467) };
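+/* vec_vexptefp below is the 2^x estimate instruction and vec_vrfim rounds
+   each element toward -infinity (vec_floor); both are unary FP ops.  */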
+static const struct builtin B_vec_vexptefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vexptefp", "*vexptefp", CODE_FOR_xfx_fp, B_UID(468) };
+static const struct builtin B_vec_vrfim = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfim", "*vrfim", CODE_FOR_xfx_fp, B_UID(469) };
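+/* Vector loads.  vec_lvx is the 16-byte-aligned indexed load behind vec_ld
+   (the hardware ignores the low four bits of the effective address);
+   lvebx/lvehx/lvewx load a single byte/halfword/word element, and lvxl is
+   the LRU-hinted variant behind vec_ldl.  The "ii" prototypes take an int
+   offset plus a pointer, and the TRUE flag evidently marks memory
+   operations.  Typical use:
+
+       const float *p;
+       vector float v = vec_ld(16, p);   // quadword at (p + 16 bytes) & ~15
+*/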
+static const struct builtin B1_vec_lvx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:1", "*lvx", CODE_FOR_xlfii_load, B_UID(470) };
+static const struct builtin B2_vec_lvx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:2", "*lvx", CODE_FOR_xlfii_load, B_UID(471) };
+static const struct builtin B3_vec_lvx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:3", "*lvx", CODE_FOR_xlfii_load, B_UID(472) };
+static const struct builtin B4_vec_lvx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:4", "*lvx", CODE_FOR_xlfii_load, B_UID(473) };
+static const struct builtin B5_vec_lvx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:5", "*lvx", CODE_FOR_xlfii_load, B_UID(474) };
+static const struct builtin B6_vec_lvx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:6", "*lvx", CODE_FOR_xlfii_load, B_UID(475) };
+static const struct builtin B7_vec_lvx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:7", "*lvx", CODE_FOR_xlfii_load, B_UID(476) };
+static const struct builtin B8_vec_lvx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:8", "*lvx", CODE_FOR_xlfii_load, B_UID(477) };
+static const struct builtin B9_vec_lvx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:9", "*lvx", CODE_FOR_xlfii_load, B_UID(478) };
+static const struct builtin B10_vec_lvx = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvx:10", "*lvx", CODE_FOR_xlfii_load, B_UID(479) };
+static const struct builtin B11_vec_lvx = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvx:11", "*lvx", CODE_FOR_xlfii_load, B_UID(480) };
+static const struct builtin B12_vec_lvx = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvx:12", "*lvx", CODE_FOR_xlfii_load, B_UID(481) };
+static const struct builtin B13_vec_lvx = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:13", "*lvx", CODE_FOR_xlfii_load, B_UID(482) };
+static const struct builtin B14_vec_lvx = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvx:14", "*lvx", CODE_FOR_xlfii_load, B_UID(483) };
+static const struct builtin B15_vec_lvx = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:15", "*lvx", CODE_FOR_xlfii_load, B_UID(484) };
+static const struct builtin B16_vec_lvx = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:16", "*lvx", CODE_FOR_xlfii_load, B_UID(485) };
+static const struct builtin B17_vec_lvx = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:17", "*lvx", CODE_FOR_xlfii_load, B_UID(486) };
+static const struct builtin B18_vec_lvx = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:18", "*lvx", CODE_FOR_xlfii_load, B_UID(487) };
+static const struct builtin B19_vec_lvx = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:19", "*lvx", CODE_FOR_xlfii_load, B_UID(488) };
+static const struct builtin B20_vec_lvx = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:20", "*lvx", CODE_FOR_xlfii_load, B_UID(489) };
+static const struct builtin B1_vec_lvewx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvewx:1", "*lvewx", CODE_FOR_xlfii_load, B_UID(490) };
+static const struct builtin B2_vec_lvewx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:2", "*lvewx", CODE_FOR_xlfii_load, B_UID(491) };
+static const struct builtin B3_vec_lvewx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:3", "*lvewx", CODE_FOR_xlfii_load, B_UID(492) };
+static const struct builtin B1_vec_lvehx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvehx:1", "*lvehx", CODE_FOR_xlfii_load, B_UID(493) };
+static const struct builtin B1_vec_lvebx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvebx:1", "*lvebx", CODE_FOR_xlfii_load, B_UID(494) };
+static const struct builtin B2_vec_lvebx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvebx:2", "*lvebx", CODE_FOR_xlfii_load, B_UID(495) };
+static const struct builtin B4_vec_lvewx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:4", "*lvewx", CODE_FOR_xlfii_load, B_UID(496) };
+static const struct builtin B5_vec_lvewx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:5", "*lvewx", CODE_FOR_xlfii_load, B_UID(497) };
+static const struct builtin B2_vec_lvehx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvehx:2", "*lvehx", CODE_FOR_xlfii_load, B_UID(498) };
+static const struct builtin B1_vec_lvxl = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:1", "*lvxl", CODE_FOR_xlfii_load, B_UID(499) };
+static const struct builtin B2_vec_lvxl = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:2", "*lvxl", CODE_FOR_xlfii_load, B_UID(500) };
+static const struct builtin B3_vec_lvxl = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:3", "*lvxl", CODE_FOR_xlfii_load, B_UID(501) };
+static const struct builtin B4_vec_lvxl = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:4", "*lvxl", CODE_FOR_xlfii_load, B_UID(502) };
+static const struct builtin B5_vec_lvxl = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:5", "*lvxl", CODE_FOR_xlfii_load, B_UID(503) };
+static const struct builtin B6_vec_lvxl = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:6", "*lvxl", CODE_FOR_xlfii_load, B_UID(504) };
+static const struct builtin B7_vec_lvxl = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:7", "*lvxl", CODE_FOR_xlfii_load, B_UID(505) };
+static const struct builtin B8_vec_lvxl = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:8", "*lvxl", CODE_FOR_xlfii_load, B_UID(506) };
+static const struct builtin B9_vec_lvxl = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:9", "*lvxl", CODE_FOR_xlfii_load, B_UID(507) };
+static const struct builtin B10_vec_lvxl = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvxl:10", "*lvxl", CODE_FOR_xlfii_load, B_UID(508) };
+static const struct builtin B11_vec_lvxl = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvxl:11", "*lvxl", CODE_FOR_xlfii_load, B_UID(509) };
+static const struct builtin B12_vec_lvxl = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvxl:12", "*lvxl", CODE_FOR_xlfii_load, B_UID(510) };
+static const struct builtin B13_vec_lvxl = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:13", "*lvxl", CODE_FOR_xlfii_load, B_UID(511) };
+static const struct builtin B14_vec_lvxl = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvxl:14", "*lvxl", CODE_FOR_xlfii_load, B_UID(512) };
+static const struct builtin B15_vec_lvxl = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:15", "*lvxl", CODE_FOR_xlfii_load, B_UID(513) };
+static const struct builtin B16_vec_lvxl = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:16", "*lvxl", CODE_FOR_xlfii_load, B_UID(514) };
+static const struct builtin B17_vec_lvxl = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:17", "*lvxl", CODE_FOR_xlfii_load, B_UID(515) };
+static const struct builtin B18_vec_lvxl = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:18", "*lvxl", CODE_FOR_xlfii_load, B_UID(516) };
+static const struct builtin B19_vec_lvxl = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:19", "*lvxl", CODE_FOR_xlfii_load, B_UID(517) };
+static const struct builtin B20_vec_lvxl = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:20", "*lvxl", CODE_FOR_xlfii_load, B_UID(518) };
+static const struct builtin B_vec_vlogefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vlogefp", "*vlogefp", CODE_FOR_xfx_fp, B_UID(519) };
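+/* vec_vlogefp above is the log2 estimate.  vec_lvsl/vec_lvsr below build
+   the permute control vectors used for misaligned access: lvsl yields
+   {off, off+1, ..., off+15} where off is the low four bits of the address,
+   lvsr the complementary vector for the right-aligned variant.  The classic
+   unaligned-load idiom (illustrative):
+
+       vector unsigned char perm = vec_lvsl(0, p);
+       vector float lo = vec_ld(0, p), hi = vec_ld(15, p);
+       vector float v  = vec_perm(lo, hi, perm);   // 16 bytes starting at p
+*/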
+static const struct builtin B1_vec_lvsl = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:1", "*lvsl", CODE_FOR_xfii_load, B_UID(520) };
+static const struct builtin B2_vec_lvsl = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:2", "*lvsl", CODE_FOR_xfii_load, B_UID(521) };
+static const struct builtin B3_vec_lvsl = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:3", "*lvsl", CODE_FOR_xfii_load, B_UID(522) };
+static const struct builtin B4_vec_lvsl = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:4", "*lvsl", CODE_FOR_xfii_load, B_UID(523) };
+static const struct builtin B5_vec_lvsl = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:5", "*lvsl", CODE_FOR_xfii_load, B_UID(524) };
+static const struct builtin B6_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:6", "*lvsl", CODE_FOR_xfii_load, B_UID(525) };
+static const struct builtin B7_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:7", "*lvsl", CODE_FOR_xfii_load, B_UID(526) };
+static const struct builtin B8_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:8", "*lvsl", CODE_FOR_xfii_load, B_UID(527) };
+static const struct builtin B9_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:9", "*lvsl", CODE_FOR_xfii_load, B_UID(528) };
+static const struct builtin B1_vec_lvsr = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:1", "*lvsr", CODE_FOR_xfii_load, B_UID(529) };
+static const struct builtin B2_vec_lvsr = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:2", "*lvsr", CODE_FOR_xfii_load, B_UID(530) };
+static const struct builtin B3_vec_lvsr = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:3", "*lvsr", CODE_FOR_xfii_load, B_UID(531) };
+static const struct builtin B4_vec_lvsr = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:4", "*lvsr", CODE_FOR_xfii_load, B_UID(532) };
+static const struct builtin B5_vec_lvsr = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:5", "*lvsr", CODE_FOR_xfii_load, B_UID(533) };
+static const struct builtin B6_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:6", "*lvsr", CODE_FOR_xfii_load, B_UID(534) };
+static const struct builtin B7_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:7", "*lvsr", CODE_FOR_xfii_load, B_UID(535) };
+static const struct builtin B8_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:8", "*lvsr", CODE_FOR_xfii_load, B_UID(536) };
+static const struct builtin B9_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:9", "*lvsr", CODE_FOR_xfii_load, B_UID(537) };
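+/* vec_vmaddfp is the single-instruction multiply-add behind vec_madd, and
+   vec_vmhaddshs multiplies signed halfwords, keeps the high-order product
+   (>> 15), adds the third operand and saturates (vec_madds).  Sketch:
+
+       vector float a, b, c;
+       vector float r = vec_madd(a, b, c);   // r[i] = a[i]*b[i] + c[i]
+*/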
+static const struct builtin B_vec_vmaddfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vmaddfp", "*vmaddfp", CODE_FOR_xfxxx_fp, B_UID(538) };
+static const struct builtin B_vec_vmhaddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhaddshs", "*vmhaddshs", CODE_FOR_xfxxx_complex, B_UID(539) };
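+/* Element-wise maxima.  Each vmax* mnemonic appears several times because
+   the generic vec_max also accepts mixed bool/signed and bool/unsigned
+   operand pairs; every overload of a mnemonic emits the same instruction.
+   Sketch:
+
+       vector unsigned char a, b;
+       vector unsigned char m = vec_max(a, b);   // m[i] = max(a[i], b[i])
+*/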
+static const struct builtin B1_vec_vmaxsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:1", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(540) };
+static const struct builtin B1_vec_vmaxuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:1", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(541) };
+static const struct builtin B1_vec_vmaxsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:1", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(542) };
+static const struct builtin B1_vec_vmaxuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:1", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(543) };
+static const struct builtin B1_vec_vmaxsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:1", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(544) };
+static const struct builtin B1_vec_vmaxub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:1", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(545) };
+static const struct builtin B_vec_vmaxfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vmaxfp", "*vmaxfp", CODE_FOR_xfxx_simple, B_UID(546) };
+static const struct builtin B2_vec_vmaxsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:2", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(547) };
+static const struct builtin B3_vec_vmaxsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:3", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(548) };
+static const struct builtin B2_vec_vmaxsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:2", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(549) };
+static const struct builtin B3_vec_vmaxsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:3", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(550) };
+static const struct builtin B2_vec_vmaxsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:2", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(551) };
+static const struct builtin B3_vec_vmaxsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:3", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(552) };
+static const struct builtin B2_vec_vmaxuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:2", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(553) };
+static const struct builtin B3_vec_vmaxuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:3", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(554) };
+static const struct builtin B2_vec_vmaxuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:2", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(555) };
+static const struct builtin B3_vec_vmaxuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:3", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(556) };
+static const struct builtin B2_vec_vmaxub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:2", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(557) };
+static const struct builtin B3_vec_vmaxub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:3", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(558) };
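+/* Merge builtins.  vec_vmrgh* interleaves the elements of the high halves
+   of its operands, vec_vmrgl* those of the low halves.  Illustrative use:
+
+       vector signed short a, b;
+       vector signed short hi = vec_mergeh(a, b);  // a0 b0 a1 b1 a2 b2 a3 b3
+*/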
+static const struct builtin B1_vec_vmrghh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrghh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(559) };
+static const struct builtin B1_vec_vmrghw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrghw:1", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(560) };
+static const struct builtin B1_vec_vmrghb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrghb:1", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(561) };
+static const struct builtin B2_vec_vmrghw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrghw:2", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(562) };
+static const struct builtin B2_vec_vmrghh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrghh:2", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(563) };
+static const struct builtin B3_vec_vmrghh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrghh:3", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(564) };
+static const struct builtin B3_vec_vmrghw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrghw:3", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(565) };
+static const struct builtin B2_vec_vmrghb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrghb:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(566) };
+static const struct builtin B4_vec_vmrghh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrghh:4", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(567) };
+static const struct builtin B4_vec_vmrghw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrghw:4", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(568) };
+static const struct builtin B3_vec_vmrghb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrghb:3", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(569) };
+static const struct builtin B1_vec_vmrglh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrglh:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(570) };
+static const struct builtin B1_vec_vmrglw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrglw:1", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(571) };
+static const struct builtin B1_vec_vmrglb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrglb:1", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(572) };
+static const struct builtin B2_vec_vmrglw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrglw:2", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(573) };
+static const struct builtin B2_vec_vmrglh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrglh:2", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(574) };
+static const struct builtin B3_vec_vmrglh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrglh:3", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(575) };
+static const struct builtin B3_vec_vmrglw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrglw:3", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(576) };
+static const struct builtin B2_vec_vmrglb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrglb:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(577) };
+static const struct builtin B4_vec_vmrglh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrglh:4", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(578) };
+static const struct builtin B4_vec_vmrglw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrglw:4", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(579) };
+static const struct builtin B3_vec_vmrglb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrglb:3", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(580) };
+static const struct builtin B_vec_mfvscr = { { NULL, NULL, NULL, }, "", &T_volatile_vec_u16, 0, FALSE, FALSE, 0, "vec_mfvscr", "*mfvscr", CODE_FOR_vxf_fxu, B_UID(581) };
+static const struct builtin B1_vec_vminsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:1", "*vminsh", CODE_FOR_xfxx_simple, B_UID(582) };
+static const struct builtin B1_vec_vminuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:1", "*vminuh", CODE_FOR_xfxx_simple, B_UID(583) };
+static const struct builtin B1_vec_vminsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:1", "*vminsw", CODE_FOR_xfxx_simple, B_UID(584) };
+static const struct builtin B1_vec_vminuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:1", "*vminuw", CODE_FOR_xfxx_simple, B_UID(585) };
+static const struct builtin B1_vec_vminsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:1", "*vminsb", CODE_FOR_xfxx_simple, B_UID(586) };
+static const struct builtin B1_vec_vminub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:1", "*vminub", CODE_FOR_xfxx_simple, B_UID(587) };
+static const struct builtin B_vec_vminfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vminfp", "*vminfp", CODE_FOR_xfxx_simple, B_UID(588) };
+static const struct builtin B2_vec_vminsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:2", "*vminsh", CODE_FOR_xfxx_simple, B_UID(589) };
+static const struct builtin B3_vec_vminsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:3", "*vminsh", CODE_FOR_xfxx_simple, B_UID(590) };
+static const struct builtin B2_vec_vminsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:2", "*vminsw", CODE_FOR_xfxx_simple, B_UID(591) };
+static const struct builtin B3_vec_vminsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:3", "*vminsw", CODE_FOR_xfxx_simple, B_UID(592) };
+static const struct builtin B2_vec_vminsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:2", "*vminsb", CODE_FOR_xfxx_simple, B_UID(593) };
+static const struct builtin B3_vec_vminsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:3", "*vminsb", CODE_FOR_xfxx_simple, B_UID(594) };
+static const struct builtin B2_vec_vminuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:2", "*vminuh", CODE_FOR_xfxx_simple, B_UID(595) };
+static const struct builtin B3_vec_vminuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:3", "*vminuh", CODE_FOR_xfxx_simple, B_UID(596) };
+static const struct builtin B2_vec_vminuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:2", "*vminuw", CODE_FOR_xfxx_simple, B_UID(597) };
+static const struct builtin B3_vec_vminuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:3", "*vminuw", CODE_FOR_xfxx_simple, B_UID(598) };
+static const struct builtin B2_vec_vminub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:2", "*vminub", CODE_FOR_xfxx_simple, B_UID(599) };
+static const struct builtin B3_vec_vminub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:3", "*vminub", CODE_FOR_xfxx_simple, B_UID(600) };
+static const struct builtin B1_vec_vmladduhm = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:1", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(601) };
+static const struct builtin B2_vec_vmladduhm = { { &T_vec_s16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:2", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(602) };
+static const struct builtin B3_vec_vmladduhm = { { &T_vec_u16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:3", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(603) };
+static const struct builtin B4_vec_vmladduhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vmladduhm:4", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(604) };
+static const struct builtin B_vec_vmhraddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhraddshs", "*vmhraddshs", CODE_FOR_xfxxx_complex, B_UID(605) };
+static const struct builtin B_vec_vmsumshm = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshm", "*vmsumshm", CODE_FOR_xfxxx_complex, B_UID(606) };
+static const struct builtin B_vec_vmsummbm = { { &T_vec_s8, &T_vec_u8, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsummbm", "*vmsummbm", CODE_FOR_xfxxx_complex, B_UID(607) };
+static const struct builtin B_vec_vmsumuhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhm", "*vmsumuhm", CODE_FOR_xfxxx_complex, B_UID(608) };
+static const struct builtin B_vec_vmsumubm = { { &T_vec_u8, &T_vec_u8, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumubm", "*vmsumubm", CODE_FOR_xfxxx_complex, B_UID(609) };
+static const struct builtin B_vec_vmsumshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshs", "*vmsumshs", CODE_FOR_xfxxx_complex, B_UID(610) };
+static const struct builtin B_vec_vmsumuhs = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhs", "*vmsumuhs", CODE_FOR_xfxxx_complex, B_UID(611) };
+static const struct builtin B1_vec_mtvscr = { { &T_vec_b16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:1", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(612) };
+static const struct builtin B2_vec_mtvscr = { { &T_vec_b32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:2", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(613) };
+static const struct builtin B3_vec_mtvscr = { { &T_vec_b8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:3", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(614) };
+static const struct builtin B4_vec_mtvscr = { { &T_vec_p16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:4", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(615) };
+static const struct builtin B5_vec_mtvscr = { { &T_vec_s16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:5", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(616) };
+static const struct builtin B6_vec_mtvscr = { { &T_vec_s32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:6", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(617) };
+static const struct builtin B7_vec_mtvscr = { { &T_vec_s8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:7", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(618) };
+static const struct builtin B8_vec_mtvscr = { { &T_vec_u16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:8", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(619) };
+static const struct builtin B9_vec_mtvscr = { { &T_vec_u32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:9", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(620) };
+static const struct builtin B10_vec_mtvscr = { { &T_vec_u8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:10", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(621) };
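+/* vec_mfvscr and vec_mtvscr move the vector status and control register;
+   their result types are marked volatile (T_volatile_vec_u16 and
+   T_volatile_void), presumably so VSCR reads and writes are not reordered
+   or eliminated by the optimizer.  */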
+static const struct builtin B_vec_vmulesh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulesh", "*vmulesh", CODE_FOR_xfxx_complex, B_UID(622) };
+static const struct builtin B_vec_vmulesb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulesb", "*vmulesb", CODE_FOR_xfxx_complex, B_UID(623) };
+static const struct builtin B_vec_vmuleuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmuleuh", "*vmuleuh", CODE_FOR_xfxx_complex, B_UID(624) };
+static const struct builtin B_vec_vmuleub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuleub", "*vmuleub", CODE_FOR_xfxx_complex, B_UID(625) };
+static const struct builtin B_vec_vmulosh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulosh", "*vmulosh", CODE_FOR_xfxx_complex, B_UID(626) };
+static const struct builtin B_vec_vmulosb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulosb", "*vmulosb", CODE_FOR_xfxx_complex, B_UID(627) };
+static const struct builtin B_vec_vmulouh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmulouh", "*vmulouh", CODE_FOR_xfxx_complex, B_UID(628) };
+static const struct builtin B_vec_vmuloub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuloub", "*vmuloub", CODE_FOR_xfxx_complex, B_UID(629) };
+static const struct builtin B_vec_vnmsubfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vnmsubfp", "*vnmsubfp", CODE_FOR_xfxxx_fp, B_UID(630) };
+static const struct builtin B1_vec_vnor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vnor:1", "*vnor", CODE_FOR_xfxx_simple, B_UID(631) };
+static const struct builtin B2_vec_vnor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vnor:2", "*vnor", CODE_FOR_xfxx_simple, B_UID(632) };
+static const struct builtin B3_vec_vnor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vnor:3", "*vnor", CODE_FOR_xfxx_simple, B_UID(633) };
+static const struct builtin B4_vec_vnor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vnor:4", "*vnor", CODE_FOR_xfxx_simple, B_UID(634) };
+static const struct builtin B5_vec_vnor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vnor:5", "*vnor", CODE_FOR_xfxx_simple, B_UID(635) };
+static const struct builtin B6_vec_vnor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vnor:6", "*vnor", CODE_FOR_xfxx_simple, B_UID(636) };
+static const struct builtin B7_vec_vnor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vnor:7", "*vnor", CODE_FOR_xfxx_simple, B_UID(637) };
+static const struct builtin B8_vec_vnor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vnor:8", "*vnor", CODE_FOR_xfxx_simple, B_UID(638) };
+static const struct builtin B9_vec_vnor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vnor:9", "*vnor", CODE_FOR_xfxx_simple, B_UID(639) };
+static const struct builtin B10_vec_vnor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vnor:10", "*vnor", CODE_FOR_xfxx_simple, B_UID(640) };
+static const struct builtin B1_vec_vor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vor:1", "*vor", CODE_FOR_xfxx_simple, B_UID(641) };
+static const struct builtin B2_vec_vor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:2", "*vor", CODE_FOR_xfxx_simple, B_UID(642) };
+static const struct builtin B3_vec_vor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:3", "*vor", CODE_FOR_xfxx_simple, B_UID(643) };
+static const struct builtin B4_vec_vor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vor:4", "*vor", CODE_FOR_xfxx_simple, B_UID(644) };
+static const struct builtin B5_vec_vor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:5", "*vor", CODE_FOR_xfxx_simple, B_UID(645) };
+static const struct builtin B6_vec_vor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:6", "*vor", CODE_FOR_xfxx_simple, B_UID(646) };
+static const struct builtin B7_vec_vor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:7", "*vor", CODE_FOR_xfxx_simple, B_UID(647) };
+static const struct builtin B8_vec_vor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vor:8", "*vor", CODE_FOR_xfxx_simple, B_UID(648) };
+static const struct builtin B9_vec_vor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:9", "*vor", CODE_FOR_xfxx_simple, B_UID(649) };
+static const struct builtin B10_vec_vor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:10", "*vor", CODE_FOR_xfxx_simple, B_UID(650) };
+static const struct builtin B11_vec_vor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:11", "*vor", CODE_FOR_xfxx_simple, B_UID(651) };
+static const struct builtin B12_vec_vor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:12", "*vor", CODE_FOR_xfxx_simple, B_UID(652) };
+static const struct builtin B13_vec_vor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:13", "*vor", CODE_FOR_xfxx_simple, B_UID(653) };
+static const struct builtin B14_vec_vor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:14", "*vor", CODE_FOR_xfxx_simple, B_UID(654) };
+static const struct builtin B15_vec_vor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:15", "*vor", CODE_FOR_xfxx_simple, B_UID(655) };
+static const struct builtin B16_vec_vor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:16", "*vor", CODE_FOR_xfxx_simple, B_UID(656) };
+static const struct builtin B17_vec_vor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:17", "*vor", CODE_FOR_xfxx_simple, B_UID(657) };
+static const struct builtin B18_vec_vor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:18", "*vor", CODE_FOR_xfxx_simple, B_UID(658) };
+static const struct builtin B19_vec_vor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:19", "*vor", CODE_FOR_xfxx_simple, B_UID(659) };
+static const struct builtin B20_vec_vor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:20", "*vor", CODE_FOR_xfxx_simple, B_UID(660) };
+static const struct builtin B21_vec_vor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:21", "*vor", CODE_FOR_xfxx_simple, B_UID(661) };
+static const struct builtin B22_vec_vor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:22", "*vor", CODE_FOR_xfxx_simple, B_UID(662) };
+static const struct builtin B23_vec_vor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:23", "*vor", CODE_FOR_xfxx_simple, B_UID(663) };
+static const struct builtin B24_vec_vor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:24", "*vor", CODE_FOR_xfxx_simple, B_UID(664) };
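+/* The pack entries that follow narrow each element to half its width, so
+   two full-width inputs yield one vector of twice as many half-sized
+   elements; the ...um forms truncate (modulo), the ...ss/...us forms
+   saturate to the signed or unsigned range, and vpkpx packs 8:8:8:8
+   pixels down to 1:5:5:5.  */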
+static const struct builtin B1_vec_vpkuhum = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vpkuhum:1", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(665) };
+static const struct builtin B1_vec_vpkuwum = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vpkuwum:1", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(666) };
+static const struct builtin B2_vec_vpkuhum = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkuhum:2", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(667) };
+static const struct builtin B2_vec_vpkuwum = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkuwum:2", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(668) };
+static const struct builtin B3_vec_vpkuhum = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhum:3", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(669) };
+static const struct builtin B3_vec_vpkuwum = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwum:3", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(670) };
+static const struct builtin B_vec_vpkpx = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vpkpx", "*vpkpx", CODE_FOR_xfxx_perm, B_UID(671) };
+static const struct builtin B_vec_vpkshss = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkshss", "*vpkshss", CODE_FOR_xfxx_perm, B_UID(672) };
+static const struct builtin B_vec_vpkswss = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkswss", "*vpkswss", CODE_FOR_xfxx_perm, B_UID(673) };
+static const struct builtin B_vec_vpkuhus = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhus", "*vpkuhus", CODE_FOR_xfxx_perm, B_UID(674) };
+static const struct builtin B_vec_vpkuwus = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwus", "*vpkuwus", CODE_FOR_xfxx_perm, B_UID(675) };
+static const struct builtin B_vec_vpkshus = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkshus", "*vpkshus", CODE_FOR_xfxx_perm, B_UID(676) };
+static const struct builtin B_vec_vpkswus = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkswus", "*vpkswus", CODE_FOR_xfxx_perm, B_UID(677) };
+static const struct builtin B1_vec_vperm = { { &T_vec_b16, &T_vec_b16, &T_vec_u8, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vperm:1", "*vperm", CODE_FOR_xfxxx_perm, B_UID(678) };
+static const struct builtin B2_vec_vperm = { { &T_vec_b32, &T_vec_b32, &T_vec_u8, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vperm:2", "*vperm", CODE_FOR_xfxxx_perm, B_UID(679) };
+static const struct builtin B3_vec_vperm = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vperm:3", "*vperm", CODE_FOR_xfxxx_perm, B_UID(680) };
+static const struct builtin B4_vec_vperm = { { &T_vec_f32, &T_vec_f32, &T_vec_u8, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vperm:4", "*vperm", CODE_FOR_xfxxx_perm, B_UID(681) };
+static const struct builtin B5_vec_vperm = { { &T_vec_p16, &T_vec_p16, &T_vec_u8, }, "xxx", &T_vec_p16, 3, FALSE, FALSE, 0, "vec_vperm:5", "*vperm", CODE_FOR_xfxxx_perm, B_UID(682) };
+static const struct builtin B6_vec_vperm = { { &T_vec_s16, &T_vec_s16, &T_vec_u8, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vperm:6", "*vperm", CODE_FOR_xfxxx_perm, B_UID(683) };
+static const struct builtin B7_vec_vperm = { { &T_vec_s32, &T_vec_s32, &T_vec_u8, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vperm:7", "*vperm", CODE_FOR_xfxxx_perm, B_UID(684) };
+static const struct builtin B8_vec_vperm = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vperm:8", "*vperm", CODE_FOR_xfxxx_perm, B_UID(685) };
+static const struct builtin B9_vec_vperm = { { &T_vec_u16, &T_vec_u16, &T_vec_u8, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vperm:9", "*vperm", CODE_FOR_xfxxx_perm, B_UID(686) };
+static const struct builtin B10_vec_vperm = { { &T_vec_u32, &T_vec_u32, &T_vec_u8, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vperm:10", "*vperm", CODE_FOR_xfxxx_perm, B_UID(687) };
+static const struct builtin B11_vec_vperm = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vperm:11", "*vperm", CODE_FOR_xfxxx_perm, B_UID(688) };
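+/* A call such as vec_perm (a, b, pat), with two vector signed short
+   operands and a vector unsigned char byte-selection pattern, resolves to
+   the "vec_vperm:6" entry above and emits a single vperm instruction.  */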
+static const struct builtin B_vec_vrefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrefp", "*vrefp", CODE_FOR_xfx_fp, B_UID(689) };
+static const struct builtin B1_vec_vrlh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vrlh:1", "*vrlh", CODE_FOR_xfxx_simple, B_UID(690) };
+static const struct builtin B1_vec_vrlw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vrlw:1", "*vrlw", CODE_FOR_xfxx_simple, B_UID(691) };
+static const struct builtin B1_vec_vrlb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vrlb:1", "*vrlb", CODE_FOR_xfxx_simple, B_UID(692) };
+static const struct builtin B2_vec_vrlh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vrlh:2", "*vrlh", CODE_FOR_xfxx_simple, B_UID(693) };
+static const struct builtin B2_vec_vrlw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vrlw:2", "*vrlw", CODE_FOR_xfxx_simple, B_UID(694) };
+static const struct builtin B2_vec_vrlb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vrlb:2", "*vrlb", CODE_FOR_xfxx_simple, B_UID(695) };
+static const struct builtin B_vec_vrfin = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfin", "*vrfin", CODE_FOR_xfx_fp, B_UID(696) };
+static const struct builtin B_vec_vrsqrtefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrsqrtefp", "*vrsqrtefp", CODE_FOR_xfx_fp, B_UID(697) };
+static const struct builtin B1_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_b16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:1", "*vsel", CODE_FOR_xfxxx_simple, B_UID(698) };
+static const struct builtin B2_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_u16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:2", "*vsel", CODE_FOR_xfxxx_simple, B_UID(699) };
+static const struct builtin B3_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_b32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:3", "*vsel", CODE_FOR_xfxxx_simple, B_UID(700) };
+static const struct builtin B4_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_u32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:4", "*vsel", CODE_FOR_xfxxx_simple, B_UID(701) };
+static const struct builtin B5_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_b8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:5", "*vsel", CODE_FOR_xfxxx_simple, B_UID(702) };
+static const struct builtin B6_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:6", "*vsel", CODE_FOR_xfxxx_simple, B_UID(703) };
+static const struct builtin B7_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_b32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:7", "*vsel", CODE_FOR_xfxxx_simple, B_UID(704) };
+static const struct builtin B8_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_u32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:8", "*vsel", CODE_FOR_xfxxx_simple, B_UID(705) };
+static const struct builtin B9_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_b16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:9", "*vsel", CODE_FOR_xfxxx_simple, B_UID(706) };
+static const struct builtin B10_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:10", "*vsel", CODE_FOR_xfxxx_simple, B_UID(707) };
+static const struct builtin B11_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_b32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:11", "*vsel", CODE_FOR_xfxxx_simple, B_UID(708) };
+static const struct builtin B12_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_u32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:12", "*vsel", CODE_FOR_xfxxx_simple, B_UID(709) };
+static const struct builtin B13_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_b8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:13", "*vsel", CODE_FOR_xfxxx_simple, B_UID(710) };
+static const struct builtin B14_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:14", "*vsel", CODE_FOR_xfxxx_simple, B_UID(711) };
+static const struct builtin B15_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_b16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:15", "*vsel", CODE_FOR_xfxxx_simple, B_UID(712) };
+static const struct builtin B16_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:16", "*vsel", CODE_FOR_xfxxx_simple, B_UID(713) };
+static const struct builtin B17_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_b32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:17", "*vsel", CODE_FOR_xfxxx_simple, B_UID(714) };
+static const struct builtin B18_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:18", "*vsel", CODE_FOR_xfxxx_simple, B_UID(715) };
+static const struct builtin B19_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_b8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:19", "*vsel", CODE_FOR_xfxxx_simple, B_UID(716) };
+static const struct builtin B20_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:20", "*vsel", CODE_FOR_xfxxx_simple, B_UID(717) };
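+/* vec_sel (a, b, mask) selects each bit of b where the corresponding mask
+   bit is 1 and each bit of a where it is 0; the overloads above pair every
+   element type with either a bool or an unsigned mask of the same width.  */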
+static const struct builtin B1_vec_vslh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslh:1", "*vslh", CODE_FOR_xfxx_simple, B_UID(718) };
+static const struct builtin B1_vec_vslw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslw:1", "*vslw", CODE_FOR_xfxx_simple, B_UID(719) };
+static const struct builtin B1_vec_vslb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslb:1", "*vslb", CODE_FOR_xfxx_simple, B_UID(720) };
+static const struct builtin B2_vec_vslh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslh:2", "*vslh", CODE_FOR_xfxx_simple, B_UID(721) };
+static const struct builtin B2_vec_vslw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslw:2", "*vslw", CODE_FOR_xfxx_simple, B_UID(722) };
+static const struct builtin B2_vec_vslb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslb:2", "*vslb", CODE_FOR_xfxx_simple, B_UID(723) };
+static const struct builtin B1_vec_vsldoi = { { &T_vec_b16, &T_vec_b16, &T_immed_u4, }, "xxC", &T_vec_b16, 3, FALSE, FALSE, 3, "vec_vsldoi:1", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(724) };
+static const struct builtin B2_vec_vsldoi = { { &T_vec_b32, &T_vec_b32, &T_immed_u4, }, "xxC", &T_vec_b32, 3, FALSE, FALSE, 3, "vec_vsldoi:2", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(725) };
+static const struct builtin B3_vec_vsldoi = { { &T_vec_b8, &T_vec_b8, &T_immed_u4, }, "xxC", &T_vec_b8, 3, FALSE, FALSE, 3, "vec_vsldoi:3", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(726) };
+static const struct builtin B4_vec_vsldoi = { { &T_vec_f32, &T_vec_f32, &T_immed_u4, }, "xxC", &T_vec_f32, 3, FALSE, FALSE, 3, "vec_vsldoi:4", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(727) };
+static const struct builtin B5_vec_vsldoi = { { &T_vec_p16, &T_vec_p16, &T_immed_u4, }, "xxC", &T_vec_p16, 3, FALSE, FALSE, 3, "vec_vsldoi:5", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(728) };
+static const struct builtin B6_vec_vsldoi = { { &T_vec_s16, &T_vec_s16, &T_immed_u4, }, "xxC", &T_vec_s16, 3, FALSE, FALSE, 3, "vec_vsldoi:6", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(729) };
+static const struct builtin B7_vec_vsldoi = { { &T_vec_s32, &T_vec_s32, &T_immed_u4, }, "xxC", &T_vec_s32, 3, FALSE, FALSE, 3, "vec_vsldoi:7", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(730) };
+static const struct builtin B8_vec_vsldoi = { { &T_vec_s8, &T_vec_s8, &T_immed_u4, }, "xxC", &T_vec_s8, 3, FALSE, FALSE, 3, "vec_vsldoi:8", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(731) };
+static const struct builtin B9_vec_vsldoi = { { &T_vec_u16, &T_vec_u16, &T_immed_u4, }, "xxC", &T_vec_u16, 3, FALSE, FALSE, 3, "vec_vsldoi:9", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(732) };
+static const struct builtin B10_vec_vsldoi = { { &T_vec_u32, &T_vec_u32, &T_immed_u4, }, "xxC", &T_vec_u32, 3, FALSE, FALSE, 3, "vec_vsldoi:10", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(733) };
+static const struct builtin B11_vec_vsldoi = { { &T_vec_u8, &T_vec_u8, &T_immed_u4, }, "xxC", &T_vec_u8, 3, FALSE, FALSE, 3, "vec_vsldoi:11", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(734) };
+static const struct builtin B1_vec_vsl = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:1", "*vsl", CODE_FOR_xfxx_simple, B_UID(735) };
+static const struct builtin B2_vec_vsl = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:2", "*vsl", CODE_FOR_xfxx_simple, B_UID(736) };
+static const struct builtin B3_vec_vsl = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:3", "*vsl", CODE_FOR_xfxx_simple, B_UID(737) };
+static const struct builtin B4_vec_vsl = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:4", "*vsl", CODE_FOR_xfxx_simple, B_UID(738) };
+static const struct builtin B5_vec_vsl = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:5", "*vsl", CODE_FOR_xfxx_simple, B_UID(739) };
+static const struct builtin B6_vec_vsl = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:6", "*vsl", CODE_FOR_xfxx_simple, B_UID(740) };
+static const struct builtin B7_vec_vsl = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:7", "*vsl", CODE_FOR_xfxx_simple, B_UID(741) };
+static const struct builtin B8_vec_vsl = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:8", "*vsl", CODE_FOR_xfxx_simple, B_UID(742) };
+static const struct builtin B9_vec_vsl = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:9", "*vsl", CODE_FOR_xfxx_simple, B_UID(743) };
+static const struct builtin B10_vec_vsl = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:10", "*vsl", CODE_FOR_xfxx_simple, B_UID(744) };
+static const struct builtin B11_vec_vsl = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:11", "*vsl", CODE_FOR_xfxx_simple, B_UID(745) };
+static const struct builtin B12_vec_vsl = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:12", "*vsl", CODE_FOR_xfxx_simple, B_UID(746) };
+static const struct builtin B13_vec_vsl = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:13", "*vsl", CODE_FOR_xfxx_simple, B_UID(747) };
+static const struct builtin B14_vec_vsl = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:14", "*vsl", CODE_FOR_xfxx_simple, B_UID(748) };
+static const struct builtin B15_vec_vsl = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:15", "*vsl", CODE_FOR_xfxx_simple, B_UID(749) };
+static const struct builtin B16_vec_vsl = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:16", "*vsl", CODE_FOR_xfxx_simple, B_UID(750) };
+static const struct builtin B17_vec_vsl = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:17", "*vsl", CODE_FOR_xfxx_simple, B_UID(751) };
+static const struct builtin B18_vec_vsl = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:18", "*vsl", CODE_FOR_xfxx_simple, B_UID(752) };
+static const struct builtin B19_vec_vsl = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:19", "*vsl", CODE_FOR_xfxx_simple, B_UID(753) };
+static const struct builtin B20_vec_vsl = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:20", "*vsl", CODE_FOR_xfxx_simple, B_UID(754) };
+static const struct builtin B21_vec_vsl = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:21", "*vsl", CODE_FOR_xfxx_simple, B_UID(755) };
+static const struct builtin B22_vec_vsl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:22", "*vsl", CODE_FOR_xfxx_simple, B_UID(756) };
+static const struct builtin B23_vec_vsl = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:23", "*vsl", CODE_FOR_xfxx_simple, B_UID(757) };
+static const struct builtin B24_vec_vsl = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:24", "*vsl", CODE_FOR_xfxx_simple, B_UID(758) };
+static const struct builtin B25_vec_vsl = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:25", "*vsl", CODE_FOR_xfxx_simple, B_UID(759) };
+static const struct builtin B26_vec_vsl = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:26", "*vsl", CODE_FOR_xfxx_simple, B_UID(760) };
+static const struct builtin B27_vec_vsl = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:27", "*vsl", CODE_FOR_xfxx_simple, B_UID(761) };
+static const struct builtin B28_vec_vsl = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:28", "*vsl", CODE_FOR_xfxx_simple, B_UID(762) };
+static const struct builtin B29_vec_vsl = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:29", "*vsl", CODE_FOR_xfxx_simple, B_UID(763) };
+static const struct builtin B30_vec_vsl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:30", "*vsl", CODE_FOR_xfxx_simple, B_UID(764) };
+static const struct builtin B1_vec_vslo = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:1", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(765) };
+static const struct builtin B2_vec_vslo = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:2", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(766) };
+static const struct builtin B3_vec_vslo = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:3", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(767) };
+static const struct builtin B4_vec_vslo = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:4", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(768) };
+static const struct builtin B5_vec_vslo = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:5", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(769) };
+static const struct builtin B6_vec_vslo = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:6", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(770) };
+static const struct builtin B7_vec_vslo = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:7", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(771) };
+static const struct builtin B8_vec_vslo = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:8", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(772) };
+static const struct builtin B9_vec_vslo = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:9", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(773) };
+static const struct builtin B10_vec_vslo = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:10", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(774) };
+static const struct builtin B11_vec_vslo = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:11", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(775) };
+static const struct builtin B12_vec_vslo = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:12", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(776) };
+static const struct builtin B13_vec_vslo = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:13", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(777) };
+static const struct builtin B14_vec_vslo = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:14", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(778) };
+static const struct builtin B15_vec_vslo = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:15", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(779) };
+static const struct builtin B16_vec_vslo = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:16", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(780) };
+static const struct builtin B1_vec_vsplth = { { &T_vec_b16, &T_immed_u5, NULL, }, "xB", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsplth:1", "*vsplth", CODE_FOR_xfxB_perm, B_UID(781) };
+static const struct builtin B1_vec_vspltw = { { &T_vec_b32, &T_immed_u5, NULL, }, "xB", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vspltw:1", "*vspltw", CODE_FOR_xfxB_perm, B_UID(782) };
+static const struct builtin B1_vec_vspltb = { { &T_vec_b8, &T_immed_u5, NULL, }, "xB", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vspltb:1", "*vspltb", CODE_FOR_xfxB_perm, B_UID(783) };
+static const struct builtin B2_vec_vspltw = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vspltw:2", "*vspltw", CODE_FOR_xfxB_perm, B_UID(784) };
+static const struct builtin B2_vec_vsplth = { { &T_vec_p16, &T_immed_u5, NULL, }, "xB", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsplth:2", "*vsplth", CODE_FOR_xfxB_perm, B_UID(785) };
+static const struct builtin B3_vec_vsplth = { { &T_vec_s16, &T_immed_u5, NULL, }, "xB", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsplth:3", "*vsplth", CODE_FOR_xfxB_perm, B_UID(786) };
+static const struct builtin B3_vec_vspltw = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vspltw:3", "*vspltw", CODE_FOR_xfxB_perm, B_UID(787) };
+static const struct builtin B2_vec_vspltb = { { &T_vec_s8, &T_immed_u5, NULL, }, "xB", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vspltb:2", "*vspltb", CODE_FOR_xfxB_perm, B_UID(788) };
+static const struct builtin B4_vec_vsplth = { { &T_vec_u16, &T_immed_u5, NULL, }, "xB", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsplth:4", "*vsplth", CODE_FOR_xfxB_perm, B_UID(789) };
+static const struct builtin B4_vec_vspltw = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vspltw:4", "*vspltw", CODE_FOR_xfxB_perm, B_UID(790) };
+static const struct builtin B3_vec_vspltb = { { &T_vec_u8, &T_immed_u5, NULL, }, "xB", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vspltb:3", "*vspltb", CODE_FOR_xfxB_perm, B_UID(791) };
+static const struct builtin B_vec_vspltish = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s16, 1, FALSE, FALSE, 5, "vec_vspltish", "*vspltish", CODE_FOR_xfA_perm, B_UID(792) };
+static const struct builtin B_vec_vspltisw = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s32, 1, FALSE, FALSE, 6, "vec_vspltisw", "*vspltisw", CODE_FOR_xfA_perm, B_UID(793) };
+static const struct builtin B_vec_vspltisb = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s8, 1, FALSE, FALSE, 4, "vec_vspltisb", "*vspltisb", CODE_FOR_xfA_perm, B_UID(794) };
+static const struct builtin B_vec_splat_u16 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u16, 1, FALSE, FALSE, 5, "vec_splat_u16", "*vspltish", CODE_FOR_xfA_perm, B_UID(795) };
+static const struct builtin B_vec_splat_u32 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u32, 1, FALSE, FALSE, 6, "vec_splat_u32", "*vspltisw", CODE_FOR_xfA_perm, B_UID(796) };
+static const struct builtin B_vec_splat_u8 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u8, 1, FALSE, FALSE, 4, "vec_splat_u8", "*vspltisb", CODE_FOR_xfA_perm, B_UID(797) };
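+/* vec_splat_u8/u16/u32 reuse the vspltisb/vspltish/vspltisw instructions
+   and insn codes of the signed splat-immediate entries just above; only
+   the unsigned result type differs.  */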
+static const struct builtin B1_vec_vsrh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrh:1", "*vsrh", CODE_FOR_xfxx_simple, B_UID(798) };
+static const struct builtin B1_vec_vsrw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsrw:1", "*vsrw", CODE_FOR_xfxx_simple, B_UID(799) };
+static const struct builtin B1_vec_vsrb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrb:1", "*vsrb", CODE_FOR_xfxx_simple, B_UID(800) };
+static const struct builtin B2_vec_vsrh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrh:2", "*vsrh", CODE_FOR_xfxx_simple, B_UID(801) };
+static const struct builtin B2_vec_vsrw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsrw:2", "*vsrw", CODE_FOR_xfxx_simple, B_UID(802) };
+static const struct builtin B2_vec_vsrb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrb:2", "*vsrb", CODE_FOR_xfxx_simple, B_UID(803) };
+static const struct builtin B1_vec_vsrah = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrah:1", "*vsrah", CODE_FOR_xfxx_simple, B_UID(804) };
+static const struct builtin B1_vec_vsraw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsraw:1", "*vsraw", CODE_FOR_xfxx_simple, B_UID(805) };
+static const struct builtin B1_vec_vsrab = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrab:1", "*vsrab", CODE_FOR_xfxx_simple, B_UID(806) };
+static const struct builtin B2_vec_vsrah = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrah:2", "*vsrah", CODE_FOR_xfxx_simple, B_UID(807) };
+static const struct builtin B2_vec_vsraw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsraw:2", "*vsraw", CODE_FOR_xfxx_simple, B_UID(808) };
+static const struct builtin B2_vec_vsrab = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrab:2", "*vsrab", CODE_FOR_xfxx_simple, B_UID(809) };
+static const struct builtin B1_vec_vsr = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:1", "*vsr", CODE_FOR_xfxx_simple, B_UID(810) };
+static const struct builtin B2_vec_vsr = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:2", "*vsr", CODE_FOR_xfxx_simple, B_UID(811) };
+static const struct builtin B3_vec_vsr = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:3", "*vsr", CODE_FOR_xfxx_simple, B_UID(812) };
+static const struct builtin B4_vec_vsr = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:4", "*vsr", CODE_FOR_xfxx_simple, B_UID(813) };
+static const struct builtin B5_vec_vsr = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:5", "*vsr", CODE_FOR_xfxx_simple, B_UID(814) };
+static const struct builtin B6_vec_vsr = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:6", "*vsr", CODE_FOR_xfxx_simple, B_UID(815) };
+static const struct builtin B7_vec_vsr = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:7", "*vsr", CODE_FOR_xfxx_simple, B_UID(816) };
+static const struct builtin B8_vec_vsr = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:8", "*vsr", CODE_FOR_xfxx_simple, B_UID(817) };
+static const struct builtin B9_vec_vsr = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:9", "*vsr", CODE_FOR_xfxx_simple, B_UID(818) };
+static const struct builtin B10_vec_vsr = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:10", "*vsr", CODE_FOR_xfxx_simple, B_UID(819) };
+static const struct builtin B11_vec_vsr = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:11", "*vsr", CODE_FOR_xfxx_simple, B_UID(820) };
+static const struct builtin B12_vec_vsr = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:12", "*vsr", CODE_FOR_xfxx_simple, B_UID(821) };
+static const struct builtin B13_vec_vsr = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:13", "*vsr", CODE_FOR_xfxx_simple, B_UID(822) };
+static const struct builtin B14_vec_vsr = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:14", "*vsr", CODE_FOR_xfxx_simple, B_UID(823) };
+static const struct builtin B15_vec_vsr = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:15", "*vsr", CODE_FOR_xfxx_simple, B_UID(824) };
+static const struct builtin B16_vec_vsr = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:16", "*vsr", CODE_FOR_xfxx_simple, B_UID(825) };
+static const struct builtin B17_vec_vsr = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:17", "*vsr", CODE_FOR_xfxx_simple, B_UID(826) };
+static const struct builtin B18_vec_vsr = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:18", "*vsr", CODE_FOR_xfxx_simple, B_UID(827) };
+static const struct builtin B19_vec_vsr = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:19", "*vsr", CODE_FOR_xfxx_simple, B_UID(828) };
+static const struct builtin B20_vec_vsr = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:20", "*vsr", CODE_FOR_xfxx_simple, B_UID(829) };
+static const struct builtin B21_vec_vsr = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:21", "*vsr", CODE_FOR_xfxx_simple, B_UID(830) };
+static const struct builtin B22_vec_vsr = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:22", "*vsr", CODE_FOR_xfxx_simple, B_UID(831) };
+static const struct builtin B23_vec_vsr = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:23", "*vsr", CODE_FOR_xfxx_simple, B_UID(832) };
+static const struct builtin B24_vec_vsr = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:24", "*vsr", CODE_FOR_xfxx_simple, B_UID(833) };
+static const struct builtin B25_vec_vsr = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:25", "*vsr", CODE_FOR_xfxx_simple, B_UID(834) };
+static const struct builtin B26_vec_vsr = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:26", "*vsr", CODE_FOR_xfxx_simple, B_UID(835) };
+static const struct builtin B27_vec_vsr = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:27", "*vsr", CODE_FOR_xfxx_simple, B_UID(836) };
+static const struct builtin B28_vec_vsr = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:28", "*vsr", CODE_FOR_xfxx_simple, B_UID(837) };
+static const struct builtin B29_vec_vsr = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:29", "*vsr", CODE_FOR_xfxx_simple, B_UID(838) };
+static const struct builtin B30_vec_vsr = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:30", "*vsr", CODE_FOR_xfxx_simple, B_UID(839) };
+static const struct builtin B1_vec_vsro = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:1", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(840) };
+static const struct builtin B2_vec_vsro = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:2", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(841) };
+static const struct builtin B3_vec_vsro = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:3", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(842) };
+static const struct builtin B4_vec_vsro = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:4", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(843) };
+static const struct builtin B5_vec_vsro = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:5", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(844) };
+static const struct builtin B6_vec_vsro = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:6", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(845) };
+static const struct builtin B7_vec_vsro = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:7", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(846) };
+static const struct builtin B8_vec_vsro = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:8", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(847) };
+static const struct builtin B9_vec_vsro = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:9", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(848) };
+static const struct builtin B10_vec_vsro = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:10", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(849) };
+static const struct builtin B11_vec_vsro = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:11", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(850) };
+static const struct builtin B12_vec_vsro = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:12", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(851) };
+static const struct builtin B13_vec_vsro = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:13", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(852) };
+static const struct builtin B14_vec_vsro = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:14", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(853) };
+static const struct builtin B15_vec_vsro = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:15", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(854) };
+static const struct builtin B16_vec_vsro = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:16", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(855) };
+static const struct builtin B1_vec_stvx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:1", "*stvx", CODE_FOR_sfxii_store, B_UID(856) };
+static const struct builtin B2_vec_stvx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:2", "*stvx", CODE_FOR_sfxii_store, B_UID(857) };
+static const struct builtin B3_vec_stvx = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:3", "*stvx", CODE_FOR_sfxii_store, B_UID(858) };
+static const struct builtin B4_vec_stvx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:4", "*stvx", CODE_FOR_sfxii_store, B_UID(859) };
+static const struct builtin B5_vec_stvx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:5", "*stvx", CODE_FOR_sfxii_store, B_UID(860) };
+static const struct builtin B6_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:6", "*stvx", CODE_FOR_sfxii_store, B_UID(861) };
+static const struct builtin B7_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:7", "*stvx", CODE_FOR_sfxii_store, B_UID(862) };
+static const struct builtin B8_vec_stvx = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:8", "*stvx", CODE_FOR_sfxii_store, B_UID(863) };
+static const struct builtin B9_vec_stvx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:9", "*stvx", CODE_FOR_sfxii_store, B_UID(864) };
+static const struct builtin B10_vec_stvx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:10", "*stvx", CODE_FOR_sfxii_store, B_UID(865) };
+static const struct builtin B11_vec_stvx = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:11", "*stvx", CODE_FOR_sfxii_store, B_UID(866) };
+static const struct builtin B12_vec_stvx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:12", "*stvx", CODE_FOR_sfxii_store, B_UID(867) };
+static const struct builtin B13_vec_stvx = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:13", "*stvx", CODE_FOR_sfxii_store, B_UID(868) };
+static const struct builtin B14_vec_stvx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:14", "*stvx", CODE_FOR_sfxii_store, B_UID(869) };
+static const struct builtin B15_vec_stvx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:15", "*stvx", CODE_FOR_sfxii_store, B_UID(870) };
+static const struct builtin B16_vec_stvx = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:16", "*stvx", CODE_FOR_sfxii_store, B_UID(871) };
+static const struct builtin B17_vec_stvx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:17", "*stvx", CODE_FOR_sfxii_store, B_UID(872) };
+static const struct builtin B18_vec_stvx = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:18", "*stvx", CODE_FOR_sfxii_store, B_UID(873) };
+static const struct builtin B19_vec_stvx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:19", "*stvx", CODE_FOR_sfxii_store, B_UID(874) };
+static const struct builtin B20_vec_stvx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:20", "*stvx", CODE_FOR_sfxii_store, B_UID(875) };
+static const struct builtin B21_vec_stvx = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:21", "*stvx", CODE_FOR_sfxii_store, B_UID(876) };
+static const struct builtin B22_vec_stvx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:22", "*stvx", CODE_FOR_sfxii_store, B_UID(877) };
+static const struct builtin B23_vec_stvx = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:23", "*stvx", CODE_FOR_sfxii_store, B_UID(878) };
+static const struct builtin B24_vec_stvx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:24", "*stvx", CODE_FOR_sfxii_store, B_UID(879) };
+static const struct builtin B25_vec_stvx = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:25", "*stvx", CODE_FOR_sfxii_store, B_UID(880) };
+static const struct builtin B26_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:26", "*stvx", CODE_FOR_sfxii_store, B_UID(881) };
+static const struct builtin B27_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:27", "*stvx", CODE_FOR_sfxii_store, B_UID(882) };
+static const struct builtin B28_vec_stvx = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:28", "*stvx", CODE_FOR_sfxii_store, B_UID(883) };
+static const struct builtin B29_vec_stvx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:29", "*stvx", CODE_FOR_sfxii_store, B_UID(884) };
+static const struct builtin B30_vec_stvx = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:30", "*stvx", CODE_FOR_sfxii_store, B_UID(885) };
+static const struct builtin B1_vec_stvebx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:1", "*stvebx", CODE_FOR_sfxii_store, B_UID(886) };
+static const struct builtin B2_vec_stvebx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:2", "*stvebx", CODE_FOR_sfxii_store, B_UID(887) };
+static const struct builtin B1_vec_stvewx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:1", "*stvewx", CODE_FOR_sfxii_store, B_UID(888) };
+static const struct builtin B2_vec_stvewx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:2", "*stvewx", CODE_FOR_sfxii_store, B_UID(889) };
+static const struct builtin B3_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:3", "*stvewx", CODE_FOR_sfxii_store, B_UID(890) };
+static const struct builtin B4_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:4", "*stvewx", CODE_FOR_sfxii_store, B_UID(891) };
+static const struct builtin B3_vec_stvebx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:3", "*stvebx", CODE_FOR_sfxii_store, B_UID(892) };
+static const struct builtin B4_vec_stvebx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:4", "*stvebx", CODE_FOR_sfxii_store, B_UID(893) };
+static const struct builtin B5_vec_stvewx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:5", "*stvewx", CODE_FOR_sfxii_store, B_UID(894) };
+static const struct builtin B1_vec_stvehx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:1", "*stvehx", CODE_FOR_sfxii_store, B_UID(895) };
+static const struct builtin B2_vec_stvehx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:2", "*stvehx", CODE_FOR_sfxii_store, B_UID(896) };
+static const struct builtin B3_vec_stvehx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:3", "*stvehx", CODE_FOR_sfxii_store, B_UID(897) };
+static const struct builtin B6_vec_stvewx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:6", "*stvewx", CODE_FOR_sfxii_store, B_UID(898) };
+static const struct builtin B7_vec_stvewx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:7", "*stvewx", CODE_FOR_sfxii_store, B_UID(899) };
+static const struct builtin B5_vec_stvebx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:5", "*stvebx", CODE_FOR_sfxii_store, B_UID(900) };
+static const struct builtin B4_vec_stvehx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:4", "*stvehx", CODE_FOR_sfxii_store, B_UID(901) };
+static const struct builtin B8_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:8", "*stvewx", CODE_FOR_sfxii_store, B_UID(902) };
+static const struct builtin B9_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:9", "*stvewx", CODE_FOR_sfxii_store, B_UID(903) };
+static const struct builtin B6_vec_stvebx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:6", "*stvebx", CODE_FOR_sfxii_store, B_UID(904) };
+static const struct builtin B1_vec_stvxl = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:1", "*stvxl", CODE_FOR_sfxii_store, B_UID(905) };
+static const struct builtin B2_vec_stvxl = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:2", "*stvxl", CODE_FOR_sfxii_store, B_UID(906) };
+static const struct builtin B3_vec_stvxl = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:3", "*stvxl", CODE_FOR_sfxii_store, B_UID(907) };
+static const struct builtin B4_vec_stvxl = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:4", "*stvxl", CODE_FOR_sfxii_store, B_UID(908) };
+static const struct builtin B5_vec_stvxl = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:5", "*stvxl", CODE_FOR_sfxii_store, B_UID(909) };
+static const struct builtin B6_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:6", "*stvxl", CODE_FOR_sfxii_store, B_UID(910) };
+static const struct builtin B7_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:7", "*stvxl", CODE_FOR_sfxii_store, B_UID(911) };
+static const struct builtin B8_vec_stvxl = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:8", "*stvxl", CODE_FOR_sfxii_store, B_UID(912) };
+static const struct builtin B9_vec_stvxl = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:9", "*stvxl", CODE_FOR_sfxii_store, B_UID(913) };
+static const struct builtin B10_vec_stvxl = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:10", "*stvxl", CODE_FOR_sfxii_store, B_UID(914) };
+static const struct builtin B11_vec_stvxl = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:11", "*stvxl", CODE_FOR_sfxii_store, B_UID(915) };
+static const struct builtin B12_vec_stvxl = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:12", "*stvxl", CODE_FOR_sfxii_store, B_UID(916) };
+static const struct builtin B13_vec_stvxl = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:13", "*stvxl", CODE_FOR_sfxii_store, B_UID(917) };
+static const struct builtin B14_vec_stvxl = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:14", "*stvxl", CODE_FOR_sfxii_store, B_UID(918) };
+static const struct builtin B15_vec_stvxl = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:15", "*stvxl", CODE_FOR_sfxii_store, B_UID(919) };
+static const struct builtin B16_vec_stvxl = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:16", "*stvxl", CODE_FOR_sfxii_store, B_UID(920) };
+static const struct builtin B17_vec_stvxl = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:17", "*stvxl", CODE_FOR_sfxii_store, B_UID(921) };
+static const struct builtin B18_vec_stvxl = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:18", "*stvxl", CODE_FOR_sfxii_store, B_UID(922) };
+static const struct builtin B19_vec_stvxl = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:19", "*stvxl", CODE_FOR_sfxii_store, B_UID(923) };
+static const struct builtin B20_vec_stvxl = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:20", "*stvxl", CODE_FOR_sfxii_store, B_UID(924) };
+static const struct builtin B21_vec_stvxl = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:21", "*stvxl", CODE_FOR_sfxii_store, B_UID(925) };
+static const struct builtin B22_vec_stvxl = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:22", "*stvxl", CODE_FOR_sfxii_store, B_UID(926) };
+static const struct builtin B23_vec_stvxl = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:23", "*stvxl", CODE_FOR_sfxii_store, B_UID(927) };
+static const struct builtin B24_vec_stvxl = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:24", "*stvxl", CODE_FOR_sfxii_store, B_UID(928) };
+static const struct builtin B25_vec_stvxl = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:25", "*stvxl", CODE_FOR_sfxii_store, B_UID(929) };
+static const struct builtin B26_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:26", "*stvxl", CODE_FOR_sfxii_store, B_UID(930) };
+static const struct builtin B27_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:27", "*stvxl", CODE_FOR_sfxii_store, B_UID(931) };
+static const struct builtin B28_vec_stvxl = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:28", "*stvxl", CODE_FOR_sfxii_store, B_UID(932) };
+static const struct builtin B29_vec_stvxl = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:29", "*stvxl", CODE_FOR_sfxii_store, B_UID(933) };
+static const struct builtin B30_vec_stvxl = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:30", "*stvxl", CODE_FOR_sfxii_store, B_UID(934) };
+static const struct builtin B1_vec_vsubuhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:1", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(935) };
+static const struct builtin B2_vec_vsubuhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:2", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(936) };
+static const struct builtin B1_vec_vsubuwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:1", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(937) };
+static const struct builtin B2_vec_vsubuwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:2", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(938) };
+static const struct builtin B1_vec_vsububm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:1", "*vsububm", CODE_FOR_xfxx_simple, B_UID(939) };
+static const struct builtin B2_vec_vsububm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:2", "*vsububm", CODE_FOR_xfxx_simple, B_UID(940) };
+static const struct builtin B_vec_vsubfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vsubfp", "*vsubfp", CODE_FOR_xfxx_fp, B_UID(941) };
+static const struct builtin B3_vec_vsubuhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:3", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(942) };
+static const struct builtin B4_vec_vsubuhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:4", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(943) };
+static const struct builtin B3_vec_vsubuwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:3", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(944) };
+static const struct builtin B4_vec_vsubuwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:4", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(945) };
+static const struct builtin B3_vec_vsububm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:3", "*vsububm", CODE_FOR_xfxx_simple, B_UID(946) };
+static const struct builtin B4_vec_vsububm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:4", "*vsububm", CODE_FOR_xfxx_simple, B_UID(947) };
+static const struct builtin B5_vec_vsubuhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:5", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(948) };
+static const struct builtin B6_vec_vsubuhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:6", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(949) };
+static const struct builtin B5_vec_vsubuwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:5", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(950) };
+static const struct builtin B6_vec_vsubuwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:6", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(951) };
+static const struct builtin B5_vec_vsububm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:5", "*vsububm", CODE_FOR_xfxx_simple, B_UID(952) };
+static const struct builtin B6_vec_vsububm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:6", "*vsububm", CODE_FOR_xfxx_simple, B_UID(953) };
+static const struct builtin B_vec_vsubcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsubcuw", "*vsubcuw", CODE_FOR_xfxx_simple, B_UID(954) };
+static const struct builtin B1_vec_vsubshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:1", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(955) };
+static const struct builtin B1_vec_vsubuhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:1", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(956) };
+static const struct builtin B1_vec_vsubsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:1", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(957) };
+static const struct builtin B1_vec_vsubuws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:1", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(958) };
+static const struct builtin B1_vec_vsubsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:1", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(959) };
+static const struct builtin B1_vec_vsububs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:1", "*vsububs", CODE_FOR_xfxx_simple, B_UID(960) };
+static const struct builtin B2_vec_vsubshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:2", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(961) };
+static const struct builtin B3_vec_vsubshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:3", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(962) };
+static const struct builtin B2_vec_vsubsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:2", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(963) };
+static const struct builtin B3_vec_vsubsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:3", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(964) };
+static const struct builtin B2_vec_vsubsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:2", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(965) };
+static const struct builtin B3_vec_vsubsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:3", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(966) };
+static const struct builtin B2_vec_vsubuhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:2", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(967) };
+static const struct builtin B3_vec_vsubuhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:3", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(968) };
+static const struct builtin B2_vec_vsubuws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:2", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(969) };
+static const struct builtin B3_vec_vsubuws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:3", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(970) };
+static const struct builtin B2_vec_vsububs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:2", "*vsububs", CODE_FOR_xfxx_simple, B_UID(971) };
+static const struct builtin B3_vec_vsububs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:3", "*vsububs", CODE_FOR_xfxx_simple, B_UID(972) };
+static const struct builtin B_vec_vsum2sws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum2sws", "*vsum2sws", CODE_FOR_xfxx_complex, B_UID(973) };
+static const struct builtin B_vec_vsum4shs = { { &T_vec_s16, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4shs", "*vsum4shs", CODE_FOR_xfxx_complex, B_UID(974) };
+static const struct builtin B_vec_vsum4sbs = { { &T_vec_s8, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4sbs", "*vsum4sbs", CODE_FOR_xfxx_complex, B_UID(975) };
+static const struct builtin B_vec_vsum4ubs = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsum4ubs", "*vsum4ubs", CODE_FOR_xfxx_complex, B_UID(976) };
+static const struct builtin B_vec_vsumsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsumsws", "*vsumsws", CODE_FOR_xfxx_complex, B_UID(977) };
+static const struct builtin B_vec_vrfiz = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfiz", "*vrfiz", CODE_FOR_xfx_fp, B_UID(978) };
+static const struct builtin B1_vec_unpack2sh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(979) };
+static const struct builtin B2_vec_unpack2sh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(980) };
+static const struct builtin B1_vec_unpack2sl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sl:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(981) };
+static const struct builtin B2_vec_unpack2sl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sl:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(982) };
+static const struct builtin B1_vec_unpack2uh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2uh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(983) };
+static const struct builtin B2_vec_unpack2uh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2uh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(984) };
+static const struct builtin B1_vec_unpack2ul = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2ul:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(985) };
+static const struct builtin B2_vec_unpack2ul = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2ul:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(986) };
+static const struct builtin B1_vec_vupkhsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupkhsh:1", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(987) };
+static const struct builtin B1_vec_vupkhsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupkhsb:1", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(988) };
+static const struct builtin B_vec_vupkhpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupkhpx", "*vupkhpx", CODE_FOR_xfx_perm, B_UID(989) };
+static const struct builtin B2_vec_vupkhsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupkhsh:2", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(990) };
+static const struct builtin B2_vec_vupkhsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupkhsb:2", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(991) };
+static const struct builtin B1_vec_vupklsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupklsh:1", "*vupklsh", CODE_FOR_xfx_perm, B_UID(992) };
+static const struct builtin B1_vec_vupklsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupklsb:1", "*vupklsb", CODE_FOR_xfx_perm, B_UID(993) };
+static const struct builtin B_vec_vupklpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupklpx", "*vupklpx", CODE_FOR_xfx_perm, B_UID(994) };
+static const struct builtin B2_vec_vupklsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupklsh:2", "*vupklsh", CODE_FOR_xfx_perm, B_UID(995) };
+static const struct builtin B2_vec_vupklsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupklsb:2", "*vupklsb", CODE_FOR_xfx_perm, B_UID(996) };
+static const struct builtin B1_vec_vxor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vxor:1", "*vxor", CODE_FOR_xfxx_simple, B_UID(997) };
+static const struct builtin B2_vec_vxor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:2", "*vxor", CODE_FOR_xfxx_simple, B_UID(998) };
+static const struct builtin B3_vec_vxor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:3", "*vxor", CODE_FOR_xfxx_simple, B_UID(999) };
+static const struct builtin B4_vec_vxor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vxor:4", "*vxor", CODE_FOR_xfxx_simple, B_UID(1000) };
+static const struct builtin B5_vec_vxor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:5", "*vxor", CODE_FOR_xfxx_simple, B_UID(1001) };
+static const struct builtin B6_vec_vxor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:6", "*vxor", CODE_FOR_xfxx_simple, B_UID(1002) };
+static const struct builtin B7_vec_vxor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:7", "*vxor", CODE_FOR_xfxx_simple, B_UID(1003) };
+static const struct builtin B8_vec_vxor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vxor:8", "*vxor", CODE_FOR_xfxx_simple, B_UID(1004) };
+static const struct builtin B9_vec_vxor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:9", "*vxor", CODE_FOR_xfxx_simple, B_UID(1005) };
+static const struct builtin B10_vec_vxor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:10", "*vxor", CODE_FOR_xfxx_simple, B_UID(1006) };
+static const struct builtin B11_vec_vxor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:11", "*vxor", CODE_FOR_xfxx_simple, B_UID(1007) };
+static const struct builtin B12_vec_vxor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:12", "*vxor", CODE_FOR_xfxx_simple, B_UID(1008) };
+static const struct builtin B13_vec_vxor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:13", "*vxor", CODE_FOR_xfxx_simple, B_UID(1009) };
+static const struct builtin B14_vec_vxor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:14", "*vxor", CODE_FOR_xfxx_simple, B_UID(1010) };
+static const struct builtin B15_vec_vxor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:15", "*vxor", CODE_FOR_xfxx_simple, B_UID(1011) };
+static const struct builtin B16_vec_vxor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:16", "*vxor", CODE_FOR_xfxx_simple, B_UID(1012) };
+static const struct builtin B17_vec_vxor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:17", "*vxor", CODE_FOR_xfxx_simple, B_UID(1013) };
+static const struct builtin B18_vec_vxor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:18", "*vxor", CODE_FOR_xfxx_simple, B_UID(1014) };
+static const struct builtin B19_vec_vxor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:19", "*vxor", CODE_FOR_xfxx_simple, B_UID(1015) };
+static const struct builtin B20_vec_vxor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:20", "*vxor", CODE_FOR_xfxx_simple, B_UID(1016) };
+static const struct builtin B21_vec_vxor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:21", "*vxor", CODE_FOR_xfxx_simple, B_UID(1017) };
+static const struct builtin B22_vec_vxor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:22", "*vxor", CODE_FOR_xfxx_simple, B_UID(1018) };
+static const struct builtin B23_vec_vxor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:23", "*vxor", CODE_FOR_xfxx_simple, B_UID(1019) };
+static const struct builtin B24_vec_vxor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:24", "*vxor", CODE_FOR_xfxx_simple, B_UID(1020) };
+#define LAST_B_UID B_UID(1021)
+
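For orientation, a hedged reconstruction of the record shape these generated
initializers imply; the field names below are assumptions for illustration,
not GCC's actual declaration of struct builtin:

    /* Sketch of the layout suggested by initializers such as B1_vec_stvx
       above; type and field names here are hypothetical.  */
    struct type;                     /* interned type descriptor (&T_...)   */
    struct builtin_sketch
    {
      const struct type *args[3];    /* up to 3 argument types, NULL-padded */
      const char *signature;         /* "xii" = vector, int, int; "xx"; "x" */
      const struct type *result;     /* &T_void for the store builtins      */
      int nargs;                     /* arity: 1, 2, or 3 above             */
      int f1, f2;                    /* the two FALSE fields                */
      int attr;                      /* trailing 0/1 before the name        */
      const char *name;              /* "vec_stvx:1" = overload 1           */
      const char *insn;              /* "*stvx": target AltiVec instruction */
      int icode;                     /* CODE_FOR_... insn-code selector     */
      int uid;                       /* B_UID(n), bounded by LAST_B_UID     */
    };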
+const struct builtin * const Builtin[] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+ &B_vec_vaddcuw,
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+ &B_vec_all_in,
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+ &B_vec_all_nan,
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+ &B_vec_all_nge,
+ &B_vec_all_ngt,
+ &B_vec_all_nle,
+ &B_vec_all_nlt,
+ &B_vec_all_numeric,
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+ &B_vec_any_nan,
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+ &B_vec_any_nge,
+ &B_vec_any_ngt,
+ &B_vec_any_nle,
+ &B_vec_any_nlt,
+ &B_vec_any_numeric,
+ &B_vec_any_out,
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+ &B_vec_vrfip,
+ &B_vec_vcmpbfp,
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+ &B_vec_vcmpgefp,
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+ &B_vec_cmple,
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+ &B_vec_vctsxs,
+ &B_vec_vctuxs,
+ &B_vec_dss,
+ &B_vec_dssall,
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+ &B_vec_vexptefp,
+ &B_vec_vrfim,
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+ &B_vec_vlogefp,
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+ &B_vec_vmaddfp,
+ &B_vec_vmhaddshs,
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+ &B_vec_mfvscr,
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+ &B_vec_vmhraddshs,
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+ &B_vec_vnmsubfp,
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+ &B_vec_vpkpx,
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+ &B_vec_vrefp,
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+ &B_vec_vrfin,
+ &B_vec_vrsqrtefp,
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+ &B_vec_vspltish,
+ &B_vec_vspltisw,
+ &B_vec_vspltisb,
+ &B_vec_splat_u16,
+ &B_vec_splat_u32,
+ &B_vec_splat_u8,
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+ &B_vec_vsubcuw,
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+ &B_vec_vsum2sws,
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+ &B_vec_vsumsws,
+ &B_vec_vrfiz,
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
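The flat Builtin[] index above enumerates every overload record once; the
O_vec_* tables that follow group the same records by generic operation. A
minimal sketch, assuming the hypothetical builtin_sketch layout above and
pointer-interned type descriptors, of how overload resolution over one of
these tables could proceed (illustrative only, not GCC's actual routine):

    /* Hypothetical overload lookup: scan a table such as O_vec_abs and
       return the first candidate whose argument types match exactly.  */
    static const struct builtin_sketch *
    select_overload (const struct builtin_sketch *const *table, int count,
                     const struct type *const *actual, int nactual)
    {
      int i, j;
      for (i = 0; i < count; i++)
        {
          const struct builtin_sketch *b = table[i];
          if (b->nargs != nactual)
            continue;
          for (j = 0; j < nactual; j++)
            if (b->args[j] != actual[j]) /* interned: pointer compare */
              break;
          if (j == nactual)
            return b;                    /* exact overload match */
        }
      return 0;                          /* no overload accepts these types */
    }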
+static const struct builtin *const O_vec_abs[4] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+};
+static const struct builtin *const O_vec_abss[3] = {
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+};
+static const struct builtin *const O_vec_add[19] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_addc[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_adds[18] = {
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_all_eq[23] = {
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+};
+static const struct builtin *const O_vec_all_ge[19] = {
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+};
+static const struct builtin *const O_vec_all_gt[19] = {
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+};
+static const struct builtin *const O_vec_all_in[1] = {
+ &B_vec_all_in,
+};
+static const struct builtin *const O_vec_all_le[19] = {
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+};
+static const struct builtin *const O_vec_all_lt[19] = {
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+};
+static const struct builtin *const O_vec_all_nan[1] = {
+ &B_vec_all_nan,
+};
+static const struct builtin *const O_vec_all_ne[23] = {
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+};
+static const struct builtin *const O_vec_all_nge[1] = {
+ &B_vec_all_nge,
+};
+static const struct builtin *const O_vec_all_ngt[1] = {
+ &B_vec_all_ngt,
+};
+static const struct builtin *const O_vec_all_nle[1] = {
+ &B_vec_all_nle,
+};
+static const struct builtin *const O_vec_all_nlt[1] = {
+ &B_vec_all_nlt,
+};
+static const struct builtin *const O_vec_all_numeric[1] = {
+ &B_vec_all_numeric,
+};
+static const struct builtin *const O_vec_and[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_andc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_any_eq[23] = {
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+};
+static const struct builtin *const O_vec_any_ge[19] = {
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+};
+static const struct builtin *const O_vec_any_gt[19] = {
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+};
+static const struct builtin *const O_vec_any_le[19] = {
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+};
+static const struct builtin *const O_vec_any_lt[19] = {
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+};
+static const struct builtin *const O_vec_any_nan[1] = {
+ &B_vec_any_nan,
+};
+static const struct builtin *const O_vec_any_ne[23] = {
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+};
+static const struct builtin *const O_vec_any_nge[1] = {
+ &B_vec_any_nge,
+};
+static const struct builtin *const O_vec_any_ngt[1] = {
+ &B_vec_any_ngt,
+};
+static const struct builtin *const O_vec_any_nle[1] = {
+ &B_vec_any_nle,
+};
+static const struct builtin *const O_vec_any_nlt[1] = {
+ &B_vec_any_nlt,
+};
+static const struct builtin *const O_vec_any_numeric[1] = {
+ &B_vec_any_numeric,
+};
+static const struct builtin *const O_vec_any_out[1] = {
+ &B_vec_any_out,
+};
+static const struct builtin *const O_vec_avg[6] = {
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_ceil[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_cmpb[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_cmpeq[7] = {
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_cmpge[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_cmpgt[7] = {
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_cmple[1] = {
+ &B_vec_cmple,
+};
+static const struct builtin *const O_vec_cmplt[7] = {
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+};
+static const struct builtin *const O_vec_ctf[2] = {
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_cts[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_ctu[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_dss[1] = {
+ &B_vec_dss,
+};
+static const struct builtin *const O_vec_dssall[1] = {
+ &B_vec_dssall,
+};
+static const struct builtin *const O_vec_dst[20] = {
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+};
+static const struct builtin *const O_vec_dstst[20] = {
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+};
+static const struct builtin *const O_vec_dststt[20] = {
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+};
+static const struct builtin *const O_vec_dstt[20] = {
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+};
+static const struct builtin *const O_vec_expte[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_floor[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_ld[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lde[9] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_ldl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_loge[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_lvebx[2] = {
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+};
+static const struct builtin *const O_vec_lvehx[2] = {
+ &B1_vec_lvehx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_lvewx[5] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+};
+static const struct builtin *const O_vec_lvsl[9] = {
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+};
+static const struct builtin *const O_vec_lvsr[9] = {
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+};
+static const struct builtin *const O_vec_lvx[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lvxl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_madd[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_madds[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_max[19] = {
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_mergeh[11] = {
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_mergel[11] = {
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_mfvscr[1] = {
+ &B_vec_mfvscr,
+};
+static const struct builtin *const O_vec_min[19] = {
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_mladd[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_mradds[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_msum[4] = {
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_msums[2] = {
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_mtvscr[10] = {
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+};
+static const struct builtin *const O_vec_mule[4] = {
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_mulo[4] = {
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_nmsub[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_nor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_or[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_pack[6] = {
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_packpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_packs[4] = {
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_packsu[4] = {
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_perm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_re[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_rl[6] = {
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_round[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_rsqrte[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_sel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_sl[6] = {
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_sld[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_sll[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_slo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_splat[11] = {
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_splat_s16[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_splat_s32[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_splat_s8[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_splat_u16[1] = {
+ &B_vec_splat_u16,
+};
+static const struct builtin *const O_vec_splat_u32[1] = {
+ &B_vec_splat_u32,
+};
+static const struct builtin *const O_vec_splat_u8[1] = {
+ &B_vec_splat_u8,
+};
+static const struct builtin *const O_vec_sr[6] = {
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_sra[6] = {
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_srl[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_sro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_st[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_ste[19] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_stvebx[6] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvebx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stvehx[4] = {
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B4_vec_stvehx,
+};
+static const struct builtin *const O_vec_stvewx[9] = {
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B5_vec_stvewx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+};
+static const struct builtin *const O_vec_stvx[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_stvxl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_sub[19] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_subc[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_subs[18] = {
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_sum2s[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_sum4s[3] = {
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_sums[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_trunc[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_unpack2sh[2] = {
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+};
+static const struct builtin *const O_vec_unpack2sl[2] = {
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+};
+static const struct builtin *const O_vec_unpack2uh[2] = {
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+};
+static const struct builtin *const O_vec_unpack2ul[2] = {
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+};
+static const struct builtin *const O_vec_unpackh[5] = {
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_unpackl[5] = {
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vaddcuw[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_vaddfp[1] = {
+ &B_vec_vaddfp,
+};
+static const struct builtin *const O_vec_vaddsbs[3] = {
+ &B1_vec_vaddsbs,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+};
+static const struct builtin *const O_vec_vaddshs[3] = {
+ &B1_vec_vaddshs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+};
+static const struct builtin *const O_vec_vaddsws[3] = {
+ &B1_vec_vaddsws,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+};
+static const struct builtin *const O_vec_vaddubm[6] = {
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_vaddubs[3] = {
+ &B1_vec_vaddubs,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_vadduhm[6] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+};
+static const struct builtin *const O_vec_vadduhs[3] = {
+ &B1_vec_vadduhs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+};
+static const struct builtin *const O_vec_vadduwm[6] = {
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+};
+static const struct builtin *const O_vec_vadduws[3] = {
+ &B1_vec_vadduws,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+};
+static const struct builtin *const O_vec_vand[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_vandc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_vavgsb[1] = {
+ &B_vec_vavgsb,
+};
+static const struct builtin *const O_vec_vavgsh[1] = {
+ &B_vec_vavgsh,
+};
+static const struct builtin *const O_vec_vavgsw[1] = {
+ &B_vec_vavgsw,
+};
+static const struct builtin *const O_vec_vavgub[1] = {
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_vavguh[1] = {
+ &B_vec_vavguh,
+};
+static const struct builtin *const O_vec_vavguw[1] = {
+ &B_vec_vavguw,
+};
+static const struct builtin *const O_vec_vcfsx[1] = {
+ &B_vec_vcfsx,
+};
+static const struct builtin *const O_vec_vcfux[1] = {
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_vcmpbfp[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_vcmpeqfp[1] = {
+ &B_vec_vcmpeqfp,
+};
+static const struct builtin *const O_vec_vcmpequb[2] = {
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_vcmpequh[2] = {
+ &B1_vec_vcmpequh,
+ &B2_vec_vcmpequh,
+};
+static const struct builtin *const O_vec_vcmpequw[2] = {
+ &B1_vec_vcmpequw,
+ &B2_vec_vcmpequw,
+};
+static const struct builtin *const O_vec_vcmpgefp[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_vcmpgtfp[1] = {
+ &B_vec_vcmpgtfp,
+};
+static const struct builtin *const O_vec_vcmpgtsb[1] = {
+ &B_vec_vcmpgtsb,
+};
+static const struct builtin *const O_vec_vcmpgtsh[1] = {
+ &B_vec_vcmpgtsh,
+};
+static const struct builtin *const O_vec_vcmpgtsw[1] = {
+ &B_vec_vcmpgtsw,
+};
+static const struct builtin *const O_vec_vcmpgtub[1] = {
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_vcmpgtuh[1] = {
+ &B_vec_vcmpgtuh,
+};
+static const struct builtin *const O_vec_vcmpgtuw[1] = {
+ &B_vec_vcmpgtuw,
+};
+static const struct builtin *const O_vec_vctsxs[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_vctuxs[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_vexptefp[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_vlogefp[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_vmaddfp[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_vmaxfp[1] = {
+ &B_vec_vmaxfp,
+};
+static const struct builtin *const O_vec_vmaxsb[3] = {
+ &B1_vec_vmaxsb,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+};
+static const struct builtin *const O_vec_vmaxsh[3] = {
+ &B1_vec_vmaxsh,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+};
+static const struct builtin *const O_vec_vmaxsw[3] = {
+ &B1_vec_vmaxsw,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+};
+static const struct builtin *const O_vec_vmaxub[3] = {
+ &B1_vec_vmaxub,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_vmaxuh[3] = {
+ &B1_vec_vmaxuh,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+};
+static const struct builtin *const O_vec_vmaxuw[3] = {
+ &B1_vec_vmaxuw,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+};
+static const struct builtin *const O_vec_vmhaddshs[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_vmhraddshs[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_vminfp[1] = {
+ &B_vec_vminfp,
+};
+static const struct builtin *const O_vec_vminsb[3] = {
+ &B1_vec_vminsb,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+};
+static const struct builtin *const O_vec_vminsh[3] = {
+ &B1_vec_vminsh,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+};
+static const struct builtin *const O_vec_vminsw[3] = {
+ &B1_vec_vminsw,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+};
+static const struct builtin *const O_vec_vminub[3] = {
+ &B1_vec_vminub,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_vminuh[3] = {
+ &B1_vec_vminuh,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+};
+static const struct builtin *const O_vec_vminuw[3] = {
+ &B1_vec_vminuw,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+};
+static const struct builtin *const O_vec_vmladduhm[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_vmrghb[3] = {
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghb,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_vmrghh[4] = {
+ &B1_vec_vmrghh,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B4_vec_vmrghh,
+};
+static const struct builtin *const O_vec_vmrghw[4] = {
+ &B1_vec_vmrghw,
+ &B2_vec_vmrghw,
+ &B3_vec_vmrghw,
+ &B4_vec_vmrghw,
+};
+static const struct builtin *const O_vec_vmrglb[3] = {
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglb,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_vmrglh[4] = {
+ &B1_vec_vmrglh,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B4_vec_vmrglh,
+};
+static const struct builtin *const O_vec_vmrglw[4] = {
+ &B1_vec_vmrglw,
+ &B2_vec_vmrglw,
+ &B3_vec_vmrglw,
+ &B4_vec_vmrglw,
+};
+static const struct builtin *const O_vec_vmsummbm[1] = {
+ &B_vec_vmsummbm,
+};
+static const struct builtin *const O_vec_vmsumshm[1] = {
+ &B_vec_vmsumshm,
+};
+static const struct builtin *const O_vec_vmsumshs[1] = {
+ &B_vec_vmsumshs,
+};
+static const struct builtin *const O_vec_vmsumubm[1] = {
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_vmsumuhm[1] = {
+ &B_vec_vmsumuhm,
+};
+static const struct builtin *const O_vec_vmsumuhs[1] = {
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_vmulesb[1] = {
+ &B_vec_vmulesb,
+};
+static const struct builtin *const O_vec_vmulesh[1] = {
+ &B_vec_vmulesh,
+};
+static const struct builtin *const O_vec_vmuleub[1] = {
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_vmuleuh[1] = {
+ &B_vec_vmuleuh,
+};
+static const struct builtin *const O_vec_vmulosb[1] = {
+ &B_vec_vmulosb,
+};
+static const struct builtin *const O_vec_vmulosh[1] = {
+ &B_vec_vmulosh,
+};
+static const struct builtin *const O_vec_vmuloub[1] = {
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_vmulouh[1] = {
+ &B_vec_vmulouh,
+};
+static const struct builtin *const O_vec_vnmsubfp[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_vnor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_vor[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_vperm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_vpkpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_vpkshss[1] = {
+ &B_vec_vpkshss,
+};
+static const struct builtin *const O_vec_vpkshus[1] = {
+ &B_vec_vpkshus,
+};
+static const struct builtin *const O_vec_vpkswss[1] = {
+ &B_vec_vpkswss,
+};
+static const struct builtin *const O_vec_vpkswus[1] = {
+ &B_vec_vpkswus,
+};
+static const struct builtin *const O_vec_vpkuhum[3] = {
+ &B1_vec_vpkuhum,
+ &B2_vec_vpkuhum,
+ &B3_vec_vpkuhum,
+};
+static const struct builtin *const O_vec_vpkuhus[1] = {
+ &B_vec_vpkuhus,
+};
+static const struct builtin *const O_vec_vpkuwum[3] = {
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_vpkuwus[1] = {
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_vrefp[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_vrfim[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_vrfin[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_vrfip[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_vrfiz[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_vrlb[2] = {
+ &B1_vec_vrlb,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_vrlh[2] = {
+ &B1_vec_vrlh,
+ &B2_vec_vrlh,
+};
+static const struct builtin *const O_vec_vrlw[2] = {
+ &B1_vec_vrlw,
+ &B2_vec_vrlw,
+};
+static const struct builtin *const O_vec_vrsqrtefp[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_vsel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_vsl[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_vslb[2] = {
+ &B1_vec_vslb,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_vsldoi[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_vslh[2] = {
+ &B1_vec_vslh,
+ &B2_vec_vslh,
+};
+static const struct builtin *const O_vec_vslo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_vslw[2] = {
+ &B1_vec_vslw,
+ &B2_vec_vslw,
+};
+static const struct builtin *const O_vec_vspltb[3] = {
+ &B1_vec_vspltb,
+ &B2_vec_vspltb,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_vsplth[4] = {
+ &B1_vec_vsplth,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B4_vec_vsplth,
+};
+static const struct builtin *const O_vec_vspltisb[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_vspltish[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_vspltisw[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_vspltw[4] = {
+ &B1_vec_vspltw,
+ &B2_vec_vspltw,
+ &B3_vec_vspltw,
+ &B4_vec_vspltw,
+};
+static const struct builtin *const O_vec_vsr[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_vsrab[2] = {
+ &B1_vec_vsrab,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_vsrah[2] = {
+ &B1_vec_vsrah,
+ &B2_vec_vsrah,
+};
+static const struct builtin *const O_vec_vsraw[2] = {
+ &B1_vec_vsraw,
+ &B2_vec_vsraw,
+};
+static const struct builtin *const O_vec_vsrb[2] = {
+ &B1_vec_vsrb,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_vsrh[2] = {
+ &B1_vec_vsrh,
+ &B2_vec_vsrh,
+};
+static const struct builtin *const O_vec_vsro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_vsrw[2] = {
+ &B1_vec_vsrw,
+ &B2_vec_vsrw,
+};
+static const struct builtin *const O_vec_vsubcuw[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_vsubfp[1] = {
+ &B_vec_vsubfp,
+};
+static const struct builtin *const O_vec_vsubsbs[3] = {
+ &B1_vec_vsubsbs,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+};
+static const struct builtin *const O_vec_vsubshs[3] = {
+ &B1_vec_vsubshs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+};
+static const struct builtin *const O_vec_vsubsws[3] = {
+ &B1_vec_vsubsws,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+};
+static const struct builtin *const O_vec_vsububm[6] = {
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_vsububs[3] = {
+ &B1_vec_vsububs,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_vsubuhm[6] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+};
+static const struct builtin *const O_vec_vsubuhs[3] = {
+ &B1_vec_vsubuhs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+};
+static const struct builtin *const O_vec_vsubuwm[6] = {
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+};
+static const struct builtin *const O_vec_vsubuws[3] = {
+ &B1_vec_vsubuws,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+};
+static const struct builtin *const O_vec_vsum2sws[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_vsum4sbs[1] = {
+ &B_vec_vsum4sbs,
+};
+static const struct builtin *const O_vec_vsum4shs[1] = {
+ &B_vec_vsum4shs,
+};
+static const struct builtin *const O_vec_vsum4ubs[1] = {
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_vsumsws[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_vupkhpx[1] = {
+ &B_vec_vupkhpx,
+};
+static const struct builtin *const O_vec_vupkhsb[2] = {
+ &B1_vec_vupkhsb,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_vupkhsh[2] = {
+ &B1_vec_vupkhsh,
+ &B2_vec_vupkhsh,
+};
+static const struct builtin *const O_vec_vupklpx[1] = {
+ &B_vec_vupklpx,
+};
+static const struct builtin *const O_vec_vupklsb[2] = {
+ &B1_vec_vupklsb,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vupklsh[2] = {
+ &B1_vec_vupklsh,
+ &B2_vec_vupklsh,
+};
+static const struct builtin *const O_vec_vxor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+static const struct builtin *const O_vec_xor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
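+/* Overload table: one row per overloaded AltiVec intrinsic.  Field
+   meanings, inferred from the initializers above: the intrinsic's
+   source-level name, the number of candidate builtins, the number of
+   arguments each candidate takes, the candidate table, and a unique
+   overload ID assigned via O_UID.  The table is NULL-terminated. */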
+const struct overloadx Overload[] = {
+ { "vec_abs", 4, 1, O_vec_abs, O_UID(0) },
+ { "vec_abss", 3, 1, O_vec_abss, O_UID(1) },
+ { "vec_add", 19, 2, O_vec_add, O_UID(2) },
+ { "vec_addc", 1, 2, O_vec_addc, O_UID(3) },
+ { "vec_adds", 18, 2, O_vec_adds, O_UID(4) },
+ { "vec_all_eq", 23, 2, O_vec_all_eq, O_UID(5) },
+ { "vec_all_ge", 19, 2, O_vec_all_ge, O_UID(6) },
+ { "vec_all_gt", 19, 2, O_vec_all_gt, O_UID(7) },
+ { "vec_all_in", 1, 2, O_vec_all_in, O_UID(8) },
+ { "vec_all_le", 19, 2, O_vec_all_le, O_UID(9) },
+ { "vec_all_lt", 19, 2, O_vec_all_lt, O_UID(10) },
+ { "vec_all_nan", 1, 1, O_vec_all_nan, O_UID(11) },
+ { "vec_all_ne", 23, 2, O_vec_all_ne, O_UID(12) },
+ { "vec_all_nge", 1, 2, O_vec_all_nge, O_UID(13) },
+ { "vec_all_ngt", 1, 2, O_vec_all_ngt, O_UID(14) },
+ { "vec_all_nle", 1, 2, O_vec_all_nle, O_UID(15) },
+ { "vec_all_nlt", 1, 2, O_vec_all_nlt, O_UID(16) },
+ { "vec_all_numeric", 1, 1, O_vec_all_numeric, O_UID(17) },
+ { "vec_and", 24, 2, O_vec_and, O_UID(18) },
+ { "vec_andc", 24, 2, O_vec_andc, O_UID(19) },
+ { "vec_any_eq", 23, 2, O_vec_any_eq, O_UID(20) },
+ { "vec_any_ge", 19, 2, O_vec_any_ge, O_UID(21) },
+ { "vec_any_gt", 19, 2, O_vec_any_gt, O_UID(22) },
+ { "vec_any_le", 19, 2, O_vec_any_le, O_UID(23) },
+ { "vec_any_lt", 19, 2, O_vec_any_lt, O_UID(24) },
+ { "vec_any_nan", 1, 1, O_vec_any_nan, O_UID(25) },
+ { "vec_any_ne", 23, 2, O_vec_any_ne, O_UID(26) },
+ { "vec_any_nge", 1, 2, O_vec_any_nge, O_UID(27) },
+ { "vec_any_ngt", 1, 2, O_vec_any_ngt, O_UID(28) },
+ { "vec_any_nle", 1, 2, O_vec_any_nle, O_UID(29) },
+ { "vec_any_nlt", 1, 2, O_vec_any_nlt, O_UID(30) },
+ { "vec_any_numeric", 1, 1, O_vec_any_numeric, O_UID(31) },
+ { "vec_any_out", 1, 2, O_vec_any_out, O_UID(32) },
+ { "vec_avg", 6, 2, O_vec_avg, O_UID(33) },
+ { "vec_ceil", 1, 1, O_vec_ceil, O_UID(34) },
+ { "vec_cmpb", 1, 2, O_vec_cmpb, O_UID(35) },
+ { "vec_cmpeq", 7, 2, O_vec_cmpeq, O_UID(36) },
+ { "vec_cmpge", 1, 2, O_vec_cmpge, O_UID(37) },
+ { "vec_cmpgt", 7, 2, O_vec_cmpgt, O_UID(38) },
+ { "vec_cmple", 1, 2, O_vec_cmple, O_UID(39) },
+ { "vec_cmplt", 7, 2, O_vec_cmplt, O_UID(40) },
+ { "vec_ctf", 2, 2, O_vec_ctf, O_UID(41) },
+ { "vec_cts", 1, 2, O_vec_cts, O_UID(42) },
+ { "vec_ctu", 1, 2, O_vec_ctu, O_UID(43) },
+ { "vec_dss", 1, 1, O_vec_dss, O_UID(44) },
+ { "vec_dssall", 1, 0, O_vec_dssall, O_UID(45) },
+ { "vec_dst", 20, 3, O_vec_dst, O_UID(46) },
+ { "vec_dstst", 20, 3, O_vec_dstst, O_UID(47) },
+ { "vec_dststt", 20, 3, O_vec_dststt, O_UID(48) },
+ { "vec_dstt", 20, 3, O_vec_dstt, O_UID(49) },
+ { "vec_expte", 1, 1, O_vec_expte, O_UID(50) },
+ { "vec_floor", 1, 1, O_vec_floor, O_UID(51) },
+ { "vec_ld", 20, 2, O_vec_ld, O_UID(52) },
+ { "vec_lde", 9, 2, O_vec_lde, O_UID(53) },
+ { "vec_ldl", 20, 2, O_vec_ldl, O_UID(54) },
+ { "vec_loge", 1, 1, O_vec_loge, O_UID(55) },
+ { "vec_lvebx", 2, 2, O_vec_lvebx, O_UID(56) },
+ { "vec_lvehx", 2, 2, O_vec_lvehx, O_UID(57) },
+ { "vec_lvewx", 5, 2, O_vec_lvewx, O_UID(58) },
+ { "vec_lvsl", 9, 2, O_vec_lvsl, O_UID(59) },
+ { "vec_lvsr", 9, 2, O_vec_lvsr, O_UID(60) },
+ { "vec_lvx", 20, 2, O_vec_lvx, O_UID(61) },
+ { "vec_lvxl", 20, 2, O_vec_lvxl, O_UID(62) },
+ { "vec_madd", 1, 3, O_vec_madd, O_UID(63) },
+ { "vec_madds", 1, 3, O_vec_madds, O_UID(64) },
+ { "vec_max", 19, 2, O_vec_max, O_UID(65) },
+ { "vec_mergeh", 11, 2, O_vec_mergeh, O_UID(66) },
+ { "vec_mergel", 11, 2, O_vec_mergel, O_UID(67) },
+ { "vec_mfvscr", 1, 0, O_vec_mfvscr, O_UID(68) },
+ { "vec_min", 19, 2, O_vec_min, O_UID(69) },
+ { "vec_mladd", 4, 3, O_vec_mladd, O_UID(70) },
+ { "vec_mradds", 1, 3, O_vec_mradds, O_UID(71) },
+ { "vec_msum", 4, 3, O_vec_msum, O_UID(72) },
+ { "vec_msums", 2, 3, O_vec_msums, O_UID(73) },
+ { "vec_mtvscr", 10, 1, O_vec_mtvscr, O_UID(74) },
+ { "vec_mule", 4, 2, O_vec_mule, O_UID(75) },
+ { "vec_mulo", 4, 2, O_vec_mulo, O_UID(76) },
+ { "vec_nmsub", 1, 3, O_vec_nmsub, O_UID(77) },
+ { "vec_nor", 10, 2, O_vec_nor, O_UID(78) },
+ { "vec_or", 24, 2, O_vec_or, O_UID(79) },
+ { "vec_pack", 6, 2, O_vec_pack, O_UID(80) },
+ { "vec_packpx", 1, 2, O_vec_packpx, O_UID(81) },
+ { "vec_packs", 4, 2, O_vec_packs, O_UID(82) },
+ { "vec_packsu", 4, 2, O_vec_packsu, O_UID(83) },
+ { "vec_perm", 11, 3, O_vec_perm, O_UID(84) },
+ { "vec_re", 1, 1, O_vec_re, O_UID(85) },
+ { "vec_rl", 6, 2, O_vec_rl, O_UID(86) },
+ { "vec_round", 1, 1, O_vec_round, O_UID(87) },
+ { "vec_rsqrte", 1, 1, O_vec_rsqrte, O_UID(88) },
+ { "vec_sel", 20, 3, O_vec_sel, O_UID(89) },
+ { "vec_sl", 6, 2, O_vec_sl, O_UID(90) },
+ { "vec_sld", 11, 3, O_vec_sld, O_UID(91) },
+ { "vec_sll", 30, 2, O_vec_sll, O_UID(92) },
+ { "vec_slo", 16, 2, O_vec_slo, O_UID(93) },
+ { "vec_splat", 11, 2, O_vec_splat, O_UID(94) },
+ { "vec_splat_s16", 1, 1, O_vec_splat_s16, O_UID(95) },
+ { "vec_splat_s32", 1, 1, O_vec_splat_s32, O_UID(96) },
+ { "vec_splat_s8", 1, 1, O_vec_splat_s8, O_UID(97) },
+ { "vec_splat_u16", 1, 1, O_vec_splat_u16, O_UID(98) },
+ { "vec_splat_u32", 1, 1, O_vec_splat_u32, O_UID(99) },
+ { "vec_splat_u8", 1, 1, O_vec_splat_u8, O_UID(100) },
+ { "vec_sr", 6, 2, O_vec_sr, O_UID(101) },
+ { "vec_sra", 6, 2, O_vec_sra, O_UID(102) },
+ { "vec_srl", 30, 2, O_vec_srl, O_UID(103) },
+ { "vec_sro", 16, 2, O_vec_sro, O_UID(104) },
+ { "vec_st", 30, 3, O_vec_st, O_UID(105) },
+ { "vec_ste", 19, 3, O_vec_ste, O_UID(106) },
+ { "vec_stl", 30, 3, O_vec_stl, O_UID(107) },
+ { "vec_stvebx", 6, 3, O_vec_stvebx, O_UID(108) },
+ { "vec_stvehx", 4, 3, O_vec_stvehx, O_UID(109) },
+ { "vec_stvewx", 9, 3, O_vec_stvewx, O_UID(110) },
+ { "vec_stvx", 30, 3, O_vec_stvx, O_UID(111) },
+ { "vec_stvxl", 30, 3, O_vec_stvxl, O_UID(112) },
+ { "vec_sub", 19, 2, O_vec_sub, O_UID(113) },
+ { "vec_subc", 1, 2, O_vec_subc, O_UID(114) },
+ { "vec_subs", 18, 2, O_vec_subs, O_UID(115) },
+ { "vec_sum2s", 1, 2, O_vec_sum2s, O_UID(116) },
+ { "vec_sum4s", 3, 2, O_vec_sum4s, O_UID(117) },
+ { "vec_sums", 1, 2, O_vec_sums, O_UID(118) },
+ { "vec_trunc", 1, 1, O_vec_trunc, O_UID(119) },
+ { "vec_unpack2sh", 2, 2, O_vec_unpack2sh, O_UID(120) },
+ { "vec_unpack2sl", 2, 2, O_vec_unpack2sl, O_UID(121) },
+ { "vec_unpack2uh", 2, 2, O_vec_unpack2uh, O_UID(122) },
+ { "vec_unpack2ul", 2, 2, O_vec_unpack2ul, O_UID(123) },
+ { "vec_unpackh", 5, 1, O_vec_unpackh, O_UID(124) },
+ { "vec_unpackl", 5, 1, O_vec_unpackl, O_UID(125) },
+ { "vec_vaddcuw", 1, 2, O_vec_vaddcuw, O_UID(126) },
+ { "vec_vaddfp", 1, 2, O_vec_vaddfp, O_UID(127) },
+ { "vec_vaddsbs", 3, 2, O_vec_vaddsbs, O_UID(128) },
+ { "vec_vaddshs", 3, 2, O_vec_vaddshs, O_UID(129) },
+ { "vec_vaddsws", 3, 2, O_vec_vaddsws, O_UID(130) },
+ { "vec_vaddubm", 6, 2, O_vec_vaddubm, O_UID(131) },
+ { "vec_vaddubs", 3, 2, O_vec_vaddubs, O_UID(132) },
+ { "vec_vadduhm", 6, 2, O_vec_vadduhm, O_UID(133) },
+ { "vec_vadduhs", 3, 2, O_vec_vadduhs, O_UID(134) },
+ { "vec_vadduwm", 6, 2, O_vec_vadduwm, O_UID(135) },
+ { "vec_vadduws", 3, 2, O_vec_vadduws, O_UID(136) },
+ { "vec_vand", 24, 2, O_vec_vand, O_UID(137) },
+ { "vec_vandc", 24, 2, O_vec_vandc, O_UID(138) },
+ { "vec_vavgsb", 1, 2, O_vec_vavgsb, O_UID(139) },
+ { "vec_vavgsh", 1, 2, O_vec_vavgsh, O_UID(140) },
+ { "vec_vavgsw", 1, 2, O_vec_vavgsw, O_UID(141) },
+ { "vec_vavgub", 1, 2, O_vec_vavgub, O_UID(142) },
+ { "vec_vavguh", 1, 2, O_vec_vavguh, O_UID(143) },
+ { "vec_vavguw", 1, 2, O_vec_vavguw, O_UID(144) },
+ { "vec_vcfsx", 1, 2, O_vec_vcfsx, O_UID(145) },
+ { "vec_vcfux", 1, 2, O_vec_vcfux, O_UID(146) },
+ { "vec_vcmpbfp", 1, 2, O_vec_vcmpbfp, O_UID(147) },
+ { "vec_vcmpeqfp", 1, 2, O_vec_vcmpeqfp, O_UID(148) },
+ { "vec_vcmpequb", 2, 2, O_vec_vcmpequb, O_UID(149) },
+ { "vec_vcmpequh", 2, 2, O_vec_vcmpequh, O_UID(150) },
+ { "vec_vcmpequw", 2, 2, O_vec_vcmpequw, O_UID(151) },
+ { "vec_vcmpgefp", 1, 2, O_vec_vcmpgefp, O_UID(152) },
+ { "vec_vcmpgtfp", 1, 2, O_vec_vcmpgtfp, O_UID(153) },
+ { "vec_vcmpgtsb", 1, 2, O_vec_vcmpgtsb, O_UID(154) },
+ { "vec_vcmpgtsh", 1, 2, O_vec_vcmpgtsh, O_UID(155) },
+ { "vec_vcmpgtsw", 1, 2, O_vec_vcmpgtsw, O_UID(156) },
+ { "vec_vcmpgtub", 1, 2, O_vec_vcmpgtub, O_UID(157) },
+ { "vec_vcmpgtuh", 1, 2, O_vec_vcmpgtuh, O_UID(158) },
+ { "vec_vcmpgtuw", 1, 2, O_vec_vcmpgtuw, O_UID(159) },
+ { "vec_vctsxs", 1, 2, O_vec_vctsxs, O_UID(160) },
+ { "vec_vctuxs", 1, 2, O_vec_vctuxs, O_UID(161) },
+ { "vec_vexptefp", 1, 1, O_vec_vexptefp, O_UID(162) },
+ { "vec_vlogefp", 1, 1, O_vec_vlogefp, O_UID(163) },
+ { "vec_vmaddfp", 1, 3, O_vec_vmaddfp, O_UID(164) },
+ { "vec_vmaxfp", 1, 2, O_vec_vmaxfp, O_UID(165) },
+ { "vec_vmaxsb", 3, 2, O_vec_vmaxsb, O_UID(166) },
+ { "vec_vmaxsh", 3, 2, O_vec_vmaxsh, O_UID(167) },
+ { "vec_vmaxsw", 3, 2, O_vec_vmaxsw, O_UID(168) },
+ { "vec_vmaxub", 3, 2, O_vec_vmaxub, O_UID(169) },
+ { "vec_vmaxuh", 3, 2, O_vec_vmaxuh, O_UID(170) },
+ { "vec_vmaxuw", 3, 2, O_vec_vmaxuw, O_UID(171) },
+ { "vec_vmhaddshs", 1, 3, O_vec_vmhaddshs, O_UID(172) },
+ { "vec_vmhraddshs", 1, 3, O_vec_vmhraddshs, O_UID(173) },
+ { "vec_vminfp", 1, 2, O_vec_vminfp, O_UID(174) },
+ { "vec_vminsb", 3, 2, O_vec_vminsb, O_UID(175) },
+ { "vec_vminsh", 3, 2, O_vec_vminsh, O_UID(176) },
+ { "vec_vminsw", 3, 2, O_vec_vminsw, O_UID(177) },
+ { "vec_vminub", 3, 2, O_vec_vminub, O_UID(178) },
+ { "vec_vminuh", 3, 2, O_vec_vminuh, O_UID(179) },
+ { "vec_vminuw", 3, 2, O_vec_vminuw, O_UID(180) },
+ { "vec_vmladduhm", 4, 3, O_vec_vmladduhm, O_UID(181) },
+ { "vec_vmrghb", 3, 2, O_vec_vmrghb, O_UID(182) },
+ { "vec_vmrghh", 4, 2, O_vec_vmrghh, O_UID(183) },
+ { "vec_vmrghw", 4, 2, O_vec_vmrghw, O_UID(184) },
+ { "vec_vmrglb", 3, 2, O_vec_vmrglb, O_UID(185) },
+ { "vec_vmrglh", 4, 2, O_vec_vmrglh, O_UID(186) },
+ { "vec_vmrglw", 4, 2, O_vec_vmrglw, O_UID(187) },
+ { "vec_vmsummbm", 1, 3, O_vec_vmsummbm, O_UID(188) },
+ { "vec_vmsumshm", 1, 3, O_vec_vmsumshm, O_UID(189) },
+ { "vec_vmsumshs", 1, 3, O_vec_vmsumshs, O_UID(190) },
+ { "vec_vmsumubm", 1, 3, O_vec_vmsumubm, O_UID(191) },
+ { "vec_vmsumuhm", 1, 3, O_vec_vmsumuhm, O_UID(192) },
+ { "vec_vmsumuhs", 1, 3, O_vec_vmsumuhs, O_UID(193) },
+ { "vec_vmulesb", 1, 2, O_vec_vmulesb, O_UID(194) },
+ { "vec_vmulesh", 1, 2, O_vec_vmulesh, O_UID(195) },
+ { "vec_vmuleub", 1, 2, O_vec_vmuleub, O_UID(196) },
+ { "vec_vmuleuh", 1, 2, O_vec_vmuleuh, O_UID(197) },
+ { "vec_vmulosb", 1, 2, O_vec_vmulosb, O_UID(198) },
+ { "vec_vmulosh", 1, 2, O_vec_vmulosh, O_UID(199) },
+ { "vec_vmuloub", 1, 2, O_vec_vmuloub, O_UID(200) },
+ { "vec_vmulouh", 1, 2, O_vec_vmulouh, O_UID(201) },
+ { "vec_vnmsubfp", 1, 3, O_vec_vnmsubfp, O_UID(202) },
+ { "vec_vnor", 10, 2, O_vec_vnor, O_UID(203) },
+ { "vec_vor", 24, 2, O_vec_vor, O_UID(204) },
+ { "vec_vperm", 11, 3, O_vec_vperm, O_UID(205) },
+ { "vec_vpkpx", 1, 2, O_vec_vpkpx, O_UID(206) },
+ { "vec_vpkshss", 1, 2, O_vec_vpkshss, O_UID(207) },
+ { "vec_vpkshus", 1, 2, O_vec_vpkshus, O_UID(208) },
+ { "vec_vpkswss", 1, 2, O_vec_vpkswss, O_UID(209) },
+ { "vec_vpkswus", 1, 2, O_vec_vpkswus, O_UID(210) },
+ { "vec_vpkuhum", 3, 2, O_vec_vpkuhum, O_UID(211) },
+ { "vec_vpkuhus", 1, 2, O_vec_vpkuhus, O_UID(212) },
+ { "vec_vpkuwum", 3, 2, O_vec_vpkuwum, O_UID(213) },
+ { "vec_vpkuwus", 1, 2, O_vec_vpkuwus, O_UID(214) },
+ { "vec_vrefp", 1, 1, O_vec_vrefp, O_UID(215) },
+ { "vec_vrfim", 1, 1, O_vec_vrfim, O_UID(216) },
+ { "vec_vrfin", 1, 1, O_vec_vrfin, O_UID(217) },
+ { "vec_vrfip", 1, 1, O_vec_vrfip, O_UID(218) },
+ { "vec_vrfiz", 1, 1, O_vec_vrfiz, O_UID(219) },
+ { "vec_vrlb", 2, 2, O_vec_vrlb, O_UID(220) },
+ { "vec_vrlh", 2, 2, O_vec_vrlh, O_UID(221) },
+ { "vec_vrlw", 2, 2, O_vec_vrlw, O_UID(222) },
+ { "vec_vrsqrtefp", 1, 1, O_vec_vrsqrtefp, O_UID(223) },
+ { "vec_vsel", 20, 3, O_vec_vsel, O_UID(224) },
+ { "vec_vsl", 30, 2, O_vec_vsl, O_UID(225) },
+ { "vec_vslb", 2, 2, O_vec_vslb, O_UID(226) },
+ { "vec_vsldoi", 11, 3, O_vec_vsldoi, O_UID(227) },
+ { "vec_vslh", 2, 2, O_vec_vslh, O_UID(228) },
+ { "vec_vslo", 16, 2, O_vec_vslo, O_UID(229) },
+ { "vec_vslw", 2, 2, O_vec_vslw, O_UID(230) },
+ { "vec_vspltb", 3, 2, O_vec_vspltb, O_UID(231) },
+ { "vec_vsplth", 4, 2, O_vec_vsplth, O_UID(232) },
+ { "vec_vspltisb", 1, 1, O_vec_vspltisb, O_UID(233) },
+ { "vec_vspltish", 1, 1, O_vec_vspltish, O_UID(234) },
+ { "vec_vspltisw", 1, 1, O_vec_vspltisw, O_UID(235) },
+ { "vec_vspltw", 4, 2, O_vec_vspltw, O_UID(236) },
+ { "vec_vsr", 30, 2, O_vec_vsr, O_UID(237) },
+ { "vec_vsrab", 2, 2, O_vec_vsrab, O_UID(238) },
+ { "vec_vsrah", 2, 2, O_vec_vsrah, O_UID(239) },
+ { "vec_vsraw", 2, 2, O_vec_vsraw, O_UID(240) },
+ { "vec_vsrb", 2, 2, O_vec_vsrb, O_UID(241) },
+ { "vec_vsrh", 2, 2, O_vec_vsrh, O_UID(242) },
+ { "vec_vsro", 16, 2, O_vec_vsro, O_UID(243) },
+ { "vec_vsrw", 2, 2, O_vec_vsrw, O_UID(244) },
+ { "vec_vsubcuw", 1, 2, O_vec_vsubcuw, O_UID(245) },
+ { "vec_vsubfp", 1, 2, O_vec_vsubfp, O_UID(246) },
+ { "vec_vsubsbs", 3, 2, O_vec_vsubsbs, O_UID(247) },
+ { "vec_vsubshs", 3, 2, O_vec_vsubshs, O_UID(248) },
+ { "vec_vsubsws", 3, 2, O_vec_vsubsws, O_UID(249) },
+ { "vec_vsububm", 6, 2, O_vec_vsububm, O_UID(250) },
+ { "vec_vsububs", 3, 2, O_vec_vsububs, O_UID(251) },
+ { "vec_vsubuhm", 6, 2, O_vec_vsubuhm, O_UID(252) },
+ { "vec_vsubuhs", 3, 2, O_vec_vsubuhs, O_UID(253) },
+ { "vec_vsubuwm", 6, 2, O_vec_vsubuwm, O_UID(254) },
+ { "vec_vsubuws", 3, 2, O_vec_vsubuws, O_UID(255) },
+ { "vec_vsum2sws", 1, 2, O_vec_vsum2sws, O_UID(256) },
+ { "vec_vsum4sbs", 1, 2, O_vec_vsum4sbs, O_UID(257) },
+ { "vec_vsum4shs", 1, 2, O_vec_vsum4shs, O_UID(258) },
+ { "vec_vsum4ubs", 1, 2, O_vec_vsum4ubs, O_UID(259) },
+ { "vec_vsumsws", 1, 2, O_vec_vsumsws, O_UID(260) },
+ { "vec_vupkhpx", 1, 1, O_vec_vupkhpx, O_UID(261) },
+ { "vec_vupkhsb", 2, 1, O_vec_vupkhsb, O_UID(262) },
+ { "vec_vupkhsh", 2, 1, O_vec_vupkhsh, O_UID(263) },
+ { "vec_vupklpx", 1, 1, O_vec_vupklpx, O_UID(264) },
+ { "vec_vupklsb", 2, 1, O_vec_vupklsb, O_UID(265) },
+ { "vec_vupklsh", 2, 1, O_vec_vupklsh, O_UID(266) },
+ { "vec_vxor", 24, 2, O_vec_vxor, O_UID(267) },
+ { "vec_xor", 24, 2, O_vec_xor, O_UID(268) },
+ { NULL, 0, 0, NULL, 0 }
+};
+#define LAST_O_UID O_UID(269)
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.ops b/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.ops
new file mode 100644
index 000000000..5ef80a2d6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/vec.ops
@@ -0,0 +1,1025 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
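+# Each line maps one overloaded operation signature to the specific
+# builtin it resolves to.  Column layout, inferred from the entries
+# below: operation, argument types, '=', result type, target builtin,
+# insertion phase, configuration, then optional predicate/transform
+# fields consumed by the ops-to-gp generator.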
+vec_abs vec_s8 = vec_s8 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 1 FALSE FALSE transform_vec_abs
+vec_abs vec_s16 = vec_s16 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 2 FALSE FALSE transform_vec_abs
+vec_abs vec_s32 = vec_s32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 3 FALSE FALSE transform_vec_abs
+vec_abs vec_f32 = vec_f32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 4 FALSE FALSE transform_vec_abs
+vec_abss vec_s8 = vec_s8 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 5 FALSE FALSE transform_vec_abs
+vec_abss vec_s16 = vec_s16 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 6 FALSE FALSE transform_vec_abs
+vec_abss vec_s32 = vec_s32 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 7 FALSE FALSE transform_vec_abs
+vec_cmplt vec_u8 vec_u8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtub FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u16 vec_u16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u32 vec_u32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s8 vec_s8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsb FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s16 vec_s16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s32 vec_s32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_f32 vec_f32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfp FALSE FALSE transform_vec_cmp_reverse
+vec_cmple vec_f32 vec_f32 = vec_b32 vec_cmple BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefp FALSE FALSE transform_vec_cmp_reverse
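+# Note: vec_cmplt and vec_cmple have no direct AltiVec instruction;
+# the transform_vec_cmp_reverse hook presumably emits the opposite
+# comparison (vcmpgt*/vcmpgefp) with the operands swapped.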
+vec_add vec_s8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s8 vec_b8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_b16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_b32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_b8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_b16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_b32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_b8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_b16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_b32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_b8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_b16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_b32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_b8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_b16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_b32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_b8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_b16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_b32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_b8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_b16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_b32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_b8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_b16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_b32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_addc vec_u32 vec_u32 = vec_u32 vec_vaddcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subc vec_u32 vec_u32 = vec_u32 vec_vsubcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u8 vec_u8 = vec_u16 vec_vmuloub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u16 vec_u16 = vec_u32 vec_vmulouh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s8 vec_s8 = vec_s16 vec_vmulosb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s16 vec_s16 = vec_s32 vec_vmulosh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u8 vec_u8 = vec_u16 vec_vmuleub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u16 vec_u16 = vec_u32 vec_vmuleuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s8 vec_s8 = vec_s16 vec_vmulesb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s16 vec_s16 = vec_s32 vec_vmulesh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_u16 vec_u16 = vec_u16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_u16 vec_u16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mradds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhraddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s8 vec_u8 vec_s32 = vec_s32 vec_vmsummbm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u8 vec_u8 vec_u32 = vec_u32 vec_vmsumubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sums vec_s32 vec_s32 = vec_s32 vec_vsumsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum2s vec_s32 vec_s32 = vec_s32 vec_vsum2sws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s8 vec_s32 = vec_s32 vec_vsum4sbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_u8 vec_u32 = vec_u32 vec_vsum4ubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s16 vec_s32 = vec_s32 vec_vsum4shs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s8 vec_s8 = vec_s8 vec_vavgsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s16 vec_s16 = vec_s16 vec_vavgsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u8 vec_u8 = vec_u8 vec_vavgub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u16 vec_u16 = vec_u16 vec_vavguh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s32 vec_s32 = vec_s32 vec_vavgsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u32 vec_u32 = vec_u32 vec_vavguw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_b8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_b8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_b8 = vec_b8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_b16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_b16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_b16 = vec_b16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_b32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_b32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_b32 = vec_b32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_b32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_b8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_b8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_b8 = vec_b8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_b16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_b16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_b16 = vec_b16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_b32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_b32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_b32 = vec_b32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_b32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_b8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_b8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_b8 = vec_b8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_b16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_b16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_b16 = vec_b16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_b32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_b32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_b32 = vec_b32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_b32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_b8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_b8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_b8 = vec_b8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_b16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_b16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_b16 = vec_b16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_b32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_b32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_b32 = vec_b32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_b32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u8 vec_u8 = vec_u8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s8 vec_s8 = vec_s8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b8 vec_b8 = vec_b8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u16 vec_u16 = vec_u16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s16 vec_s16 = vec_s16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b16 vec_b16 = vec_b16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u32 vec_u32 = vec_u32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s32 vec_s32 = vec_s32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b32 vec_b32 = vec_b32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_f32 vec_f32 = vec_f32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u8 vec_u8 = vec_u8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u16 vec_u16 = vec_u16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u32 vec_u32 = vec_u32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s8 vec_u8 = vec_s8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s16 vec_u16 = vec_s16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s32 vec_u32 = vec_s32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u8 vec_u8 = vec_u8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u16 vec_u16 = vec_u16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u32 vec_u32 = vec_u32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s8 vec_u8 = vec_s8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s16 vec_u16 = vec_s16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s32 vec_u32 = vec_s32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u8 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u8 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u8 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u8 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u8 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u8 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u8 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u8 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u8 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u8 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u16 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u16 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u16 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u16 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u16 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u16 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u16 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u16 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u16 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u16 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u32 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u32 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u32 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u32 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u32 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u32 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u32 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u32 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u32 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u32 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u8 vec_u8 = vec_u8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u16 vec_u16 = vec_u16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u32 vec_u32 = vec_u32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s8 vec_u8 = vec_s8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s16 vec_u16 = vec_s16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s32 vec_u32 = vec_s32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u8 vec_u8 = vec_u8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u16 vec_u16 = vec_u16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u32 vec_u32 = vec_u32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s8 vec_u8 = vec_s8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s16 vec_u16 = vec_s16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s32 vec_u32 = vec_s32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u8 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u8 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u8 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u8 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u8 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u8 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u8 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u8 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u8 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u8 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u16 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u16 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u16 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u16 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u16 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u16 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u16 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u16 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u16 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u16 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u32 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u32 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u32 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u32 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u32 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u32 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u32 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u32 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u32 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u32 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u8 vec_u8 = vec_b8 vec_vcmpgtub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u16 vec_u16 = vec_b16 vec_vcmpgtuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u32 vec_u32 = vec_b32 vec_vcmpgtuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s8 vec_s8 = vec_b8 vec_vcmpgtsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s16 vec_s16 = vec_b16 vec_vcmpgtsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s32 vec_s32 = vec_b32 vec_vcmpgtsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u8 vec_u8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u16 vec_u16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u32 vec_u32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s8 vec_s8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s16 vec_s16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s32 vec_s32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_b8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_u8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_u8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_b8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_u8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_b8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_b16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_u16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_u16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_b16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_u16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_b16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_b32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_u32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_u32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_b32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_u32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_b32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_b32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_u32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u16 vec_u16 = vec_u8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u32 vec_u32 = vec_u16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s16 vec_s16 = vec_s8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s32 vec_s32 = vec_s16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b16 vec_b16 = vec_b8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b32 vec_b32 = vec_b16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s16 vec_s16 = vec_s8 vec_vpkshss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s32 vec_s32 = vec_s16 vec_vpkswss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s16 vec_s16 = vec_u8 vec_vpkshus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s32 vec_s32 = vec_u16 vec_vpkswus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packpx vec_u32 vec_u32 = vec_p16 vec_vpkpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s8 = vec_s16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s16 = vec_s32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b8 = vec_b16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b16 = vec_b32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_p16 = vec_u32 vec_vupkhpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s8 = vec_s16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s16 = vec_s32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b8 = vec_b16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b16 = vec_b32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_p16 = vec_u32 vec_vupklpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u8 vec_u8 = vec_u8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u16 vec_u16 = vec_u16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u32 vec_u32 = vec_u32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s8 vec_s8 = vec_s8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s16 vec_s16 = vec_s16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s32 vec_s32 = vec_s32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_f32 vec_f32 = vec_f32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_p16 vec_p16 = vec_p16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b8 vec_b8 = vec_b8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b16 vec_b16 = vec_b16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b32 vec_b32 = vec_b32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2uh vec_u8 vec_u8 = vec_u16 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2uh vec_u16 vec_u16 = vec_u32 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_unpack2sh vec_u8 vec_u8 = vec_s16 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2sh vec_u16 vec_u16 = vec_s32 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_mergel vec_u8 vec_u8 = vec_u8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u16 vec_u16 = vec_u16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u32 vec_u32 = vec_u32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s8 vec_s8 = vec_s8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s16 vec_s16 = vec_s16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s32 vec_s32 = vec_s32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_f32 vec_f32 = vec_f32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_p16 vec_p16 = vec_p16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b8 vec_b8 = vec_b8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b16 vec_b16 = vec_b16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b32 vec_b32 = vec_b32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2ul vec_u8 vec_u8 = vec_u16 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2ul vec_u16 vec_u16 = vec_u32 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_unpack2sl vec_u8 vec_u8 = vec_s16 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2sl vec_u16 vec_u16 = vec_s32 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_splat vec_u8 immed_u5 = vec_u8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u16 immed_u5 = vec_u16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u32 immed_u5 = vec_u32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s8 immed_u5 = vec_s8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s16 immed_u5 = vec_s16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s32 immed_u5 = vec_s32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b8 immed_u5 = vec_b8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b16 immed_u5 = vec_b16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b32 immed_u5 = vec_b32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_p16 immed_u5 = vec_p16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_f32 immed_u5 = vec_f32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s8 immed_s5 = vec_s8 vec_vspltisb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s16 immed_s5 = vec_s16 vec_vspltish BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s32 immed_s5 = vec_s32 vec_vspltisw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_u8 immed_s5 = vec_u8 vec_splat_u8 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisb
+vec_splat_u16 immed_s5 = vec_u16 vec_splat_u16 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltish
+vec_splat_u32 immed_s5 = vec_u32 vec_splat_u32 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisw
+vec_perm vec_u8 vec_u8 vec_u8 = vec_u8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u16 vec_u16 vec_u8 = vec_u16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u32 vec_u32 vec_u8 = vec_u32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s8 vec_s8 vec_u8 = vec_s8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s16 vec_s16 vec_u8 = vec_s16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s32 vec_s32 vec_u8 = vec_s32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b8 vec_b8 vec_u8 = vec_b8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b16 vec_b16 vec_u8 = vec_b16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b32 vec_b32 vec_u8 = vec_b32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_p16 vec_p16 vec_u8 = vec_p16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_f32 vec_f32 vec_u8 = vec_f32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u8 vec_u8 immed_u4 = vec_u8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u16 vec_u16 immed_u4 = vec_u16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u32 vec_u32 immed_u4 = vec_u32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s8 vec_s8 immed_u4 = vec_s8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s16 vec_s16 immed_u4 = vec_s16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s32 vec_s32 immed_u4 = vec_s32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_p16 vec_p16 immed_u4 = vec_p16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_f32 vec_f32 immed_u4 = vec_f32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b8 vec_b8 immed_u4 = vec_b8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b16 vec_b16 immed_u4 = vec_b16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b32 vec_b32 immed_u4 = vec_b32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_u8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_u8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_u8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_u8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_u8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_u8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_u8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_s8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_s8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_s8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_s8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_s8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_s8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_s8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_u8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_s8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_u8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_u8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_u8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_u8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_u8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_u8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_u8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_s8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_s8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_s8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_s8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_s8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_s8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_s8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_u8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_s8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_b8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_b8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_b16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_b16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_b32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_b32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_b8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_b8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_b16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_b16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_b32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_b32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_lde int const_unsigned_char_ptr = vec_u8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_unsigned_short_ptr = vec_u16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_unsigned_int_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_unsigned_long_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_signed_char_ptr = vec_s8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_short_ptr = vec_s16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_int_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_long_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_float_ptr = vec_f32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_ld int const_unsigned_char_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_short_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_int_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_long_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_signed_char_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_short_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_int_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_long_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_float_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_char_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_short_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_int_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_long_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_signed_char_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_short_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_int_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_long_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_float_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ld int const_vec_u8_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u16_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u32_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s8_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s16_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s32_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_p16_ptr = vec_p16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b8_ptr = vec_b8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b16_ptr = vec_b16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b32_ptr = vec_b32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_f32_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_vec_u8_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u16_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u32_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s8_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s16_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s32_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_p16_ptr = vec_p16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b8_ptr = vec_b8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b16_ptr = vec_b16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b32_ptr = vec_b32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_f32_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ste vec_u8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_f32 int float_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_st vec_u8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int float_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int float_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_st vec_u8 int vec_u8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int vec_u16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int vec_u32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int vec_s8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int vec_s16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int vec_s32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int vec_b8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int vec_b16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int vec_b32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int vec_p16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int vec_f32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int vec_u8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int vec_u16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int vec_u32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int vec_s8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int vec_s16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int vec_s32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int vec_b8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int vec_b16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int vec_b32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int vec_p16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int vec_f32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_lvsl int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_signed_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_float_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsr int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_signed_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_float_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_mfvscr = volatile_vec_u16 vec_mfvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_p16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_float_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_float_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_float_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_float_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dss immed_u2 = volatile_void_load_op vec_dss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dssall = volatile_void_load_op vec_dssall BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_f32 vec_f32 = vec_f32 vec_vaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_f32 vec_f32 = vec_f32 vec_vsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madd vec_f32 vec_f32 vec_f32 = vec_f32 vec_vmaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nmsub vec_f32 vec_f32 vec_f32 = vec_f32 vec_vnmsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_f32 vec_f32 = vec_b32 vec_vcmpgtfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpge vec_f32 vec_f32 = vec_b32 vec_vcmpgefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_f32 vec_f32 = vec_b32 vec_vcmpeqfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpb vec_f32 vec_f32 = vec_s32 vec_vcmpbfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_f32 vec_f32 = vec_f32 vec_vmaxfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_f32 vec_f32 = vec_f32 vec_vminfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_re vec_f32 = vec_f32 vec_vrefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rsqrte vec_f32 = vec_f32 vec_vrsqrtefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_loge vec_f32 = vec_f32 vec_vlogefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_expte vec_f32 = vec_f32 vec_vexptefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_trunc vec_f32 = vec_f32 vec_vrfiz BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_round vec_f32 = vec_f32 vec_vrfin BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ceil vec_f32 = vec_f32 vec_vrfip BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_floor vec_f32 = vec_f32 vec_vrfim BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_u32 immed_u5 = vec_f32 vec_vcfux BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_s32 immed_u5 = vec_f32 vec_vcfsx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctu vec_f32 immed_u5 = vec_u32 vec_vctuxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cts vec_f32 immed_u5 = vec_s32 vec_vctsxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_all_gt vec_u8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_u8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_b8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_b8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_b8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_b8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_s8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_s8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_b8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_b8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_b8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_b8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_u8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_u8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_b8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_b8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_b8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_b8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_s8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_s8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_b8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_b8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_b8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_b8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_u16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_u16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_b16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_b16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_b16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_b16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_s16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_s16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_b16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_b16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_b16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_b16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_u16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_u16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_b16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_b16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_b16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_b16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_s16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_s16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_b16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_b16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_b16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_b16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_u32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_u32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_b32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_b32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_b32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_b32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_s32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_s32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_b32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_b32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_b32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_b32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_u32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_u32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_b32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_b32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_b32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_b32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_s32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_s32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_b32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_b32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_b32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_b32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_eq vec_u8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_p16 vec_p16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_p16 vec_p16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_p16 vec_p16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_p16 vec_p16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_u32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_gt vec_f32 vec_f32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ngt vec_f32 vec_f32 = cc26t vec_all_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_ngt vec_f32 vec_f32 = cc24f vec_any_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_gt vec_f32 vec_f32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_lt vec_f32 vec_f32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_nlt vec_f32 vec_f32 = cc26tr vec_all_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_nlt vec_f32 vec_f32 = cc24fr vec_any_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_lt vec_f32 vec_f32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ge vec_f32 vec_f32 = cc24t vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nge vec_f32 vec_f32 = cc26t vec_all_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nge vec_f32 vec_f32 = cc24f vec_any_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_ge vec_f32 vec_f32 = cc26f vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_le vec_f32 vec_f32 = cc24tr vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nle vec_f32 vec_f32 = cc26tr vec_all_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nle vec_f32 vec_f32 = cc24fr vec_any_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_le vec_f32 vec_f32 = cc26fr vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_eq vec_f32 vec_f32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_ne vec_f32 vec_f32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_ne vec_f32 vec_f32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_eq vec_f32 vec_f32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_numeric vec_f32 = cc24td vec_all_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_nan vec_f32 = cc26td vec_all_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_nan vec_f32 = cc24fd vec_any_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_numeric vec_f32 = cc26fd vec_any_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_in vec_f32 vec_f32 = cc26t vec_all_in BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
+vec_any_out vec_f32 vec_f32 = cc26f vec_any_out BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
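The rows above are the ops-table entries for the AltiVec predicate builtins: each vec_all_*/vec_any_* form pairs one dot-form VMX compare (the MOP_* column, e.g. vcmpgtub.) with a test of a CR6 bit. Reading the cc codes, cc24t appears to mean CR6 bit 24 set ("all true"), cc26f bit 26 clear ("at least one true"), a trailing r reversed operands (so vec_all_lt reuses the greater-than compare), and a trailing d a duplicated operand (the NaN predicates). A minimal usage sketch, assuming <altivec.h> on an AltiVec-enabled PowerPC target:

#include <altivec.h>

/* vec_all_gt: vcmpgtub. then CR6 bit 24 true (cc24t above).  */
int all_gt_u8 (vector unsigned char a, vector unsigned char b)
{
  return vec_all_gt (a, b);
}

/* vec_any_eq: vcmpequw. then CR6 bit 26 false (cc26f above).  */
int any_eq_s32 (vector signed int a, vector signed int b)
{
  return vec_any_eq (a, b);
}

/* vec_all_lt: operands swapped, same vcmpgtub. compare (cc24tr).  */
int all_lt_u8 (vector unsigned char a, vector unsigned char b)
{
  return vec_all_lt (a, b);
}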
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin b/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin
new file mode 100644
index 000000000..033ab6bf5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin
@@ -0,0 +1,4 @@
+host-ppc-darwin.o : $(srcdir)/config/rs6000/host-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin64 b/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin64
new file mode 100644
index 000000000..3cb423db3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rs6000/x-darwin64
@@ -0,0 +1,4 @@
+host-ppc64-darwin.o : $(srcdir)/config/rs6000/host-ppc64-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/gcc-4.2.1-5666.3/gcc/config/rtems.h b/gcc-4.2.1-5666.3/gcc/config/rtems.h
new file mode 100644
index 000000000..546c1da75
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/rtems.h
@@ -0,0 +1,44 @@
+/* Configuration common to all targets running RTEMS.
+ Copyright (C) 2000, 2002, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* The system headers under RTEMS are C++-aware. */
+#define NO_IMPLICIT_EXTERN_C
+
+/*
+ * Dummy start/end specification to let linker work as
+ * needed by autoconf scripts using this compiler.
+ */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+/*
+ * Some targets do not set up LIB_SPEC; override it here.
+ */
+#define STD_LIB_SPEC "%{!shared:%{g*:-lg} %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!qrtems: " STD_LIB_SPEC "} " \
+"%{!nostdlib: %{qrtems: --start-group \
+ %{!qrtems_debug: -lrtemsbsp -lrtemscpu} \
+ %{qrtems_debug: -lrtemsbsp_g -lrtemscpu_g} \
+ -lc -lgcc --end-group %{!qnolinkcmds: -T linkcmds%s}}}"
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/README b/gcc-4.2.1-5666.3/gcc/config/soft-fp/README
new file mode 100644
index 000000000..870025cc5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/README
@@ -0,0 +1,4 @@
+Except for t-softfp, the files in this directory are part of the GNU C
+Library, not part of GCC. As described at
+<http://gcc.gnu.org/codingconventions.html>, changes should be made to
+the GNU C Library and the changed files then imported into GCC.
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/adddf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/adddf3.c
new file mode 100644
index 000000000..24c03db0a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/adddf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a + b
+ Copyright (C) 1997,1999, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __adddf3(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_D(A, a);
+ FP_UNPACK_SEMIRAW_D(B, b);
+ FP_ADD_D(R, A, B);
+ FP_PACK_SEMIRAW_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
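Each soft-fp arithmetic entry point follows the shape of __adddf3 above: declare working representations, unpack the operands, run the wide-integer operation, pack the result, then raise any accumulated exceptions. A hedged sketch of calling it directly (not part of the patch; assumes the routine is linked in from libgcc and that DFtype is the host double):

extern double __adddf3 (double, double);

int main (void)
{
  /* 1.5 + 2.25 is exact in binary64, so the soft-fp sum should match
     the hardware result bit-for-bit.  */
  return __adddf3 (1.5, 2.25) == 3.75 ? 0 : 1;
}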
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/addsf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/addsf3.c
new file mode 100644
index 000000000..b86991ee5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/addsf3.c
@@ -0,0 +1,50 @@
+/* Software floating-point emulation.
+ Return a + b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __addsf3(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B); FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_S(A, a);
+ FP_UNPACK_SEMIRAW_S(B, b);
+ FP_ADD_S(R, A, B);
+ FP_PACK_SEMIRAW_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/addtf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/addtf3.c
new file mode 100644
index 000000000..49b67f0ba
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/addtf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a + b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __addtf3(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(A, a);
+ FP_UNPACK_SEMIRAW_Q(B, b);
+ FP_ADD_Q(R, A, B);
+ FP_PACK_SEMIRAW_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/divdf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divdf3.c
new file mode 100644
index 000000000..c3bb0d247
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divdf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a / b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __divdf3(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_D(A, a);
+ FP_UNPACK_D(B, b);
+ FP_DIV_D(R, A, B);
+ FP_PACK_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
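The one structural difference from __adddf3 is the unpack step: division uses FP_UNPACK_D (canonical form, implicit bit made explicit) rather than FP_UNPACK_SEMIRAW_D, since the divide loop needs the full significand. Same assumptions as the sketch above:

extern double __divdf3 (double, double);

int main (void)
{
  /* 7.0 / 2.0 is exact in binary64.  */
  return __divdf3 (7.0, 2.0) == 3.5 ? 0 : 1;
}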
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/divsf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divsf3.c
new file mode 100644
index 000000000..176bb3c2c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divsf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a / b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __divsf3(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B); FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_S(A, a);
+ FP_UNPACK_S(B, b);
+ FP_DIV_S(R, A, B);
+ FP_PACK_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/divtf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divtf3.c
new file mode 100644
index 000000000..916fbfe97
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/divtf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a / b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __divtf3(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q(A, a);
+ FP_UNPACK_Q(B, b);
+ FP_DIV_Q(R, A, B);
+ FP_PACK_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/double.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/double.h
new file mode 100644
index 000000000..b012d9d51
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/double.h
@@ -0,0 +1,264 @@
+/* Software floating-point emulation.
+ Definitions for IEEE Double Precision
+ Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#if _FP_W_TYPE_SIZE < 32
+#error "Here's a nickel kid. Go buy yourself a real computer."
+#endif
+
+#if _FP_W_TYPE_SIZE < 64
+#define _FP_FRACTBITS_D (2 * _FP_W_TYPE_SIZE)
+#else
+#define _FP_FRACTBITS_D _FP_W_TYPE_SIZE
+#endif
+
+#define _FP_FRACBITS_D 53
+#define _FP_FRACXBITS_D (_FP_FRACTBITS_D - _FP_FRACBITS_D)
+#define _FP_WFRACBITS_D (_FP_WORKBITS + _FP_FRACBITS_D)
+#define _FP_WFRACXBITS_D (_FP_FRACTBITS_D - _FP_WFRACBITS_D)
+#define _FP_EXPBITS_D 11
+#define _FP_EXPBIAS_D 1023
+#define _FP_EXPMAX_D 2047
+
+#define _FP_QNANBIT_D \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-2) % _FP_W_TYPE_SIZE)
+#define _FP_QNANBIT_SH_D \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_D \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-1) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_SH_D \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_D-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_OVERFLOW_D \
+ ((_FP_W_TYPE)1 << _FP_WFRACBITS_D % _FP_W_TYPE_SIZE)
+
+typedef float DFtype __attribute__((mode(DF)));
+
+#if _FP_W_TYPE_SIZE < 64
+
+union _FP_UNION_D
+{
+ DFtype flt;
+ struct {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_D;
+ unsigned frac1 : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0) - _FP_W_TYPE_SIZE;
+ unsigned frac0 : _FP_W_TYPE_SIZE;
+#else
+ unsigned frac0 : _FP_W_TYPE_SIZE;
+ unsigned frac1 : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0) - _FP_W_TYPE_SIZE;
+ unsigned exp : _FP_EXPBITS_D;
+ unsigned sign : 1;
+#endif
+ } bits __attribute__((packed));
+};
+
+#define FP_DECL_D(X) _FP_DECL(2,X)
+#define FP_UNPACK_RAW_D(X,val) _FP_UNPACK_RAW_2(D,X,val)
+#define FP_UNPACK_RAW_DP(X,val) _FP_UNPACK_RAW_2_P(D,X,val)
+#define FP_PACK_RAW_D(val,X) _FP_PACK_RAW_2(D,val,X)
+#define FP_PACK_RAW_DP(val,X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(D,val,X); \
+ } while (0)
+
+#define FP_UNPACK_D(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2(D,X,val); \
+ _FP_UNPACK_CANONICAL(D,2,X); \
+ } while (0)
+
+#define FP_UNPACK_DP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2_P(D,X,val); \
+ _FP_UNPACK_CANONICAL(D,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_D(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2(D,X,val); \
+ _FP_UNPACK_SEMIRAW(D,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_DP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2_P(D,X,val); \
+ _FP_UNPACK_SEMIRAW(D,2,X); \
+ } while (0)
+
+#define FP_PACK_D(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D,2,X); \
+ _FP_PACK_RAW_2(D,val,X); \
+ } while (0)
+
+#define FP_PACK_DP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D,2,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(D,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_D(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(D,2,X); \
+ _FP_PACK_RAW_2(D,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_DP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(D,2,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(D,val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_D(X) _FP_ISSIGNAN(D,2,X)
+#define FP_NEG_D(R,X) _FP_NEG(D,2,R,X)
+#define FP_ADD_D(R,X,Y) _FP_ADD(D,2,R,X,Y)
+#define FP_SUB_D(R,X,Y) _FP_SUB(D,2,R,X,Y)
+#define FP_MUL_D(R,X,Y) _FP_MUL(D,2,R,X,Y)
+#define FP_DIV_D(R,X,Y) _FP_DIV(D,2,R,X,Y)
+#define FP_SQRT_D(R,X) _FP_SQRT(D,2,R,X)
+#define _FP_SQRT_MEAT_D(R,S,T,X,Q) _FP_SQRT_MEAT_2(R,S,T,X,Q)
+
+#define FP_CMP_D(r,X,Y,un) _FP_CMP(D,2,r,X,Y,un)
+#define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,2,r,X,Y)
+#define FP_CMP_UNORD_D(r,X,Y) _FP_CMP_UNORD(D,2,r,X,Y)
+
+#define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,2,r,X,rsz,rsg)
+#define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,2,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_D(X) _FP_FRAC_HIGH_2(X)
+#define _FP_FRAC_HIGH_RAW_D(X) _FP_FRAC_HIGH_2(X)
+
+#else
+
+union _FP_UNION_D
+{
+ DFtype flt;
+ struct {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_D;
+ _FP_W_TYPE frac : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0);
+#else
+ _FP_W_TYPE frac : _FP_FRACBITS_D - (_FP_IMPLBIT_D != 0);
+ unsigned exp : _FP_EXPBITS_D;
+ unsigned sign : 1;
+#endif
+ } bits __attribute__((packed));
+};
+
+#define FP_DECL_D(X) _FP_DECL(1,X)
+#define FP_UNPACK_RAW_D(X,val) _FP_UNPACK_RAW_1(D,X,val)
+#define FP_UNPACK_RAW_DP(X,val) _FP_UNPACK_RAW_1_P(D,X,val)
+#define FP_PACK_RAW_D(val,X) _FP_PACK_RAW_1(D,val,X)
+#define FP_PACK_RAW_DP(val,X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(D,val,X); \
+ } while (0)
+
+#define FP_UNPACK_D(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1(D,X,val); \
+ _FP_UNPACK_CANONICAL(D,1,X); \
+ } while (0)
+
+#define FP_UNPACK_DP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1_P(D,X,val); \
+ _FP_UNPACK_CANONICAL(D,1,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_D(X,val) \
+ do { \
+    _FP_UNPACK_RAW_1(D,X,val);		\
+ _FP_UNPACK_SEMIRAW(D,1,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_DP(X,val) \
+ do { \
+    _FP_UNPACK_RAW_1_P(D,X,val);	\
+ _FP_UNPACK_SEMIRAW(D,1,X); \
+ } while (0)
+
+#define FP_PACK_D(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D,1,X); \
+ _FP_PACK_RAW_1(D,val,X); \
+ } while (0)
+
+#define FP_PACK_DP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(D,1,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(D,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_D(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(D,1,X); \
+ _FP_PACK_RAW_1(D,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_DP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(D,1,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(D,val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_D(X) _FP_ISSIGNAN(D,1,X)
+#define FP_NEG_D(R,X) _FP_NEG(D,1,R,X)
+#define FP_ADD_D(R,X,Y) _FP_ADD(D,1,R,X,Y)
+#define FP_SUB_D(R,X,Y) _FP_SUB(D,1,R,X,Y)
+#define FP_MUL_D(R,X,Y) _FP_MUL(D,1,R,X,Y)
+#define FP_DIV_D(R,X,Y) _FP_DIV(D,1,R,X,Y)
+#define FP_SQRT_D(R,X) _FP_SQRT(D,1,R,X)
+#define _FP_SQRT_MEAT_D(R,S,T,X,Q) _FP_SQRT_MEAT_1(R,S,T,X,Q)
+
+/* The implementation of _FP_MUL_D and _FP_DIV_D should be chosen by
+ the target machine. */
+
+#define FP_CMP_D(r,X,Y,un) _FP_CMP(D,1,r,X,Y,un)
+#define FP_CMP_EQ_D(r,X,Y) _FP_CMP_EQ(D,1,r,X,Y)
+#define FP_CMP_UNORD_D(r,X,Y) _FP_CMP_UNORD(D,1,r,X,Y)
+
+#define FP_TO_INT_D(r,X,rsz,rsg) _FP_TO_INT(D,1,r,X,rsz,rsg)
+#define FP_FROM_INT_D(X,r,rs,rt) _FP_FROM_INT(D,1,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_D(X) _FP_FRAC_HIGH_1(X)
+#define _FP_FRAC_HIGH_RAW_D(X) _FP_FRAC_HIGH_1(X)
+
+#endif /* W_TYPE_SIZE < 64 */
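For orientation, the constants above spell out the standard IEEE binary64 layout: one sign bit, _FP_EXPBITS_D == 11 exponent bits biased by _FP_EXPBIAS_D == 1023, and 52 stored fraction bits (_FP_FRACBITS_D == 53 counts the implicit bit). An illustrative stand-alone decomposition that avoids the internal union (assumes IEEE doubles and a 64-bit unsigned long long):

#include <stdio.h>
#include <string.h>

int main (void)
{
  double d = -1.5;
  unsigned long long u;
  memcpy (&u, &d, sizeof u);            /* same reinterpretation _FP_UNION_D performs */
  unsigned sign = (unsigned) (u >> 63);
  unsigned exp  = (unsigned) (u >> 52) & 0x7ff;        /* 11 bits  */
  unsigned long long frac = u & ((1ULL << 52) - 1);    /* 52 stored bits */
  /* For -1.5: sign=1, exp=1023 (unbiased 0), frac=0x8000000000000.  */
  printf ("sign=%u exp=%u (unbiased %d) frac=%#llx\n",
          sign, exp, (int) exp - 1023, frac);
  return 0;
}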
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqdf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqdf2.c
new file mode 100644
index 000000000..efa769e98
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqdf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 otherwise
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+int __eqdf2(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ int r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_EQ_D(r, A, B);
+ if (r && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__eqdf2, __nedf2);
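The strong_alias to __nedf2 is sound because both predicates share one convention: return zero iff the operands are equal, nonzero otherwise, with only signaling NaNs raising INVALID. A hedged caller sketch (assumes libgcc linkage):

extern int __eqdf2 (double, double);

int doubles_equal (double a, double b)
{
  /* Zero means equal; NaN operands always compare unequal.  */
  return __eqdf2 (a, b) == 0;
}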
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqsf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqsf2.c
new file mode 100644
index 000000000..7e01c01d2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqsf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 otherwise
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+int __eqsf2(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ int r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_EQ_S(r, A, B);
+ if (r && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__eqsf2, __nesf2);
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqtf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqtf2.c
new file mode 100644
index 000000000..fd6ffd1eb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/eqtf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 otherwise
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+int __eqtf2(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B);
+ int r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_UNPACK_RAW_Q(B, b);
+ FP_CMP_EQ_Q(r, A, B);
+ if (r && (FP_ISSIGNAN_Q(A) || FP_ISSIGNAN_Q(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__eqtf2, __netf2);
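The guarded FP_SET_EXCEPTION above makes this the IEEE quiet equality predicate: any NaN operand yields "unequal", and only a signaling NaN raises the invalid exception. A sketch of that behaviour, assuming long double maps to TFtype on the target:

    /* NaN behaviour sketch, assuming 'long double' maps to TFtype on the
       target.  A quiet NaN compares unequal to everything, itself
       included, without trapping; FP_ISSIGNAN only flags signaling NaNs,
       so the FP_EX_INVALID branch above never fires here.  */
    extern int __eqtf2(long double a, long double b);

    int qnan_is_never_equal(void)
    {
      long double qnan = __builtin_nanl("");
      return __eqtf2(qnan, qnan) != 0;   /* true: unordered compares unequal */
    }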
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/extenddftf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extenddftf2.c
new file mode 100644
index 000000000..4101639a9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extenddftf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Return a converted to IEEE quad
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+#include "quad.h"
+
+TFtype __extenddftf2(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_D(A, a);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_EXTEND(Q,D,4,2,R,A);
+#else
+ FP_EXTEND(Q,D,2,1,R,A);
+#endif
+ FP_PACK_RAW_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
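The preprocessor test selects how many _FP_W_TYPE words carry each fraction. With the usual IEEE widths (quad keeps 113 working fraction bits, double 53): on 32-bit words 2*32 = 64 < 113, so the quad fraction spans four words and the double fraction two, giving FP_EXTEND(Q,D,4,2,...); on 64-bit words 2*64 = 128 >= 113, so two and one suffice. A compile-time restatement of that arithmetic, as a sketch using the pre-C11 negative-array trick:

    #include "soft-fp.h"
    #include "double.h"
    #include "quad.h"

    /* Sketch: whichever branch the #if above selects, the chosen word
       count must be able to hold the full quad fraction.  */
    typedef char quad_word_count_suffices[
      ((2 * _FP_W_TYPE_SIZE < _FP_FRACBITS_Q ? 4 : 2) * _FP_W_TYPE_SIZE
       >= _FP_FRACBITS_Q) ? 1 : -1];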
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/extended.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extended.h
new file mode 100644
index 000000000..e5f16debe
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extended.h
@@ -0,0 +1,431 @@
+/* Software floating-point emulation.
+ Definitions for IEEE Extended Precision.
+ Copyright (C) 1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#if _FP_W_TYPE_SIZE < 32
+#error "Here's a nickel, kid. Go buy yourself a real computer."
+#endif
+
+#if _FP_W_TYPE_SIZE < 64
+#define _FP_FRACTBITS_E (4*_FP_W_TYPE_SIZE)
+#else
+#define _FP_FRACTBITS_E (2*_FP_W_TYPE_SIZE)
+#endif
+
+#define _FP_FRACBITS_E 64
+#define _FP_FRACXBITS_E (_FP_FRACTBITS_E - _FP_FRACBITS_E)
+#define _FP_WFRACBITS_E (_FP_WORKBITS + _FP_FRACBITS_E)
+#define _FP_WFRACXBITS_E (_FP_FRACTBITS_E - _FP_WFRACBITS_E)
+#define _FP_EXPBITS_E 15
+#define _FP_EXPBIAS_E 16383
+#define _FP_EXPMAX_E 32767
+
+#define _FP_QNANBIT_E \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2) % _FP_W_TYPE_SIZE)
+#define _FP_QNANBIT_SH_E \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_E \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_SH_E \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_OVERFLOW_E \
+ ((_FP_W_TYPE)1 << (_FP_WFRACBITS_E % _FP_W_TYPE_SIZE))
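Worked values for these constants, assuming _FP_WORKBITS is 3 (its usual soft-fp value) and a 32- or 64-bit word type, both of which give _FP_FRACTBITS_E = 128: _FP_FRACXBITS_E = 128 - 64 = 64 spare container bits, _FP_WFRACBITS_E = 3 + 64 = 67 working fraction bits, and _FP_WFRACXBITS_E = 128 - 67 = 61. All 64 fraction bits are explicit because the 80-bit extended format stores its integer bit, unlike float and double. One relation that can be checked at compile time, as a sketch:

    /* Sketch: the bias of an IEEE-style format is 2^(expbits-1) - 1,
       so 16383 must equal (1 << 14) - 1.  */
    typedef char expbias_is_consistent[
      (_FP_EXPBIAS_E == (1 << (_FP_EXPBITS_E - 1)) - 1) ? 1 : -1];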
+
+typedef float XFtype __attribute__((mode(XF)));
+
+#if _FP_W_TYPE_SIZE < 64
+
+union _FP_UNION_E
+{
+ XFtype flt;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned long pad1 : _FP_W_TYPE_SIZE;
+ unsigned long pad2 : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
+ unsigned long sign : 1;
+ unsigned long exp : _FP_EXPBITS_E;
+ unsigned long frac1 : _FP_W_TYPE_SIZE;
+ unsigned long frac0 : _FP_W_TYPE_SIZE;
+#else
+ unsigned long frac0 : _FP_W_TYPE_SIZE;
+ unsigned long frac1 : _FP_W_TYPE_SIZE;
+ unsigned exp : _FP_EXPBITS_E;
+ unsigned sign : 1;
+#endif /* not bigendian */
+ } bits __attribute__((packed));
+};
+
+#define FP_DECL_E(X) _FP_DECL(4,X)
+
+#define FP_UNPACK_RAW_E(X, val) \
+ do { \
+ union _FP_UNION_E _flo; _flo.flt = (val); \
+ \
+ X##_f[2] = 0; X##_f[3] = 0; \
+ X##_f[0] = _flo.bits.frac0; \
+ X##_f[1] = _flo.bits.frac1; \
+ X##_e = _flo.bits.exp; \
+ X##_s = _flo.bits.sign; \
+ } while (0)
+
+#define FP_UNPACK_RAW_EP(X, val) \
+ do { \
+ union _FP_UNION_E *_flo = \
+ (union _FP_UNION_E *)(val); \
+ \
+ X##_f[2] = 0; X##_f[3] = 0; \
+ X##_f[0] = _flo->bits.frac0; \
+ X##_f[1] = _flo->bits.frac1; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define FP_PACK_RAW_E(val, X) \
+ do { \
+ union _FP_UNION_E _flo; \
+ \
+ if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
+ else X##_f[1] &= ~(_FP_IMPLBIT_E); \
+ _flo.bits.frac0 = X##_f[0]; \
+ _flo.bits.frac1 = X##_f[1]; \
+ _flo.bits.exp = X##_e; \
+ _flo.bits.sign = X##_s; \
+ \
+ (val) = _flo.flt; \
+ } while (0)
+
+#define FP_PACK_RAW_EP(val, X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ { \
+ union _FP_UNION_E *_flo = \
+ (union _FP_UNION_E *)(val); \
+ \
+ if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
+ else X##_f[1] &= ~(_FP_IMPLBIT_E); \
+ _flo->bits.frac0 = X##_f[0]; \
+ _flo->bits.frac1 = X##_f[1]; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } \
+ } while (0)
+
+#define FP_UNPACK_E(X,val) \
+ do { \
+ FP_UNPACK_RAW_E(X,val); \
+ _FP_UNPACK_CANONICAL(E,4,X); \
+ } while (0)
+
+#define FP_UNPACK_EP(X,val) \
+ do { \
+ FP_UNPACK_RAW_EP(X,val); \
+ _FP_UNPACK_CANONICAL(E,4,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_E(X,val) \
+ do { \
+ FP_UNPACK_RAW_E(X,val); \
+ _FP_UNPACK_SEMIRAW(E,4,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_EP(X,val) \
+ do { \
+ FP_UNPACK_RAW_EP(X,val); \
+ _FP_UNPACK_SEMIRAW(E,4,X); \
+ } while (0)
+
+#define FP_PACK_E(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(E,4,X); \
+ FP_PACK_RAW_E(val,X); \
+ } while (0)
+
+#define FP_PACK_EP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(E,4,X); \
+ FP_PACK_RAW_EP(val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_E(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(E,4,X); \
+ FP_PACK_RAW_E(val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_EP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(E,4,X); \
+ FP_PACK_RAW_EP(val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,4,X)
+#define FP_NEG_E(R,X) _FP_NEG(E,4,R,X)
+#define FP_ADD_E(R,X,Y) _FP_ADD(E,4,R,X,Y)
+#define FP_SUB_E(R,X,Y) _FP_SUB(E,4,R,X,Y)
+#define FP_MUL_E(R,X,Y) _FP_MUL(E,4,R,X,Y)
+#define FP_DIV_E(R,X,Y) _FP_DIV(E,4,R,X,Y)
+#define FP_SQRT_E(R,X) _FP_SQRT(E,4,R,X)
+
+/*
+ * Square root algorithms:
+ * We have just one right now; a Newton approximation might be
+ * worth adding for machines where division is fast.
+ * This has a special _E version because the standard _4 square
+ * root would not work (it has to start with the second word,
+ * not the first); since we must write a dedicated version
+ * anyway, we optimize it by doing most of the calculation
+ * in two UWtype registers instead of four.
+ */
+
+#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
+ do { \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ _FP_FRAC_SRL_4(X, (_FP_WORKBITS)); \
+ while (q) \
+ { \
+ T##_f[1] = S##_f[1] + q; \
+ if (T##_f[1] <= X##_f[1]) \
+ { \
+ S##_f[1] = T##_f[1] + q; \
+ X##_f[1] -= T##_f[1]; \
+ R##_f[1] += q; \
+ } \
+ _FP_FRAC_SLL_2(X, 1); \
+ q >>= 1; \
+ } \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ while (q) \
+ { \
+ T##_f[0] = S##_f[0] + q; \
+ T##_f[1] = S##_f[1]; \
+ if (T##_f[1] < X##_f[1] || \
+ (T##_f[1] == X##_f[1] && \
+ T##_f[0] <= X##_f[0])) \
+ { \
+ S##_f[0] = T##_f[0] + q; \
+ S##_f[1] += (T##_f[0] > S##_f[0]); \
+ _FP_FRAC_DEC_2(X, T); \
+ R##_f[0] += q; \
+ } \
+ _FP_FRAC_SLL_2(X, 1); \
+ q >>= 1; \
+ } \
+ _FP_FRAC_SLL_4(R, (_FP_WORKBITS)); \
+ if (X##_f[0] | X##_f[1]) \
+ { \
+ if (S##_f[1] < X##_f[1] || \
+ (S##_f[1] == X##_f[1] && \
+ S##_f[0] < X##_f[0])) \
+ R##_f[0] |= _FP_WORK_ROUND; \
+ R##_f[0] |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
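The two loops are the textbook restoring (shift-and-subtract) square root, run first over the high word and then across the word boundary, with any leftover remainder folded into the round and sticky bits. The same recurrence in its stand-alone single-word form, as a sketch (the macro additionally shifts the operand left each step so the fraction stays normalized):

    #include <stdint.h>

    /* Stand-alone sketch of the restoring square-root recurrence used
       above, in its textbook single-word form: 'bit' walks down through
       the powers of four, and a trial bit is kept only when subtracting
       it leaves a non-negative remainder.  */
    static uint32_t isqrt32(uint32_t x)
    {
      uint32_t rem  = x;            /* running remainder */
      uint32_t root = 0;            /* running root */
      uint32_t bit  = 1u << 30;     /* highest power of four in a uint32_t */

      while (bit != 0)
        {
          if (rem >= root + bit)
            {
              rem -= root + bit;
              root = (root >> 1) + bit;
            }
          else
            root >>= 1;
          bit >>= 2;
        }
      return root;                  /* isqrt32(25) == 5, isqrt32(26) == 5 */
    }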
+
+#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,4,r,X,Y,un)
+#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,4,r,X,Y)
+#define FP_CMP_UNORD_E(r,X,Y) _FP_CMP_UNORD(E,4,r,X,Y)
+
+#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,4,r,X,rsz,rsg)
+#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,4,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_E(X) (X##_f[2])
+#define _FP_FRAC_HIGH_RAW_E(X) (X##_f[1])
+
+#else /* not _FP_W_TYPE_SIZE < 64 */
+union _FP_UNION_E
+{
+ XFtype flt;
+ struct {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ _FP_W_TYPE pad : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_E;
+ _FP_W_TYPE frac : _FP_W_TYPE_SIZE;
+#else
+ _FP_W_TYPE frac : _FP_W_TYPE_SIZE;
+ unsigned exp : _FP_EXPBITS_E;
+ unsigned sign : 1;
+#endif
+ } bits;
+};
+
+#define FP_DECL_E(X) _FP_DECL(2,X)
+
+#define FP_UNPACK_RAW_E(X, val) \
+ do { \
+ union _FP_UNION_E _flo; _flo.flt = (val); \
+ \
+ X##_f0 = _flo.bits.frac; \
+ X##_f1 = 0; \
+ X##_e = _flo.bits.exp; \
+ X##_s = _flo.bits.sign; \
+ } while (0)
+
+#define FP_UNPACK_RAW_EP(X, val) \
+ do { \
+ union _FP_UNION_E *_flo = \
+ (union _FP_UNION_E *)(val); \
+ \
+ X##_f0 = _flo->bits.frac; \
+ X##_f1 = 0; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define FP_PACK_RAW_E(val, X) \
+ do { \
+ union _FP_UNION_E _flo; \
+ \
+ if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
+ else X##_f0 &= ~(_FP_IMPLBIT_E); \
+ _flo.bits.frac = X##_f0; \
+ _flo.bits.exp = X##_e; \
+ _flo.bits.sign = X##_s; \
+ \
+ (val) = _flo.flt; \
+ } while (0)
+
+#define FP_PACK_RAW_EP(val, X)			\
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ { \
+ union _FP_UNION_E *_flo = \
+ (union _FP_UNION_E *)(val); \
+ \
+ if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
+ else X##_f0 &= ~(_FP_IMPLBIT_E); \
+ _flo->bits.frac = X##_f0; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } \
+ } while (0)
+
+#define FP_UNPACK_E(X,val) \
+ do { \
+ FP_UNPACK_RAW_E(X,val); \
+ _FP_UNPACK_CANONICAL(E,2,X); \
+ } while (0)
+
+#define FP_UNPACK_EP(X,val) \
+ do { \
+ FP_UNPACK_RAW_EP(X,val); \
+ _FP_UNPACK_CANONICAL(E,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_E(X,val) \
+ do { \
+ FP_UNPACK_RAW_E(X,val); \
+ _FP_UNPACK_SEMIRAW(E,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_EP(X,val) \
+ do { \
+ FP_UNPACK_RAW_EP(X,val); \
+ _FP_UNPACK_SEMIRAW(E,2,X); \
+ } while (0)
+
+#define FP_PACK_E(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(E,2,X); \
+ FP_PACK_RAW_E(val,X); \
+ } while (0)
+
+#define FP_PACK_EP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(E,2,X); \
+ FP_PACK_RAW_EP(val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_E(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(E,2,X); \
+ FP_PACK_RAW_E(val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_EP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(E,2,X); \
+ FP_PACK_RAW_EP(val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,2,X)
+#define FP_NEG_E(R,X) _FP_NEG(E,2,R,X)
+#define FP_ADD_E(R,X,Y) _FP_ADD(E,2,R,X,Y)
+#define FP_SUB_E(R,X,Y) _FP_SUB(E,2,R,X,Y)
+#define FP_MUL_E(R,X,Y) _FP_MUL(E,2,R,X,Y)
+#define FP_DIV_E(R,X,Y) _FP_DIV(E,2,R,X,Y)
+#define FP_SQRT_E(R,X) _FP_SQRT(E,2,R,X)
+
+/*
+ * Square root algorithms:
+ * We have just one right now; a Newton approximation might be
+ * worth adding for machines where division is fast.
+ * We optimize it by doing most of the calculation in one
+ * UWtype register instead of two, although we don't have to.
+ */
+#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
+ do { \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ _FP_FRAC_SRL_2(X, (_FP_WORKBITS)); \
+ while (q) \
+ { \
+ T##_f0 = S##_f0 + q; \
+ if (T##_f0 <= X##_f0) \
+ { \
+ S##_f0 = T##_f0 + q; \
+ X##_f0 -= T##_f0; \
+ R##_f0 += q; \
+ } \
+ _FP_FRAC_SLL_1(X, 1); \
+ q >>= 1; \
+ } \
+ _FP_FRAC_SLL_2(R, (_FP_WORKBITS)); \
+ if (X##_f0) \
+ { \
+ if (S##_f0 < X##_f0) \
+ R##_f0 |= _FP_WORK_ROUND; \
+ R##_f0 |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
+
+#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,2,r,X,Y,un)
+#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,2,r,X,Y)
+#define FP_CMP_UNORD_E(r,X,Y) _FP_CMP_UNORD(E,2,r,X,Y)
+
+#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,2,r,X,rsz,rsg)
+#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,2,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_E(X) (X##_f1)
+#define _FP_FRAC_HIGH_RAW_E(X) (X##_f0)
+
+#endif /* not _FP_W_TYPE_SIZE < 64 */
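A quick way to see the explicit integer bit this header models, assuming an x86 target where long double is the 80-bit extended format:

    #include <stdio.h>
    #include <string.h>

    /* Sketch, assuming an x86 target where 'long double' is this 80-bit
       extended format.  Bit 63 of the significand is an explicit integer
       bit, which is why FP_PACK_RAW_E above forces _FP_IMPLBIT_E to match
       the exponent instead of treating it as implicit.  */
    int main(void)
    {
      long double one = 1.0L;
      unsigned char bytes[sizeof(long double)];

      memset(bytes, 0, sizeof bytes);
      memcpy(bytes, &one, 10);                 /* 80 bits = 10 bytes (LE) */
      printf("integer bit of 1.0L = %d\n", (bytes[7] >> 7) & 1);  /* 1 */
      return 0;
    }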
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsfdf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsfdf2.c
new file mode 100644
index 000000000..fba22d5a1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsfdf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Return a converted to IEEE double
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "double.h"
+
+DFtype __extendsfdf2(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_S(A, a);
+#if _FP_W_TYPE_SIZE < _FP_FRACBITS_D
+ FP_EXTEND(D,S,2,1,R,A);
+#else
+ FP_EXTEND(D,S,1,1,R,A);
+#endif
+ FP_PACK_RAW_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsftf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsftf2.c
new file mode 100644
index 000000000..c43cf1ede
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/extendsftf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Return a converted to IEEE quad
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "quad.h"
+
+TFtype __extendsftf2(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_S(A, a);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_EXTEND(Q,S,4,1,R,A);
+#else
+ FP_EXTEND(Q,S,2,1,R,A);
+#endif
+ FP_PACK_RAW_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfdi.c
new file mode 100644
index 000000000..fdfe35af5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DItype __fixdfdi(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_TO_INT_D(r, A, DI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
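The final argument of FP_TO_INT selects a signed conversion, which truncates toward zero and raises FP_EX_INVALID when the value does not fit. A caller sketch, assuming DFtype is double and DItype is long long; note the result above is accumulated in an unsigned variable so the most negative value can be formed without signed overflow:

    /* Usage sketch, assuming DFtype is 'double' and DItype is
       'long long'.  On a soft-float target GCC lowers '(long long)d'
       to this routine.  */
    extern long long __fixdfdi(double a);

    long long truncate64(double d)
    {
      return __fixdfdi(d);         /* __fixdfdi(-2.9) == -2 */
    }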
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfsi.c
new file mode 100644
index 000000000..a05f3e39a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixdfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+SItype __fixdfsi(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ USItype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_TO_INT_D(r, A, SI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfdi.c
new file mode 100644
index 000000000..384d9bdd5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+DItype __fixsfdi(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_TO_INT_S(r, A, DI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfsi.c
new file mode 100644
index 000000000..1d40ed05d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixsfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SItype __fixsfsi(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ USItype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_TO_INT_S(r, A, SI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfdi.c
new file mode 100644
index 000000000..ea10ce2dd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+DItype __fixtfdi(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_TO_INT_Q(r, A, DI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfsi.c
new file mode 100644
index 000000000..eb71038bc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixtfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit signed integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+SItype __fixtfsi(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ USItype r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_TO_INT_Q(r, A, SI_BITS, 1);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfdi.c
new file mode 100644
index 000000000..d85198f18
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+UDItype __fixunsdfdi(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_TO_INT_D(r, A, DI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
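The unsigned conversions reuse the same machinery; only the last FP_TO_INT argument changes (1 in the signed files above, 0 here). A usage sketch, assuming DFtype is double:

    /* Sketch, assuming DFtype is 'double'.  The signed and unsigned
       conversions share all their code; only the final FP_TO_INT
       argument differs.  */
    extern unsigned long long __fixunsdfdi(double a);

    unsigned long long to_u64(double d)
    {
      return __fixunsdfdi(d);      /* __fixunsdfdi(3.99) == 3 */
    }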
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfsi.c
new file mode 100644
index 000000000..492ffdea6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunsdfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+USItype __fixunsdfsi(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ USItype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_TO_INT_D(r, A, SI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfdi.c
new file mode 100644
index 000000000..548415383
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+UDItype __fixunssfdi(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_TO_INT_S(r, A, DI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfsi.c
new file mode 100644
index 000000000..ac9d4b965
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunssfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+USItype __fixunssfsi(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ USItype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_TO_INT_S(r, A, SI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfdi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfdi.c
new file mode 100644
index 000000000..86f1fc856
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfdi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 64bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+UDItype __fixunstfdi(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ UDItype r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_TO_INT_Q(r, A, DI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfsi.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfsi.c
new file mode 100644
index 000000000..e0335da47
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/fixunstfsi.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a to 32bit unsigned integer
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+USItype __fixunstfsi(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ USItype r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_TO_INT_Q(r, A, SI_BITS, 0);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdidf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdidf.c
new file mode 100644
index 000000000..21e9fb189
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdidf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 64bit signed integer to IEEE double
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __floatdidf(DItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ DFtype a;
+
+ FP_FROM_INT_D(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_D(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
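Int-to-float conversions can be inexact once the integer carries more significant bits than the target fraction holds. A sketch, assuming DItype is long long:

    /* Inexactness sketch, assuming DItype is 'long long'.  A 64-bit
       integer can have more significant bits than double's 53-bit
       fraction, so the conversion must round; under round-to-nearest,
       2^53 + 1 converts to exactly 2^53.  */
    extern double __floatdidf(long long i);

    double demo(void)
    {
      return __floatdidf((1LL << 53) + 1);   /* == 9007199254740992.0 */
    }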
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdisf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdisf.c
new file mode 100644
index 000000000..ee57915c3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatdisf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 64bit signed integer to IEEE single
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __floatdisf(DItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ SFtype a;
+
+ FP_FROM_INT_S(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_S(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatditf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatditf.c
new file mode 100644
index 000000000..564800bc0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatditf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 64bit signed integer to IEEE quad
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __floatditf(DItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ TFtype a;
+
+ FP_FROM_INT_Q(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_Q(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsidf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsidf.c
new file mode 100644
index 000000000..b6d5f8d1d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsidf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 32bit signed integer to IEEE double
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __floatsidf(SItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ DFtype a;
+
+ FP_FROM_INT_D(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_D(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsisf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsisf.c
new file mode 100644
index 000000000..76217fe34
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsisf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 32bit signed integer to IEEE single
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __floatsisf(SItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ SFtype a;
+
+ FP_FROM_INT_S(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_S(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsitf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsitf.c
new file mode 100644
index 000000000..8c3d9cc61
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatsitf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 32bit signed integer to IEEE quad
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __floatsitf(SItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ TFtype a;
+
+ FP_FROM_INT_Q(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_Q(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
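Going to quad from a 32-bit integer can never round: every SItype value fits in the 113-bit quad fraction (indeed in double's 53 bits). A sketch, assuming SItype is int and that the target exposes TFtype as long double (an assumption; the C-level name varies by target):

    /* Exactness sketch; 'long double' standing in for TFtype is a
       hypothetical mapping for illustration only.  Every 32-bit integer
       is exactly representable, so round-tripping through __fixtfsi
       recovers the original value.  */
    extern long double __floatsitf(int i);

    long double exact(void)
    {
      return __floatsitf(-123456789);        /* exactly -123456789.0 */
    }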
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundidf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundidf.c
new file mode 100644
index 000000000..af8e4a5ae
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundidf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 64bit unsigned integer to IEEE double
+ Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __floatundidf(UDItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ DFtype a;
+
+ FP_FROM_INT_D(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_D(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundisf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundisf.c
new file mode 100644
index 000000000..977f7dfc7
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatundisf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 64-bit unsigned integer to IEEE single
+ Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __floatundisf(UDItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ SFtype a;
+
+ FP_FROM_INT_S(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_S(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
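Worth noting for the 64-bit-to-single case: a UDItype value carries up to 64 significant bits while a single-precision significand holds only 24, so FP_FROM_INT_S must round. A small sketch of the consequence, assuming round-to-nearest-even and the usual libgcc prototype (UDItype as unsigned long long):

    /* Sketch: 2^24 + 1 sits exactly between two representable floats,
       so round-to-nearest-even picks 2^24. */
    extern float __floatundisf(unsigned long long i);

    int main(void)
    {
        float f = __floatundisf(16777217ULL);   /* 2^24 + 1 */
        return f == 16777216.0f ? 0 : 1;        /* rounds down to 2^24 */
    }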
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunditf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunditf.c
new file mode 100644
index 000000000..ab357f051
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunditf.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Convert a 64-bit unsigned integer to IEEE quad
+ Copyright (C) 1997,1999, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype
+__floatunditf(UDItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ TFtype a;
+
+ FP_FROM_INT_Q(A, i, DI_BITS, UDItype);
+ FP_PACK_RAW_Q(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsidf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsidf.c
new file mode 100644
index 000000000..12d0f25bf
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsidf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 32-bit unsigned integer to IEEE double
+ Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __floatunsidf(USItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ DFtype a;
+
+ FP_FROM_INT_D(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_D(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsisf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsisf.c
new file mode 100644
index 000000000..80c5d3d35
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsisf.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Convert a 32-bit unsigned integer to IEEE single
+ Copyright (C) 1997, 1999, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __floatunsisf(USItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A);
+ SFtype a;
+
+ FP_FROM_INT_S(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_S(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsitf.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsitf.c
new file mode 100644
index 000000000..c993716e5
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/floatunsitf.c
@@ -0,0 +1,47 @@
+/* Software floating-point emulation.
+ Convert a 32-bit unsigned integer to IEEE quad
+ Copyright (C) 1997,1999, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype
+__floatunsitf(USItype i)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ TFtype a;
+
+ FP_FROM_INT_Q(A, i, SI_BITS, USItype);
+ FP_PACK_RAW_Q(a, A);
+ FP_HANDLE_EXCEPTIONS;
+
+ return a;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/gedf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/gedf2.c
new file mode 100644
index 000000000..e0dc8620e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/gedf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, -2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+int __gedf2(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ int r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__gedf2, __gtdf2);
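The return-value convention here is what lets one routine serve two C operators: the FP_CMP_D call passes -2 as the result for unordered operands, a deliberately negative value, so both the ">= 0" and "> 0" tests fail when either operand is NaN. That is why __gtdf2 can be a strong alias of __gedf2. A sketch of how compiled code consumes the result, assuming the usual libgcc prototype:

    extern int __gedf2(double a, double b);

    int ge(double a, double b) { return __gedf2(a, b) >= 0; }  /* a >= b */
    int gt(double a, double b) { return __gedf2(a, b) >  0; }  /* a >  b */
    /* NaN operands make __gedf2 return -2, so both predicates are
       false, matching IEEE semantics for ordered comparisons. */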
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/gesf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/gesf2.c
new file mode 100644
index 000000000..d1f3ba2f9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/gesf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, -2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+int __gesf2(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ int r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__gesf2, __gtsf2);
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/getf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/getf2.c
new file mode 100644
index 000000000..82ff283d0
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/getf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, -2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+int __getf2(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B);
+ int r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_UNPACK_RAW_Q(B, b);
+ FP_CMP_Q(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_Q(A) || FP_ISSIGNAN_Q(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__getf2, __gttf2);
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/ledf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/ledf2.c
new file mode 100644
index 000000000..528a9819c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/ledf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, 2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+int __ledf2(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ int r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__ledf2, __ltdf2);
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/lesf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/lesf2.c
new file mode 100644
index 000000000..c564bd953
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/lesf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, 2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+int __lesf2(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ int r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__lesf2, __ltsf2);
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/letf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/letf2.c
new file mode 100644
index 000000000..35e03aaac
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/letf2.c
@@ -0,0 +1,51 @@
+/* Software floating-point emulation.
+ Return 0 iff a == b, 1 iff a > b, 2 iff a and b are unordered (NaN), -1 iff a < b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+int __letf2(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B);
+ int r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_UNPACK_RAW_Q(B, b);
+ FP_CMP_Q(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_Q(A) || FP_ISSIGNAN_Q(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+strong_alias(__letf2, __lttf2);
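The le/lt family mirrors the ge/gt family with the sign of the unordered result flipped: FP_CMP_* is told to return +2 for unordered operands, so both the "<= 0" and "< 0" tests fail for NaN, and __lt*2 can alias __le*2. A sketch under the same assumptions as the ge case above:

    extern int __ledf2(double a, double b);

    int le(double a, double b) { return __ledf2(a, b) <= 0; }  /* a <= b */
    int lt(double a, double b) { return __ledf2(a, b) <  0; }  /* a <  b */
    /* NaN operands make __ledf2 return 2 (positive), so both fail. */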
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/muldf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/muldf3.c
new file mode 100644
index 000000000..7eb2015ae
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/muldf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a * b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __muldf3(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_D(A, a);
+ FP_UNPACK_D(B, b);
+ FP_MUL_D(R, A, B);
+ FP_PACK_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
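This routine shows the shape shared by all the arithmetic entry points: FP_INIT_ROUNDMODE samples the rounding mode, FP_UNPACK_* (unlike the _RAW variants used by the comparisons) splits and classifies each operand, the operation macro works on the unpacked sign/exponent/fraction form, FP_PACK_* rounds and reassembles, and FP_HANDLE_EXCEPTIONS raises any accumulated flags. As with the conversions, the compiler reaches it through a libcall; a minimal sketch assuming the standard libgcc naming:

    /* Sketch: what GCC emits for a * b on a soft-float target. */
    extern double __muldf3(double a, double b);

    double mul(double a, double b)
    {
        return __muldf3(a, b);      /* behaves exactly like a * b */
    }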
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/mulsf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/mulsf3.c
new file mode 100644
index 000000000..5df440687
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/mulsf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a * b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __mulsf3(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B); FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_S(A, a);
+ FP_UNPACK_S(B, b);
+ FP_MUL_S(R, A, B);
+ FP_PACK_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/multf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/multf3.c
new file mode 100644
index 000000000..0abab6ddc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/multf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a * b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __multf3(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q(A, a);
+ FP_UNPACK_Q(B, b);
+ FP_MUL_Q(R, A, B);
+ FP_PACK_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/negdf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negdf2.c
new file mode 100644
index 000000000..54869e9a6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negdf2.c
@@ -0,0 +1,48 @@
+/* Software floating-point emulation.
+ Return -a
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __negdf2(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(R);
+ DFtype r;
+
+ FP_UNPACK_D(A, a);
+ FP_NEG_D(R, A);
+ FP_PACK_D(r, R);
+ FP_CLEAR_EXCEPTIONS;
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
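Negation never raises an IEEE exception: it is exact and only flips the sign bit, which is why FP_CLEAR_EXCEPTIONS here discards anything the unpack/pack steps may have flagged before FP_HANDLE_EXCEPTIONS runs. A standalone sketch of the same effect on the raw bit pattern:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: IEEE negation as a pure sign-bit flip, the effect of
       FP_NEG_D above; works for zeros, infinities and NaNs alike. */
    double negate(double a)
    {
        uint64_t u;
        memcpy(&u, &a, sizeof u);   /* view the bit pattern */
        u ^= 1ULL << 63;            /* flip the sign bit */
        memcpy(&a, &u, sizeof u);
        return a;
    }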
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/negsf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negsf2.c
new file mode 100644
index 000000000..bf5db7a45
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negsf2.c
@@ -0,0 +1,48 @@
+/* Software floating-point emulation.
+ Return -a
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __negsf2(SFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(R);
+ SFtype r;
+
+ FP_UNPACK_S(A, a);
+ FP_NEG_S(R, A);
+ FP_PACK_S(r, R);
+ FP_CLEAR_EXCEPTIONS;
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/negtf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negtf2.c
new file mode 100644
index 000000000..5524c82df
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/negtf2.c
@@ -0,0 +1,48 @@
+/* Software floating-point emulation.
+ Return -a
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __negtf2(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(R);
+ TFtype r;
+
+ FP_UNPACK_Q(A, a);
+ FP_NEG_Q(R, A);
+ FP_PACK_Q(r, R);
+ FP_CLEAR_EXCEPTIONS;
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-1.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-1.h
new file mode 100644
index 000000000..35cd0ba7b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-1.h
@@ -0,0 +1,302 @@
+/* Software floating-point emulation.
+ Basic one-word fraction declaration and manipulation.
+ Copyright (C) 1997,1998,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#define _FP_FRAC_DECL_1(X) _FP_W_TYPE X##_f
+#define _FP_FRAC_COPY_1(D,S) (D##_f = S##_f)
+#define _FP_FRAC_SET_1(X,I) (X##_f = I)
+#define _FP_FRAC_HIGH_1(X) (X##_f)
+#define _FP_FRAC_LOW_1(X) (X##_f)
+#define _FP_FRAC_WORD_1(X,w) (X##_f)
+
+#define _FP_FRAC_ADDI_1(X,I) (X##_f += I)
+#define _FP_FRAC_SLL_1(X,N) \
+ do { \
+ if (__builtin_constant_p(N) && (N) == 1) \
+ X##_f += X##_f; \
+ else \
+ X##_f <<= (N); \
+ } while (0)
+#define _FP_FRAC_SRL_1(X,N) (X##_f >>= N)
+
+/* Right shift with sticky-lsb. */
+#define _FP_FRAC_SRST_1(X,S,N,sz) __FP_FRAC_SRST_1(X##_f, S, N, sz)
+#define _FP_FRAC_SRS_1(X,N,sz) __FP_FRAC_SRS_1(X##_f, N, sz)
+
+#define __FP_FRAC_SRST_1(X,S,N,sz) \
+do { \
+ S = (__builtin_constant_p(N) && (N) == 1 \
+ ? X & 1 : (X << (_FP_W_TYPE_SIZE - (N))) != 0); \
+ X = X >> (N); \
+} while (0)
+
+#define __FP_FRAC_SRS_1(X,N,sz) \
+ (X = (X >> (N) | (__builtin_constant_p(N) && (N) == 1 \
+ ? X & 1 : (X << (_FP_W_TYPE_SIZE - (N))) != 0)))
+
+#define _FP_FRAC_ADD_1(R,X,Y) (R##_f = X##_f + Y##_f)
+#define _FP_FRAC_SUB_1(R,X,Y) (R##_f = X##_f - Y##_f)
+#define _FP_FRAC_DEC_1(X,Y) (X##_f -= Y##_f)
+#define _FP_FRAC_CLZ_1(z, X) __FP_CLZ(z, X##_f)
+
+/* Predicates */
+#define _FP_FRAC_NEGP_1(X) ((_FP_WS_TYPE)X##_f < 0)
+#define _FP_FRAC_ZEROP_1(X) (X##_f == 0)
+#define _FP_FRAC_OVERP_1(fs,X) (X##_f & _FP_OVERFLOW_##fs)
+#define _FP_FRAC_CLEAR_OVERP_1(fs,X) (X##_f &= ~_FP_OVERFLOW_##fs)
+#define _FP_FRAC_EQ_1(X, Y) (X##_f == Y##_f)
+#define _FP_FRAC_GE_1(X, Y) (X##_f >= Y##_f)
+#define _FP_FRAC_GT_1(X, Y) (X##_f > Y##_f)
+
+#define _FP_ZEROFRAC_1 0
+#define _FP_MINFRAC_1 1
+#define _FP_MAXFRAC_1 (~(_FP_WS_TYPE)0)
+
+/*
+ * Unpack the raw bits of a native fp value. Do not classify or
+ * normalize the data.
+ */
+
+#define _FP_UNPACK_RAW_1(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs _flo; _flo.flt = (val); \
+ \
+ X##_f = _flo.bits.frac; \
+ X##_e = _flo.bits.exp; \
+ X##_s = _flo.bits.sign; \
+ } while (0)
+
+#define _FP_UNPACK_RAW_1_P(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ X##_f = _flo->bits.frac; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+/*
+ * Repack the raw bits of a native fp value.
+ */
+
+#define _FP_PACK_RAW_1(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs _flo; \
+ \
+ _flo.bits.frac = X##_f; \
+ _flo.bits.exp = X##_e; \
+ _flo.bits.sign = X##_s; \
+ \
+ (val) = _flo.flt; \
+ } while (0)
+
+#define _FP_PACK_RAW_1_P(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ _flo->bits.frac = X##_f; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+
+/*
+ * Multiplication algorithms:
+ */
+
+/* Basic. Assuming the host word size is >= 2*FRACBITS, we can do the
+ multiplication immediately. */
+
+#define _FP_MUL_MEAT_1_imm(wfracbits, R, X, Y) \
+ do { \
+ R##_f = X##_f * Y##_f; \
+ /* Normalize: since we know where the msb of the multiplicands \
+ was (bit B), we know that the msb of the product is \
+ at either 2B or 2B-1. */ \
+ _FP_FRAC_SRS_1(R, wfracbits-1, 2*wfracbits); \
+ } while (0)
+
+/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
+
+#define _FP_MUL_MEAT_1_wide(wfracbits, R, X, Y, doit) \
+ do { \
+ _FP_W_TYPE _Z_f0, _Z_f1; \
+ doit(_Z_f1, _Z_f0, X##_f, Y##_f); \
+ /* Normalize: since we know where the msb of the multiplicands \
+ was (bit B), we know that the msb of the product is \
+ at either 2B or 2B-1. */ \
+ _FP_FRAC_SRS_2(_Z, wfracbits-1, 2*wfracbits); \
+ R##_f = _Z_f0; \
+ } while (0)
+
+/* Finally, a simple widening multiply algorithm. What fun! */
+
+#define _FP_MUL_MEAT_1_hard(wfracbits, R, X, Y) \
+ do { \
+ _FP_W_TYPE _xh, _xl, _yh, _yl, _z_f0, _z_f1, _a_f0, _a_f1; \
+ \
+ /* split the words in half */ \
+ _xh = X##_f >> (_FP_W_TYPE_SIZE/2); \
+ _xl = X##_f & (((_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE/2)) - 1); \
+ _yh = Y##_f >> (_FP_W_TYPE_SIZE/2); \
+ _yl = Y##_f & (((_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE/2)) - 1); \
+ \
+ /* multiply the pieces */ \
+ _z_f0 = _xl * _yl; \
+ _a_f0 = _xh * _yl; \
+ _a_f1 = _xl * _yh; \
+ _z_f1 = _xh * _yh; \
+ \
+ /* reassemble into two full words */ \
+ if ((_a_f0 += _a_f1) < _a_f1) \
+ _z_f1 += (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE/2); \
+ _a_f1 = _a_f0 >> (_FP_W_TYPE_SIZE/2); \
+ _a_f0 = _a_f0 << (_FP_W_TYPE_SIZE/2); \
+ _FP_FRAC_ADD_2(_z, _z, _a); \
+ \
+ /* normalize */ \
+ _FP_FRAC_SRS_2(_z, wfracbits - 1, 2*wfracbits); \
+ R##_f = _z_f0; \
+ } while (0)
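The _hard variant computes a full 2W-bit product using only W-bit multiplies by splitting each operand into halves and summing the four partial products with explicit carry handling. The same decomposition for a 32x32 -> 64 product, using 64-bit arithmetic only to keep the sketch short (the macro instead propagates the carry by hand):

    #include <stdint.h>

    /* Sketch of _FP_MUL_MEAT_1_hard's partial-product scheme. */
    uint64_t mul_wide(uint32_t x, uint32_t y)
    {
        uint32_t xh = x >> 16, xl = x & 0xFFFF;     /* split the words */
        uint32_t yh = y >> 16, yl = y & 0xFFFF;
        uint64_t lo  = (uint64_t)xl * yl;           /* partial products */
        uint64_t mid = (uint64_t)xh * yl + (uint64_t)xl * yh;
        uint64_t hi  = (uint64_t)xh * yh;
        return lo + (mid << 16) + (hi << 32);       /* reassemble */
    }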
+
+
+/*
+ * Division algorithms:
+ */
+
+/* Basic. Assuming the host word size is >= 2*FRACBITS, we can do the
+ division immediately. Give this macro either _FP_DIV_HELP_imm for
+ C primitives or _FP_DIV_HELP_ldiv for the ISO function. Which you
+ choose will depend on what the compiler does with divrem4. */
+
+#define _FP_DIV_MEAT_1_imm(fs, R, X, Y, doit) \
+ do { \
+ _FP_W_TYPE _q, _r; \
+ X##_f <<= (X##_f < Y##_f \
+ ? R##_e--, _FP_WFRACBITS_##fs \
+ : _FP_WFRACBITS_##fs - 1); \
+ doit(_q, _r, X##_f, Y##_f); \
+ R##_f = _q | (_r != 0); \
+ } while (0)
+
+/* GCC's longlong.h defines a 2W / 1W => (1W,1W) primitive udiv_qrnnd
+ that may be useful in this situation. The first is for a primitive
+ that requires normalization, the second for one that does not. Look
+ for UDIV_NEEDS_NORMALIZATION to tell which your machine needs. */
+
+#define _FP_DIV_MEAT_1_udiv_norm(fs, R, X, Y) \
+ do { \
+ _FP_W_TYPE _nh, _nl, _q, _r, _y; \
+ \
+ /* Normalize Y -- i.e. make the most significant bit set. */ \
+ _y = Y##_f << _FP_WFRACXBITS_##fs; \
+ \
+ /* Shift the X operand correspondingly high, that is, up one full word. */ \
+ if (X##_f < Y##_f) \
+ { \
+ R##_e--; \
+ _nl = 0; \
+ _nh = X##_f; \
+ } \
+ else \
+ { \
+ _nl = X##_f << (_FP_W_TYPE_SIZE - 1); \
+ _nh = X##_f >> 1; \
+ } \
+ \
+ udiv_qrnnd(_q, _r, _nh, _nl, _y); \
+ R##_f = _q | (_r != 0); \
+ } while (0)
+
+#define _FP_DIV_MEAT_1_udiv(fs, R, X, Y) \
+ do { \
+ _FP_W_TYPE _nh, _nl, _q, _r; \
+ if (X##_f < Y##_f) \
+ { \
+ R##_e--; \
+ _nl = X##_f << _FP_WFRACBITS_##fs; \
+ _nh = X##_f >> _FP_WFRACXBITS_##fs; \
+ } \
+ else \
+ { \
+ _nl = X##_f << (_FP_WFRACBITS_##fs - 1); \
+ _nh = X##_f >> (_FP_WFRACXBITS_##fs + 1); \
+ } \
+ udiv_qrnnd(_q, _r, _nh, _nl, Y##_f); \
+ R##_f = _q | (_r != 0); \
+ } while (0)
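Both division variants end with the pattern R##_f = _q | (_r != 0): the quotient's least significant bit sits in a guard position below the precision that will be kept, so a nonzero remainder can be folded into it as a sticky bit, marking the quotient inexact for the rounding step. Reduced to plain integers, as a sketch:

    #include <stdint.h>

    /* Sketch: quotient with sticky remainder. The low bit lies below
       the kept precision, so OR-ing in the inexactness flag does not
       corrupt a value bit. */
    uint64_t div_sticky(uint64_t n, uint64_t d)
    {
        return (n / d) | (n % d != 0);
    }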
+
+
+/*
+ * Square root algorithms:
+ * We have just one right now; a Newton approximation could be
+ * added for machines where division is fast.
+ */
+
+#define _FP_SQRT_MEAT_1(R, S, T, X, q) \
+ do { \
+ while (q != _FP_WORK_ROUND) \
+ { \
+ T##_f = S##_f + q; \
+ if (T##_f <= X##_f) \
+ { \
+ S##_f = T##_f + q; \
+ X##_f -= T##_f; \
+ R##_f += q; \
+ } \
+ _FP_FRAC_SLL_1(X, 1); \
+ q >>= 1; \
+ } \
+ if (X##_f) \
+ { \
+ if (S##_f < X##_f) \
+ R##_f |= _FP_WORK_ROUND; \
+ R##_f |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
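This is the classic restoring, digit-recurrence square root: each candidate result bit q is accepted when the trial subtraction fits, and a nonzero final remainder is folded into round/sticky bits. A standard formulation of the same recurrence on plain integers, stripped of the rounding bookkeeping (the macro's variant doubles the remainder each step instead of shifting the trial bit down):

    #include <stdint.h>

    /* Bit-by-bit integer square root, the recurrence behind
       _FP_SQRT_MEAT_1, without the round/sticky handling. */
    uint32_t isqrt(uint32_t x)
    {
        uint32_t r = 0, bit = 1u << 30;
        while (bit > x)               /* highest power of 4 <= x */
            bit >>= 2;
        while (bit) {
            if (x >= r + bit) {       /* does the trial subtraction fit? */
                x -= r + bit;
                r = (r >> 1) + bit;   /* accept the bit */
            } else {
                r >>= 1;
            }
            bit >>= 2;
        }
        return r;                     /* floor(sqrt(x)) */
    }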
+
+/*
+ * Assembly/disassembly for converting to/from integral types.
+ * No shifting or overflow handled here.
+ */
+
+#define _FP_FRAC_ASSEMBLE_1(r, X, rsize) (r = X##_f)
+#define _FP_FRAC_DISASSEMBLE_1(X, r, rsize) (X##_f = r)
+
+
+/*
+ * Convert FP values between word sizes
+ */
+
+#define _FP_FRAC_COPY_1_1(D, S) (D##_f = S##_f)
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-2.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-2.h
new file mode 100644
index 000000000..3a3b3aa06
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-2.h
@@ -0,0 +1,617 @@
+/* Software floating-point emulation.
+ Basic two-word fraction declaration and manipulation.
+ Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combined executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#define _FP_FRAC_DECL_2(X) _FP_W_TYPE X##_f0, X##_f1
+#define _FP_FRAC_COPY_2(D,S) (D##_f0 = S##_f0, D##_f1 = S##_f1)
+#define _FP_FRAC_SET_2(X,I) __FP_FRAC_SET_2(X, I)
+#define _FP_FRAC_HIGH_2(X) (X##_f1)
+#define _FP_FRAC_LOW_2(X) (X##_f0)
+#define _FP_FRAC_WORD_2(X,w) (X##_f##w)
+
+#define _FP_FRAC_SLL_2(X,N) \
+(void)(((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ if (__builtin_constant_p(N) && (N) == 1) \
+ { \
+ X##_f1 = X##_f1 + X##_f1 + (((_FP_WS_TYPE)(X##_f0)) < 0); \
+ X##_f0 += X##_f0; \
+ } \
+ else \
+ { \
+ X##_f1 = X##_f1 << (N) | X##_f0 >> (_FP_W_TYPE_SIZE - (N)); \
+ X##_f0 <<= (N); \
+ } \
+ 0; \
+ }) \
+ : ({ \
+ X##_f1 = X##_f0 << ((N) - _FP_W_TYPE_SIZE); \
+ X##_f0 = 0; \
+ }))
+
+
+#define _FP_FRAC_SRL_2(X,N) \
+(void)(((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ X##_f0 = X##_f0 >> (N) | X##_f1 << (_FP_W_TYPE_SIZE - (N)); \
+ X##_f1 >>= (N); \
+ }) \
+ : ({ \
+ X##_f0 = X##_f1 >> ((N) - _FP_W_TYPE_SIZE); \
+ X##_f1 = 0; \
+ }))
+
+/* Right shift with sticky-lsb. */
+#define _FP_FRAC_SRST_2(X,S, N,sz) \
+(void)(((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ S = (__builtin_constant_p(N) && (N) == 1 \
+ ? X##_f0 & 1 \
+ : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0); \
+ X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N)); \
+ X##_f1 >>= (N); \
+ }) \
+ : ({ \
+ S = ((((N) == _FP_W_TYPE_SIZE \
+ ? 0 \
+ : (X##_f1 << (2*_FP_W_TYPE_SIZE - (N)))) \
+ | X##_f0) != 0); \
+ X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE)); \
+ X##_f1 = 0; \
+ }))
+
+#define _FP_FRAC_SRS_2(X,N,sz) \
+(void)(((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N) | \
+ (__builtin_constant_p(N) && (N) == 1 \
+ ? X##_f0 & 1 \
+ : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0)); \
+ X##_f1 >>= (N); \
+ }) \
+ : ({ \
+ X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) | \
+ ((((N) == _FP_W_TYPE_SIZE \
+ ? 0 \
+ : (X##_f1 << (2*_FP_W_TYPE_SIZE - (N)))) \
+ | X##_f0) != 0)); \
+ X##_f1 = 0; \
+ }))
+
+#define _FP_FRAC_ADDI_2(X,I) \
+ __FP_FRAC_ADDI_2(X##_f1, X##_f0, I)
+
+#define _FP_FRAC_ADD_2(R,X,Y) \
+ __FP_FRAC_ADD_2(R##_f1, R##_f0, X##_f1, X##_f0, Y##_f1, Y##_f0)
+
+#define _FP_FRAC_SUB_2(R,X,Y) \
+ __FP_FRAC_SUB_2(R##_f1, R##_f0, X##_f1, X##_f0, Y##_f1, Y##_f0)
+
+#define _FP_FRAC_DEC_2(X,Y) \
+ __FP_FRAC_DEC_2(X##_f1, X##_f0, Y##_f1, Y##_f0)
+
+#define _FP_FRAC_CLZ_2(R,X) \
+ do { \
+ if (X##_f1) \
+ __FP_CLZ(R,X##_f1); \
+ else \
+ { \
+ __FP_CLZ(R,X##_f0); \
+ R += _FP_W_TYPE_SIZE; \
+ } \
+ } while(0)
+
+/* Predicates */
+#define _FP_FRAC_NEGP_2(X) ((_FP_WS_TYPE)X##_f1 < 0)
+#define _FP_FRAC_ZEROP_2(X) ((X##_f1 | X##_f0) == 0)
+#define _FP_FRAC_OVERP_2(fs,X) (_FP_FRAC_HIGH_##fs(X) & _FP_OVERFLOW_##fs)
+#define _FP_FRAC_CLEAR_OVERP_2(fs,X) (_FP_FRAC_HIGH_##fs(X) &= ~_FP_OVERFLOW_##fs)
+#define _FP_FRAC_EQ_2(X, Y) (X##_f1 == Y##_f1 && X##_f0 == Y##_f0)
+#define _FP_FRAC_GT_2(X, Y) \
+ (X##_f1 > Y##_f1 || (X##_f1 == Y##_f1 && X##_f0 > Y##_f0))
+#define _FP_FRAC_GE_2(X, Y) \
+ (X##_f1 > Y##_f1 || (X##_f1 == Y##_f1 && X##_f0 >= Y##_f0))
+
+#define _FP_ZEROFRAC_2 0, 0
+#define _FP_MINFRAC_2 0, 1
+#define _FP_MAXFRAC_2 (~(_FP_WS_TYPE)0), (~(_FP_WS_TYPE)0)
+
+/*
+ * Internals
+ */
+
+#define __FP_FRAC_SET_2(X,I1,I0) (X##_f0 = I0, X##_f1 = I1)
+
+#define __FP_CLZ_2(R, xh, xl) \
+ do { \
+ if (xh) \
+ __FP_CLZ(R,xh); \
+ else \
+ { \
+ __FP_CLZ(R,xl); \
+ R += _FP_W_TYPE_SIZE; \
+ } \
+ } while(0)
+
+#if 0
+
+#ifndef __FP_FRAC_ADDI_2
+#define __FP_FRAC_ADDI_2(xh, xl, i) \
+ (xh += ((xl += i) < i))
+#endif
+#ifndef __FP_FRAC_ADD_2
+#define __FP_FRAC_ADD_2(rh, rl, xh, xl, yh, yl) \
+ (rh = xh + yh + ((rl = xl + yl) < xl))
+#endif
+#ifndef __FP_FRAC_SUB_2
+#define __FP_FRAC_SUB_2(rh, rl, xh, xl, yh, yl) \
+ (rh = xh - yh - ((rl = xl - yl) > xl))
+#endif
+#ifndef __FP_FRAC_DEC_2
+#define __FP_FRAC_DEC_2(xh, xl, yh, yl) \
+ do { \
+ UWtype _t = xl; \
+ xh -= yh + ((xl -= yl) > _t); \
+ } while (0)
+#endif
+
+#else
+
+#undef __FP_FRAC_ADDI_2
+#define __FP_FRAC_ADDI_2(xh, xl, i) add_ssaaaa(xh, xl, xh, xl, 0, i)
+#undef __FP_FRAC_ADD_2
+#define __FP_FRAC_ADD_2 add_ssaaaa
+#undef __FP_FRAC_SUB_2
+#define __FP_FRAC_SUB_2 sub_ddmmss
+#undef __FP_FRAC_DEC_2
+#define __FP_FRAC_DEC_2(xh, xl, yh, yl) sub_ddmmss(xh, xl, xh, xl, yh, yl)
+
+#endif
+
+/*
+ * Unpack the raw bits of a native fp value. Do not classify or
+ * normalize the data.
+ */
+
+#define _FP_UNPACK_RAW_2(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs _flo; _flo.flt = (val); \
+ \
+ X##_f0 = _flo.bits.frac0; \
+ X##_f1 = _flo.bits.frac1; \
+ X##_e = _flo.bits.exp; \
+ X##_s = _flo.bits.sign; \
+ } while (0)
+
+#define _FP_UNPACK_RAW_2_P(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ X##_f0 = _flo->bits.frac0; \
+ X##_f1 = _flo->bits.frac1; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+
+/*
+ * Repack the raw bits of a native fp value.
+ */
+
+#define _FP_PACK_RAW_2(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs _flo; \
+ \
+ _flo.bits.frac0 = X##_f0; \
+ _flo.bits.frac1 = X##_f1; \
+ _flo.bits.exp = X##_e; \
+ _flo.bits.sign = X##_s; \
+ \
+ (val) = _flo.flt; \
+ } while (0)
+
+#define _FP_PACK_RAW_2_P(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ _flo->bits.frac0 = X##_f0; \
+ _flo->bits.frac1 = X##_f1; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+
+/*
+ * Multiplication algorithms:
+ */
+
+/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
+
+#define _FP_MUL_MEAT_2_wide(wfracbits, R, X, Y, doit) \
+ do { \
+ _FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
+ \
+ doit(_FP_FRAC_WORD_4(_z,1), _FP_FRAC_WORD_4(_z,0), X##_f0, Y##_f0); \
+ doit(_b_f1, _b_f0, X##_f0, Y##_f1); \
+ doit(_c_f1, _c_f0, X##_f1, Y##_f0); \
+ doit(_FP_FRAC_WORD_4(_z,3), _FP_FRAC_WORD_4(_z,2), X##_f1, Y##_f1); \
+ \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1), 0, _b_f1, _b_f0, \
+ _FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1), 0, _c_f1, _c_f0, \
+ _FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1)); \
+ \
+ /* Normalize: since we know where the msb of the multiplicands \
+ was (bit B), we know that the msb of the product is \
+ at either 2B or 2B-1. */ \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
+ R##_f0 = _FP_FRAC_WORD_4(_z,0); \
+ R##_f1 = _FP_FRAC_WORD_4(_z,1); \
+ } while (0)
+
+/* Given a 1W * 1W => 2W primitive, do the extended multiplication.
+ Do only three multiplications instead of four. This one is for machines
+ where multiplication is much more expensive than subtraction. */
+
+#define _FP_MUL_MEAT_2_wide_3mul(wfracbits, R, X, Y, doit) \
+ do { \
+ _FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
+ _FP_W_TYPE _d; \
+ int _c1, _c2; \
+ \
+ _b_f0 = X##_f0 + X##_f1; \
+ _c1 = _b_f0 < X##_f0; \
+ _b_f1 = Y##_f0 + Y##_f1; \
+ _c2 = _b_f1 < Y##_f0; \
+ doit(_d, _FP_FRAC_WORD_4(_z,0), X##_f0, Y##_f0); \
+ doit(_FP_FRAC_WORD_4(_z,2), _FP_FRAC_WORD_4(_z,1), _b_f0, _b_f1); \
+ doit(_c_f1, _c_f0, X##_f1, Y##_f1); \
+ \
+ _b_f0 &= -_c2; \
+ _b_f1 &= -_c1; \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1), (_c1 & _c2), 0, _d, \
+ 0, _FP_FRAC_WORD_4(_z,2), _FP_FRAC_WORD_4(_z,1)); \
+ __FP_FRAC_ADDI_2(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _b_f0); \
+ __FP_FRAC_ADDI_2(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _b_f1); \
+ __FP_FRAC_DEC_3(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1), \
+ 0, _d, _FP_FRAC_WORD_4(_z,0)); \
+ __FP_FRAC_DEC_3(_FP_FRAC_WORD_4(_z,3),_FP_FRAC_WORD_4(_z,2), \
+ _FP_FRAC_WORD_4(_z,1), 0, _c_f1, _c_f0); \
+ __FP_FRAC_ADD_2(_FP_FRAC_WORD_4(_z,3), _FP_FRAC_WORD_4(_z,2), \
+ _c_f1, _c_f0, \
+ _FP_FRAC_WORD_4(_z,3), _FP_FRAC_WORD_4(_z,2)); \
+ \
+ /* Normalize: since we know where the msb of the multiplicands \
+ was (bit B), we know that the msb of the product is \
+ at either 2B or 2B-1. */ \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
+ R##_f0 = _FP_FRAC_WORD_4(_z,0); \
+ R##_f1 = _FP_FRAC_WORD_4(_z,1); \
+ } while (0)
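The 3mul variant is the Karatsuba trick: the two cross products are recovered from a single multiplication of the half-sums, since x1*y0 + x0*y1 == (x0 + x1)*(y0 + y1) - x0*y0 - x1*y1, and the _c1/_c2 terms patch up the carries when the half-sums overflow a word. The identity itself, checked on small values:

    #include <assert.h>
    #include <stdint.h>

    /* The identity behind _FP_MUL_MEAT_2_wide_3mul (Karatsuba). */
    int main(void)
    {
        uint64_t x0 = 123, x1 = 456, y0 = 789, y1 = 1011;
        uint64_t cross = x1 * y0 + x0 * y1;
        uint64_t karat = (x0 + x1) * (y0 + y1) - x0 * y0 - x1 * y1;
        assert(cross == karat);
        return 0;
    }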
+
+#define _FP_MUL_MEAT_2_gmp(wfracbits, R, X, Y) \
+ do { \
+ _FP_FRAC_DECL_4(_z); \
+ _FP_W_TYPE _x[2], _y[2]; \
+ _x[0] = X##_f0; _x[1] = X##_f1; \
+ _y[0] = Y##_f0; _y[1] = Y##_f1; \
+ \
+ mpn_mul_n(_z_f, _x, _y, 2); \
+ \
+ /* Normalize: since we know where the msb of the multiplicands \
+ was (bit B), we know that the msb of the product is \
+ at either 2B or 2B-1. */ \
+ _FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
+ R##_f0 = _z_f[0]; \
+ R##_f1 = _z_f[1]; \
+ } while (0)
+
+/* Do an at most 120x120 -> 240-bit multiplication using double
+ floating-point multiplication. This is useful if floating-point
+ multiplication has much higher throughput than integer multiplication.
+ It is supposed to work only for _FP_W_TYPE_SIZE 64 and wfracbits
+ between 106 and 120.
+ The caller guarantees that X and Y have (1LL << (wfracbits - 1)) set.
+ SETFETZ is a macro that disables all FPU exceptions and sets rounding
+ towards zero; RESETFE should optionally reset it back. */
+
+#define _FP_MUL_MEAT_2_120_240_double(wfracbits, R, X, Y, setfetz, resetfe) \
+ do { \
+ static const double _const[] = { \
+ /* 2^-24 */ 5.9604644775390625e-08, \
+ /* 2^-48 */ 3.5527136788005009e-15, \
+ /* 2^-72 */ 2.1175823681357508e-22, \
+ /* 2^-96 */ 1.2621774483536189e-29, \
+ /* 2^28 */ 2.68435456e+08, \
+ /* 2^4 */ 1.600000e+01, \
+ /* 2^-20 */ 9.5367431640625e-07, \
+ /* 2^-44 */ 5.6843418860808015e-14, \
+ /* 2^-68 */ 3.3881317890172014e-21, \
+ /* 2^-92 */ 2.0194839173657902e-28, \
+ /* 2^-116 */ 1.2037062152420224e-35}; \
+ double _a240, _b240, _c240, _d240, _e240, _f240, \
+ _g240, _h240, _i240, _j240, _k240; \
+ union { double d; UDItype i; } _l240, _m240, _n240, _o240, \
+ _p240, _q240, _r240, _s240; \
+ UDItype _t240, _u240, _v240, _w240, _x240, _y240 = 0; \
+ \
+ if (wfracbits < 106 || wfracbits > 120) \
+ abort(); \
+ \
+ setfetz; \
+ \
+ _e240 = (double)(long)(X##_f0 & 0xffffff); \
+ _j240 = (double)(long)(Y##_f0 & 0xffffff); \
+ _d240 = (double)(long)((X##_f0 >> 24) & 0xffffff); \
+ _i240 = (double)(long)((Y##_f0 >> 24) & 0xffffff); \
+ _c240 = (double)(long)(((X##_f1 << 16) & 0xffffff) | (X##_f0 >> 48)); \
+ _h240 = (double)(long)(((Y##_f1 << 16) & 0xffffff) | (Y##_f0 >> 48)); \
+ _b240 = (double)(long)((X##_f1 >> 8) & 0xffffff); \
+ _g240 = (double)(long)((Y##_f1 >> 8) & 0xffffff); \
+ _a240 = (double)(long)(X##_f1 >> 32); \
+ _f240 = (double)(long)(Y##_f1 >> 32); \
+ _e240 *= _const[3]; \
+ _j240 *= _const[3]; \
+ _d240 *= _const[2]; \
+ _i240 *= _const[2]; \
+ _c240 *= _const[1]; \
+ _h240 *= _const[1]; \
+ _b240 *= _const[0]; \
+ _g240 *= _const[0]; \
+ _s240.d = _e240*_j240;\
+ _r240.d = _d240*_j240 + _e240*_i240;\
+ _q240.d = _c240*_j240 + _d240*_i240 + _e240*_h240;\
+ _p240.d = _b240*_j240 + _c240*_i240 + _d240*_h240 + _e240*_g240;\
+ _o240.d = _a240*_j240 + _b240*_i240 + _c240*_h240 + _d240*_g240 + _e240*_f240;\
+ _n240.d = _a240*_i240 + _b240*_h240 + _c240*_g240 + _d240*_f240; \
+ _m240.d = _a240*_h240 + _b240*_g240 + _c240*_f240; \
+ _l240.d = _a240*_g240 + _b240*_f240; \
+ _k240 = _a240*_f240; \
+ _r240.d += _s240.d; \
+ _q240.d += _r240.d; \
+ _p240.d += _q240.d; \
+ _o240.d += _p240.d; \
+ _n240.d += _o240.d; \
+ _m240.d += _n240.d; \
+ _l240.d += _m240.d; \
+ _k240 += _l240.d; \
+ _s240.d -= ((_const[10]+_s240.d)-_const[10]); \
+ _r240.d -= ((_const[9]+_r240.d)-_const[9]); \
+ _q240.d -= ((_const[8]+_q240.d)-_const[8]); \
+ _p240.d -= ((_const[7]+_p240.d)-_const[7]); \
+ _o240.d += _const[7]; \
+ _n240.d += _const[6]; \
+ _m240.d += _const[5]; \
+ _l240.d += _const[4]; \
+ if (_s240.d != 0.0) _y240 = 1; \
+ if (_r240.d != 0.0) _y240 = 1; \
+ if (_q240.d != 0.0) _y240 = 1; \
+ if (_p240.d != 0.0) _y240 = 1; \
+ _t240 = (DItype)_k240; \
+ _u240 = _l240.i; \
+ _v240 = _m240.i; \
+ _w240 = _n240.i; \
+ _x240 = _o240.i; \
+ R##_f1 = (_t240 << (128 - (wfracbits - 1))) \
+ | ((_u240 & 0xffffff) >> ((wfracbits - 1) - 104)); \
+ R##_f0 = ((_u240 & 0xffffff) << (168 - (wfracbits - 1))) \
+ | ((_v240 & 0xffffff) << (144 - (wfracbits - 1))) \
+ | ((_w240 & 0xffffff) << (120 - (wfracbits - 1))) \
+ | ((_x240 & 0xffffff) >> ((wfracbits - 1) - 96)) \
+ | _y240; \
+ resetfe; \
+ } while (0)
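+
+/* The (_const[k] + v) - _const[k] expressions above are the classic
+   "add a big constant, subtract it again" splitting trick: with a fixed
+   rounding mode in effect, adding 2^k rounds away the bits of v below a
+   given weight, so subtracting 2^k back leaves only the high-order part,
+   and v minus that is the exact low-order residue.  A minimal
+   stand-alone sketch, assuming 0 <= x < 2^52 and IEEE double arithmetic:
+
+   static void split (double x, double *hi, double *lo)
+   {
+     const double C = 4503599627370496.0;   // 2^52
+     *hi = (C + x) - C;                     // x rounded at C's unit step
+     *lo = x - *hi;                         // exact residue: x == *hi + *lo
+   }
+*/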
+
+/*
+ * Division algorithms:
+ */
+
+#define _FP_DIV_MEAT_2_udiv(fs, R, X, Y) \
+ do { \
+ _FP_W_TYPE _n_f2, _n_f1, _n_f0, _r_f1, _r_f0, _m_f1, _m_f0; \
+ if (_FP_FRAC_GT_2(X, Y)) \
+ { \
+ _n_f2 = X##_f1 >> 1; \
+ _n_f1 = X##_f1 << (_FP_W_TYPE_SIZE - 1) | X##_f0 >> 1; \
+ _n_f0 = X##_f0 << (_FP_W_TYPE_SIZE - 1); \
+ } \
+ else \
+ { \
+ R##_e--; \
+ _n_f2 = X##_f1; \
+ _n_f1 = X##_f0; \
+ _n_f0 = 0; \
+ } \
+ \
+ /* Normalize, i.e. make the most significant bit of the \
+ denominator set. */ \
+ _FP_FRAC_SLL_2(Y, _FP_WFRACXBITS_##fs); \
+ \
+ udiv_qrnnd(R##_f1, _r_f1, _n_f2, _n_f1, Y##_f1); \
+ umul_ppmm(_m_f1, _m_f0, R##_f1, Y##_f0); \
+ _r_f0 = _n_f0; \
+ if (_FP_FRAC_GT_2(_m, _r)) \
+ { \
+ R##_f1--; \
+ _FP_FRAC_ADD_2(_r, Y, _r); \
+ if (_FP_FRAC_GE_2(_r, Y) && _FP_FRAC_GT_2(_m, _r)) \
+ { \
+ R##_f1--; \
+ _FP_FRAC_ADD_2(_r, Y, _r); \
+ } \
+ } \
+ _FP_FRAC_DEC_2(_r, _m); \
+ \
+ if (_r_f1 == Y##_f1) \
+ { \
+ /* This is a special case, not an optimization \
+ (_r/Y##_f1 would not fit into UWtype). \
+ As _r is guaranteed to be < Y, R##_f0 can be either \
+ (UWtype)-1 or (UWtype)-2. But as we know what kind \
+ of bits it is (sticky, guard, round), we don't care. \
+	 We also don't care what the remainder is, because the	\
+ guard bit will be set anyway. -jj */ \
+ R##_f0 = -1; \
+ } \
+ else \
+ { \
+ udiv_qrnnd(R##_f0, _r_f1, _r_f1, _r_f0, Y##_f1); \
+ umul_ppmm(_m_f1, _m_f0, R##_f0, Y##_f0); \
+ _r_f0 = 0; \
+ if (_FP_FRAC_GT_2(_m, _r)) \
+ { \
+ R##_f0--; \
+ _FP_FRAC_ADD_2(_r, Y, _r); \
+ if (_FP_FRAC_GE_2(_r, Y) && _FP_FRAC_GT_2(_m, _r)) \
+ { \
+ R##_f0--; \
+ _FP_FRAC_ADD_2(_r, Y, _r); \
+ } \
+ } \
+ if (!_FP_FRAC_EQ_2(_r, _m)) \
+ R##_f0 |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
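+
+/* A plain-C sketch (assuming 32-bit words) of the estimate-and-correct
+   step the macro performs with udiv_qrnnd/umul_ppmm: divide the top two
+   dividend words by the top divisor word, then step the estimate down
+   while it overshoots -- at most two corrections are needed once the
+   divisor is normalized.  The top dividend word is assumed strictly
+   less than d1 (the macro handles equality separately).
+
+   #include <stdint.h>
+
+   static uint32_t div_digit (uint64_t n10, uint32_t d1, uint32_t d0,
+                              uint32_t n_next)
+   {
+     uint32_t q = (uint32_t) (n10 / d1);   // first estimate from top words
+     uint64_t r = n10 % d1;                // partial remainder
+     while ((uint64_t) q * d0 > ((r << 32) | n_next))
+       {
+         q--;                              // estimate was one too big
+         r += d1;
+         if (r >> 32)                      // remainder outgrew one word:
+           break;                          // q is now certainly correct
+       }
+     return q;
+   }
+*/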
+
+
+#define _FP_DIV_MEAT_2_gmp(fs, R, X, Y) \
+ do { \
+ _FP_W_TYPE _x[4], _y[2], _z[4]; \
+ _y[0] = Y##_f0; _y[1] = Y##_f1; \
+ _x[0] = _x[3] = 0; \
+ if (_FP_FRAC_GT_2(X, Y)) \
+ { \
+ R##_e++; \
+ _x[1] = (X##_f0 << (_FP_WFRACBITS_##fs-1 - _FP_W_TYPE_SIZE) | \
+ X##_f1 >> (_FP_W_TYPE_SIZE - \
+ (_FP_WFRACBITS_##fs-1 - _FP_W_TYPE_SIZE))); \
+ _x[2] = X##_f1 << (_FP_WFRACBITS_##fs-1 - _FP_W_TYPE_SIZE); \
+ } \
+ else \
+ { \
+ _x[1] = (X##_f0 << (_FP_WFRACBITS_##fs - _FP_W_TYPE_SIZE) | \
+ X##_f1 >> (_FP_W_TYPE_SIZE - \
+ (_FP_WFRACBITS_##fs - _FP_W_TYPE_SIZE))); \
+ _x[2] = X##_f1 << (_FP_WFRACBITS_##fs - _FP_W_TYPE_SIZE); \
+ } \
+ \
+ (void) mpn_divrem (_z, 0, _x, 4, _y, 2); \
+ R##_f1 = _z[1]; \
+ R##_f0 = _z[0] | ((_x[0] | _x[1]) != 0); \
+ } while (0)
+
+
+/*
+ * Square root algorithms:
+ * We have just one right now; a Newton approximation should perhaps
+ * be added for those machines where division is fast.
+ */
+
+#define _FP_SQRT_MEAT_2(R, S, T, X, q) \
+ do { \
+ while (q) \
+ { \
+ T##_f1 = S##_f1 + q; \
+ if (T##_f1 <= X##_f1) \
+ { \
+ S##_f1 = T##_f1 + q; \
+ X##_f1 -= T##_f1; \
+ R##_f1 += q; \
+ } \
+ _FP_FRAC_SLL_2(X, 1); \
+ q >>= 1; \
+ } \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ while (q != _FP_WORK_ROUND) \
+ { \
+ T##_f0 = S##_f0 + q; \
+ T##_f1 = S##_f1; \
+ if (T##_f1 < X##_f1 || \
+ (T##_f1 == X##_f1 && T##_f0 <= X##_f0)) \
+ { \
+ S##_f0 = T##_f0 + q; \
+ S##_f1 += (T##_f0 > S##_f0); \
+ _FP_FRAC_DEC_2(X, T); \
+ R##_f0 += q; \
+ } \
+ _FP_FRAC_SLL_2(X, 1); \
+ q >>= 1; \
+ } \
+ if (X##_f0 | X##_f1) \
+ { \
+ if (S##_f1 < X##_f1 || \
+ (S##_f1 == X##_f1 && S##_f0 < X##_f0)) \
+ R##_f0 |= _FP_WORK_ROUND; \
+ R##_f0 |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
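+
+/* The loops above perform a digit-by-digit (shift-and-subtract) square
+   root.  The textbook single-word variant below shows the idea, except
+   that the macro shifts the radicand X left each step instead of
+   shifting the trial bit's weight down.  A sketch, not soft-fp code:
+
+   #include <stdint.h>
+
+   static uint32_t isqrt32 (uint32_t n)
+   {
+     uint32_t x = n, c = 0, d = (uint32_t) 1 << 30;
+     while (d > n)
+       d >>= 2;                    // highest power of four <= n
+     while (d != 0)
+       {
+         if (x >= c + d)           // does this root bit fit?
+           {
+             x -= c + d;
+             c = (c >> 1) + d;     // accept the bit
+           }
+         else
+           c >>= 1;                // reject it
+         d >>= 2;
+       }
+     return c;                     // floor(sqrt(n))
+   }
+*/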
+
+
+/*
+ * Assembly/disassembly for converting to/from integral types.
+ * No shifting or overflow handled here.
+ */
+
+#define _FP_FRAC_ASSEMBLE_2(r, X, rsize) \
+(void)((rsize <= _FP_W_TYPE_SIZE) \
+ ? ({ r = X##_f0; }) \
+ : ({ \
+ r = X##_f1; \
+ r <<= _FP_W_TYPE_SIZE; \
+ r += X##_f0; \
+ }))
+
+#define _FP_FRAC_DISASSEMBLE_2(X, r, rsize) \
+ do { \
+ X##_f0 = r; \
+ X##_f1 = (rsize <= _FP_W_TYPE_SIZE ? 0 : r >> _FP_W_TYPE_SIZE); \
+ } while (0)
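+
+/* With 32-bit words these two macros amount to nothing more than the
+   following (a sketch, independent of the soft-fp types above):
+
+   #include <stdint.h>
+
+   static uint64_t assemble2 (uint32_t f0, uint32_t f1)
+   { return ((uint64_t) f1 << 32) | f0; }
+
+   static void disassemble2 (uint32_t *f0, uint32_t *f1, uint64_t r)
+   { *f0 = (uint32_t) r; *f1 = (uint32_t) (r >> 32); }
+*/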
+
+/*
+ * Convert FP values between word sizes
+ */
+
+#define _FP_FRAC_COPY_1_2(D, S) (D##_f = S##_f0)
+
+#define _FP_FRAC_COPY_2_1(D, S) ((D##_f0 = S##_f), (D##_f1 = 0))
+
+#define _FP_FRAC_COPY_2_2(D,S) _FP_FRAC_COPY_2(D,S)
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-4.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-4.h
new file mode 100644
index 000000000..70b9fafbe
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-4.h
@@ -0,0 +1,688 @@
+/* Software floating-point emulation.
+ Basic four-word fraction declaration and manipulation.
+ Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#define _FP_FRAC_DECL_4(X) _FP_W_TYPE X##_f[4]
+#define _FP_FRAC_COPY_4(D,S) \
+ (D##_f[0] = S##_f[0], D##_f[1] = S##_f[1], \
+ D##_f[2] = S##_f[2], D##_f[3] = S##_f[3])
+#define _FP_FRAC_SET_4(X,I) __FP_FRAC_SET_4(X, I)
+#define _FP_FRAC_HIGH_4(X) (X##_f[3])
+#define _FP_FRAC_LOW_4(X) (X##_f[0])
+#define _FP_FRAC_WORD_4(X,w) (X##_f[w])
+
+#define _FP_FRAC_SLL_4(X,N) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _up = (N) % _FP_W_TYPE_SIZE; \
+ _down = _FP_W_TYPE_SIZE - _up; \
+ if (!_up) \
+ for (_i = 3; _i >= _skip; --_i) \
+ X##_f[_i] = X##_f[_i-_skip]; \
+ else \
+ { \
+ for (_i = 3; _i > _skip; --_i) \
+ X##_f[_i] = X##_f[_i-_skip] << _up \
+ | X##_f[_i-_skip-1] >> _down; \
+ X##_f[_i--] = X##_f[0] << _up; \
+ } \
+ for (; _i >= 0; --_i) \
+ X##_f[_i] = 0; \
+ } while (0)
+
+/* This one was broken too */
+#define _FP_FRAC_SRL_4(X,N) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _down = (N) % _FP_W_TYPE_SIZE; \
+ _up = _FP_W_TYPE_SIZE - _down; \
+ if (!_down) \
+ for (_i = 0; _i <= 3-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip]; \
+ else \
+ { \
+ for (_i = 0; _i < 3-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip] >> _down \
+ | X##_f[_i+_skip+1] << _up; \
+ X##_f[_i++] = X##_f[3] >> _down; \
+ } \
+ for (; _i < 4; ++_i) \
+ X##_f[_i] = 0; \
+ } while (0)
+
+
+/* Right shift with sticky-lsb.
+ * What this actually means is that we do a standard right-shift,
+ * but that if any of the bits that fall off the right hand side
+ * were one then we always set the LSbit.
+ */
+#define _FP_FRAC_SRST_4(X,S,N,size) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _FP_W_TYPE _s; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _down = (N) % _FP_W_TYPE_SIZE; \
+ _up = _FP_W_TYPE_SIZE - _down; \
+ for (_s = _i = 0; _i < _skip; ++_i) \
+ _s |= X##_f[_i]; \
+ if (!_down) \
+ for (_i = 0; _i <= 3-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip]; \
+ else \
+ { \
+ _s |= X##_f[_i] << _up; \
+ for (_i = 0; _i < 3-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip] >> _down \
+ | X##_f[_i+_skip+1] << _up; \
+ X##_f[_i++] = X##_f[3] >> _down; \
+ } \
+ for (; _i < 4; ++_i) \
+ X##_f[_i] = 0; \
+ S = (_s != 0); \
+ } while (0)
+
+#define _FP_FRAC_SRS_4(X,N,size) \
+ do { \
+ int _sticky; \
+ _FP_FRAC_SRST_4(X, _sticky, N, size); \
+ X##_f[0] |= _sticky; \
+ } while (0)
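+
+/* On a single 64-bit word the same sticky shift reduces to the
+   following illustrative sketch (assuming 0 < n < 64):
+
+   #include <stdint.h>
+
+   static uint64_t srs64 (uint64_t x, unsigned n)
+   {
+     uint64_t lost = x << (64 - n);    // the bits that fall off the end
+     return (x >> n) | (lost != 0);    // OR them, collapsed, into the LSB
+   }
+*/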
+
+#define _FP_FRAC_ADD_4(R,X,Y) \
+ __FP_FRAC_ADD_4(R##_f[3], R##_f[2], R##_f[1], R##_f[0], \
+ X##_f[3], X##_f[2], X##_f[1], X##_f[0], \
+ Y##_f[3], Y##_f[2], Y##_f[1], Y##_f[0])
+
+#define _FP_FRAC_SUB_4(R,X,Y) \
+ __FP_FRAC_SUB_4(R##_f[3], R##_f[2], R##_f[1], R##_f[0], \
+ X##_f[3], X##_f[2], X##_f[1], X##_f[0], \
+ Y##_f[3], Y##_f[2], Y##_f[1], Y##_f[0])
+
+#define _FP_FRAC_DEC_4(X,Y) \
+ __FP_FRAC_DEC_4(X##_f[3], X##_f[2], X##_f[1], X##_f[0], \
+ Y##_f[3], Y##_f[2], Y##_f[1], Y##_f[0])
+
+#define _FP_FRAC_ADDI_4(X,I) \
+ __FP_FRAC_ADDI_4(X##_f[3], X##_f[2], X##_f[1], X##_f[0], I)
+
+#define _FP_ZEROFRAC_4 0,0,0,0
+#define _FP_MINFRAC_4 0,0,0,1
+#define _FP_MAXFRAC_4 (~(_FP_WS_TYPE)0), (~(_FP_WS_TYPE)0), (~(_FP_WS_TYPE)0), (~(_FP_WS_TYPE)0)
+
+#define _FP_FRAC_ZEROP_4(X) ((X##_f[0] | X##_f[1] | X##_f[2] | X##_f[3]) == 0)
+#define _FP_FRAC_NEGP_4(X) ((_FP_WS_TYPE)X##_f[3] < 0)
+#define _FP_FRAC_OVERP_4(fs,X) (_FP_FRAC_HIGH_##fs(X) & _FP_OVERFLOW_##fs)
+#define _FP_FRAC_CLEAR_OVERP_4(fs,X) (_FP_FRAC_HIGH_##fs(X) &= ~_FP_OVERFLOW_##fs)
+
+#define _FP_FRAC_EQ_4(X,Y) \
+ (X##_f[0] == Y##_f[0] && X##_f[1] == Y##_f[1] \
+ && X##_f[2] == Y##_f[2] && X##_f[3] == Y##_f[3])
+
+#define _FP_FRAC_GT_4(X,Y) \
+ (X##_f[3] > Y##_f[3] || \
+ (X##_f[3] == Y##_f[3] && (X##_f[2] > Y##_f[2] || \
+ (X##_f[2] == Y##_f[2] && (X##_f[1] > Y##_f[1] || \
+ (X##_f[1] == Y##_f[1] && X##_f[0] > Y##_f[0]) \
+ )) \
+ )) \
+ )
+
+#define _FP_FRAC_GE_4(X,Y) \
+ (X##_f[3] > Y##_f[3] || \
+ (X##_f[3] == Y##_f[3] && (X##_f[2] > Y##_f[2] || \
+ (X##_f[2] == Y##_f[2] && (X##_f[1] > Y##_f[1] || \
+ (X##_f[1] == Y##_f[1] && X##_f[0] >= Y##_f[0]) \
+ )) \
+ )) \
+ )
+
+
+#define _FP_FRAC_CLZ_4(R,X) \
+ do { \
+ if (X##_f[3]) \
+ { \
+ __FP_CLZ(R,X##_f[3]); \
+ } \
+ else if (X##_f[2]) \
+ { \
+ __FP_CLZ(R,X##_f[2]); \
+ R += _FP_W_TYPE_SIZE; \
+ } \
+ else if (X##_f[1]) \
+ { \
+ __FP_CLZ(R,X##_f[1]); \
+ R += _FP_W_TYPE_SIZE*2; \
+ } \
+ else \
+ { \
+ __FP_CLZ(R,X##_f[0]); \
+ R += _FP_W_TYPE_SIZE*3; \
+ } \
+ } while(0)
+
+
+#define _FP_UNPACK_RAW_4(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs _flo; _flo.flt = (val); \
+ X##_f[0] = _flo.bits.frac0; \
+ X##_f[1] = _flo.bits.frac1; \
+ X##_f[2] = _flo.bits.frac2; \
+ X##_f[3] = _flo.bits.frac3; \
+ X##_e = _flo.bits.exp; \
+ X##_s = _flo.bits.sign; \
+ } while (0)
+
+#define _FP_UNPACK_RAW_4_P(fs, X, val) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ X##_f[0] = _flo->bits.frac0; \
+ X##_f[1] = _flo->bits.frac1; \
+ X##_f[2] = _flo->bits.frac2; \
+ X##_f[3] = _flo->bits.frac3; \
+ X##_e = _flo->bits.exp; \
+ X##_s = _flo->bits.sign; \
+ } while (0)
+
+#define _FP_PACK_RAW_4(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs _flo; \
+ _flo.bits.frac0 = X##_f[0]; \
+ _flo.bits.frac1 = X##_f[1]; \
+ _flo.bits.frac2 = X##_f[2]; \
+ _flo.bits.frac3 = X##_f[3]; \
+ _flo.bits.exp = X##_e; \
+ _flo.bits.sign = X##_s; \
+ (val) = _flo.flt; \
+ } while (0)
+
+#define _FP_PACK_RAW_4_P(fs, val, X) \
+ do { \
+ union _FP_UNION_##fs *_flo = \
+ (union _FP_UNION_##fs *)(val); \
+ \
+ _flo->bits.frac0 = X##_f[0]; \
+ _flo->bits.frac1 = X##_f[1]; \
+ _flo->bits.frac2 = X##_f[2]; \
+ _flo->bits.frac3 = X##_f[3]; \
+ _flo->bits.exp = X##_e; \
+ _flo->bits.sign = X##_s; \
+ } while (0)
+
+/*
+ * Multiplication algorithms:
+ */
+
+/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
+
+#define _FP_MUL_MEAT_4_wide(wfracbits, R, X, Y, doit) \
+ do { \
+ _FP_FRAC_DECL_8(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
+ _FP_FRAC_DECL_2(_d); _FP_FRAC_DECL_2(_e); _FP_FRAC_DECL_2(_f); \
+ \
+ doit(_FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0), X##_f[0], Y##_f[0]); \
+ doit(_b_f1, _b_f0, X##_f[0], Y##_f[1]); \
+ doit(_c_f1, _c_f0, X##_f[1], Y##_f[0]); \
+ doit(_d_f1, _d_f0, X##_f[1], Y##_f[1]); \
+ doit(_e_f1, _e_f0, X##_f[0], Y##_f[2]); \
+ doit(_f_f1, _f_f0, X##_f[2], Y##_f[0]); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,3),_FP_FRAC_WORD_8(_z,2), \
+ _FP_FRAC_WORD_8(_z,1), 0,_b_f1,_b_f0, \
+ 0,0,_FP_FRAC_WORD_8(_z,1)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,3),_FP_FRAC_WORD_8(_z,2), \
+ _FP_FRAC_WORD_8(_z,1), 0,_c_f1,_c_f0, \
+ _FP_FRAC_WORD_8(_z,3),_FP_FRAC_WORD_8(_z,2), \
+ _FP_FRAC_WORD_8(_z,1)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3), \
+ _FP_FRAC_WORD_8(_z,2), 0,_d_f1,_d_f0, \
+ 0,_FP_FRAC_WORD_8(_z,3),_FP_FRAC_WORD_8(_z,2)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3), \
+ _FP_FRAC_WORD_8(_z,2), 0,_e_f1,_e_f0, \
+ _FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3), \
+ _FP_FRAC_WORD_8(_z,2)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3), \
+ _FP_FRAC_WORD_8(_z,2), 0,_f_f1,_f_f0, \
+ _FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3), \
+ _FP_FRAC_WORD_8(_z,2)); \
+ doit(_b_f1, _b_f0, X##_f[0], Y##_f[3]); \
+ doit(_c_f1, _c_f0, X##_f[3], Y##_f[0]); \
+ doit(_d_f1, _d_f0, X##_f[1], Y##_f[2]); \
+ doit(_e_f1, _e_f0, X##_f[2], Y##_f[1]); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3), 0,_b_f1,_b_f0, \
+ 0,_FP_FRAC_WORD_8(_z,4),_FP_FRAC_WORD_8(_z,3)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3), 0,_c_f1,_c_f0, \
+ _FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3), 0,_d_f1,_d_f0, \
+ _FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3), 0,_e_f1,_e_f0, \
+ _FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4), \
+ _FP_FRAC_WORD_8(_z,3)); \
+ doit(_b_f1, _b_f0, X##_f[2], Y##_f[2]); \
+ doit(_c_f1, _c_f0, X##_f[1], Y##_f[3]); \
+ doit(_d_f1, _d_f0, X##_f[3], Y##_f[1]); \
+ doit(_e_f1, _e_f0, X##_f[2], Y##_f[3]); \
+ doit(_f_f1, _f_f0, X##_f[3], Y##_f[2]); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5), \
+ _FP_FRAC_WORD_8(_z,4), 0,_b_f1,_b_f0, \
+ 0,_FP_FRAC_WORD_8(_z,5),_FP_FRAC_WORD_8(_z,4)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5), \
+ _FP_FRAC_WORD_8(_z,4), 0,_c_f1,_c_f0, \
+ _FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5), \
+ _FP_FRAC_WORD_8(_z,4)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5), \
+ _FP_FRAC_WORD_8(_z,4), 0,_d_f1,_d_f0, \
+ _FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5), \
+ _FP_FRAC_WORD_8(_z,4)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,7),_FP_FRAC_WORD_8(_z,6), \
+ _FP_FRAC_WORD_8(_z,5), 0,_e_f1,_e_f0, \
+ 0,_FP_FRAC_WORD_8(_z,6),_FP_FRAC_WORD_8(_z,5)); \
+ __FP_FRAC_ADD_3(_FP_FRAC_WORD_8(_z,7),_FP_FRAC_WORD_8(_z,6), \
+ _FP_FRAC_WORD_8(_z,5), 0,_f_f1,_f_f0, \
+ _FP_FRAC_WORD_8(_z,7),_FP_FRAC_WORD_8(_z,6), \
+ _FP_FRAC_WORD_8(_z,5)); \
+ doit(_b_f1, _b_f0, X##_f[3], Y##_f[3]); \
+ __FP_FRAC_ADD_2(_FP_FRAC_WORD_8(_z,7),_FP_FRAC_WORD_8(_z,6), \
+ _b_f1,_b_f0, \
+ _FP_FRAC_WORD_8(_z,7),_FP_FRAC_WORD_8(_z,6)); \
+ \
+    /* Normalize.  Since we know where the msbs of the multiplicands	\
+       were (bit B), the msb of the product is at either 2B or 2B-1.  */ \
+ _FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
+ __FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
+ _FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
+ } while (0)
+
+#define _FP_MUL_MEAT_4_gmp(wfracbits, R, X, Y) \
+ do { \
+ _FP_FRAC_DECL_8(_z); \
+ \
+    mpn_mul_n(_z_f, X##_f, Y##_f, 4);					\
+ \
+    /* Normalize.  Since we know where the msbs of the multiplicands	\
+       were (bit B), the msb of the product is at either 2B or 2B-1.  */ \
+ _FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
+ __FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
+ _FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
+ } while (0)
+
+/*
+ * Helper utility for _FP_DIV_MEAT_4_udiv:
+ * pppp = m * nnn
+ */
+#define umul_ppppmnnn(p3,p2,p1,p0,m,n2,n1,n0) \
+ do { \
+ UWtype _t; \
+ umul_ppmm(p1,p0,m,n0); \
+ umul_ppmm(p2,_t,m,n1); \
+ __FP_FRAC_ADDI_2(p2,p1,_t); \
+ umul_ppmm(p3,_t,m,n2); \
+ __FP_FRAC_ADDI_2(p3,p2,_t); \
+ } while (0)
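+
+/* umul_ppppmnnn is an ordinary one-word-by-three-word schoolbook
+   multiply.  A sketch with 32-bit words and a 64-bit accumulator:
+
+   #include <stdint.h>
+
+   static void mul_1x3 (uint32_t p[4], uint32_t m, const uint32_t n[3])
+   {
+     uint64_t carry = 0;
+     for (int i = 0; i < 3; i++)
+       {
+         uint64_t t = (uint64_t) m * n[i] + carry;   // partial product
+         p[i] = (uint32_t) t;                        // low word stays
+         carry = t >> 32;                            // high word carries
+       }
+     p[3] = (uint32_t) carry;
+   }
+*/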
+
+/*
+ * Division algorithms:
+ */
+
+#define _FP_DIV_MEAT_4_udiv(fs, R, X, Y) \
+ do { \
+ int _i; \
+ _FP_FRAC_DECL_4(_n); _FP_FRAC_DECL_4(_m); \
+ _FP_FRAC_SET_4(_n, _FP_ZEROFRAC_4); \
+ if (_FP_FRAC_GT_4(X, Y)) \
+ { \
+ _n_f[3] = X##_f[0] << (_FP_W_TYPE_SIZE - 1); \
+ _FP_FRAC_SRL_4(X, 1); \
+ } \
+ else \
+ R##_e--; \
+ \
+ /* Normalize, i.e. make the most significant bit of the \
+ denominator set. */ \
+ _FP_FRAC_SLL_4(Y, _FP_WFRACXBITS_##fs); \
+ \
+ for (_i = 3; ; _i--) \
+ { \
+ if (X##_f[3] == Y##_f[3]) \
+ { \
+ /* This is a special case, not an optimization \
+ (X##_f[3]/Y##_f[3] would not fit into UWtype). \
+ As X## is guaranteed to be < Y, R##_f[_i] can be either \
+ (UWtype)-1 or (UWtype)-2. */ \
+ R##_f[_i] = -1; \
+ if (!_i) \
+ break; \
+ __FP_FRAC_SUB_4(X##_f[3], X##_f[2], X##_f[1], X##_f[0], \
+ Y##_f[2], Y##_f[1], Y##_f[0], 0, \
+ X##_f[2], X##_f[1], X##_f[0], _n_f[_i]); \
+ _FP_FRAC_SUB_4(X, Y, X); \
+ if (X##_f[3] > Y##_f[3]) \
+ { \
+ R##_f[_i] = -2; \
+ _FP_FRAC_ADD_4(X, Y, X); \
+ } \
+ } \
+ else \
+ { \
+ udiv_qrnnd(R##_f[_i], X##_f[3], X##_f[3], X##_f[2], Y##_f[3]); \
+ umul_ppppmnnn(_m_f[3], _m_f[2], _m_f[1], _m_f[0], \
+ R##_f[_i], Y##_f[2], Y##_f[1], Y##_f[0]); \
+ X##_f[2] = X##_f[1]; \
+ X##_f[1] = X##_f[0]; \
+ X##_f[0] = _n_f[_i]; \
+ if (_FP_FRAC_GT_4(_m, X)) \
+ { \
+ R##_f[_i]--; \
+ _FP_FRAC_ADD_4(X, Y, X); \
+ if (_FP_FRAC_GE_4(X, Y) && _FP_FRAC_GT_4(_m, X)) \
+ { \
+ R##_f[_i]--; \
+ _FP_FRAC_ADD_4(X, Y, X); \
+ } \
+ } \
+ _FP_FRAC_DEC_4(X, _m); \
+ if (!_i) \
+ { \
+ if (!_FP_FRAC_EQ_4(X, _m)) \
+ R##_f[0] |= _FP_WORK_STICKY; \
+ break; \
+ } \
+ } \
+ } \
+ } while (0)
+
+
+/*
+ * Square root algorithms:
+ * We have just one right now; a Newton approximation should perhaps
+ * be added for those machines where division is fast.
+ */
+
+#define _FP_SQRT_MEAT_4(R, S, T, X, q) \
+ do { \
+ while (q) \
+ { \
+ T##_f[3] = S##_f[3] + q; \
+ if (T##_f[3] <= X##_f[3]) \
+ { \
+ S##_f[3] = T##_f[3] + q; \
+ X##_f[3] -= T##_f[3]; \
+ R##_f[3] += q; \
+ } \
+ _FP_FRAC_SLL_4(X, 1); \
+ q >>= 1; \
+ } \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ while (q) \
+ { \
+ T##_f[2] = S##_f[2] + q; \
+ T##_f[3] = S##_f[3]; \
+ if (T##_f[3] < X##_f[3] || \
+ (T##_f[3] == X##_f[3] && T##_f[2] <= X##_f[2])) \
+ { \
+ S##_f[2] = T##_f[2] + q; \
+ S##_f[3] += (T##_f[2] > S##_f[2]); \
+ __FP_FRAC_DEC_2(X##_f[3], X##_f[2], \
+ T##_f[3], T##_f[2]); \
+ R##_f[2] += q; \
+ } \
+ _FP_FRAC_SLL_4(X, 1); \
+ q >>= 1; \
+ } \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ while (q) \
+ { \
+ T##_f[1] = S##_f[1] + q; \
+ T##_f[2] = S##_f[2]; \
+ T##_f[3] = S##_f[3]; \
+ if (T##_f[3] < X##_f[3] || \
+ (T##_f[3] == X##_f[3] && (T##_f[2] < X##_f[2] || \
+ (T##_f[2] == X##_f[2] && T##_f[1] <= X##_f[1])))) \
+ { \
+ S##_f[1] = T##_f[1] + q; \
+ S##_f[2] += (T##_f[1] > S##_f[1]); \
+ S##_f[3] += (T##_f[2] > S##_f[2]); \
+ __FP_FRAC_DEC_3(X##_f[3], X##_f[2], X##_f[1], \
+ T##_f[3], T##_f[2], T##_f[1]); \
+ R##_f[1] += q; \
+ } \
+ _FP_FRAC_SLL_4(X, 1); \
+ q >>= 1; \
+ } \
+ q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
+ while (q != _FP_WORK_ROUND) \
+ { \
+ T##_f[0] = S##_f[0] + q; \
+ T##_f[1] = S##_f[1]; \
+ T##_f[2] = S##_f[2]; \
+ T##_f[3] = S##_f[3]; \
+ if (_FP_FRAC_GE_4(X,T)) \
+ { \
+ S##_f[0] = T##_f[0] + q; \
+ S##_f[1] += (T##_f[0] > S##_f[0]); \
+ S##_f[2] += (T##_f[1] > S##_f[1]); \
+ S##_f[3] += (T##_f[2] > S##_f[2]); \
+ _FP_FRAC_DEC_4(X, T); \
+ R##_f[0] += q; \
+ } \
+ _FP_FRAC_SLL_4(X, 1); \
+ q >>= 1; \
+ } \
+ if (!_FP_FRAC_ZEROP_4(X)) \
+ { \
+ if (_FP_FRAC_GT_4(X,S)) \
+ R##_f[0] |= _FP_WORK_ROUND; \
+ R##_f[0] |= _FP_WORK_STICKY; \
+ } \
+ } while (0)
+
+
+/*
+ * Internals
+ */
+
+#define __FP_FRAC_SET_4(X,I3,I2,I1,I0) \
+ (X##_f[3] = I3, X##_f[2] = I2, X##_f[1] = I1, X##_f[0] = I0)
+
+#ifndef __FP_FRAC_ADD_3
+#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
+ do { \
+ _FP_W_TYPE _c1, _c2; \
+ r0 = x0 + y0; \
+ _c1 = r0 < x0; \
+ r1 = x1 + y1; \
+ _c2 = r1 < x1; \
+ r1 += _c1; \
+ _c2 |= r1 < _c1; \
+ r2 = x2 + y2 + _c2; \
+ } while (0)
+#endif
+
+#ifndef __FP_FRAC_ADD_4
+#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
+ do { \
+ _FP_W_TYPE _c1, _c2, _c3; \
+ r0 = x0 + y0; \
+ _c1 = r0 < x0; \
+ r1 = x1 + y1; \
+ _c2 = r1 < x1; \
+ r1 += _c1; \
+ _c2 |= r1 < _c1; \
+ r2 = x2 + y2; \
+ _c3 = r2 < x2; \
+ r2 += _c2; \
+ _c3 |= r2 < _c2; \
+ r3 = x3 + y3 + _c3; \
+ } while (0)
+#endif
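+
+/* The _c1 = r0 < x0 lines rely on a standard idiom: after the modular
+   sum r = x + y, the addition wrapped around exactly when r < x, so the
+   comparison itself is the carry bit.  In isolation:
+
+   #include <stdint.h>
+
+   static uint32_t add_with_carry_out (uint32_t x, uint32_t y,
+                                       uint32_t *carry)
+   {
+     uint32_t r = x + y;
+     *carry = r < x;   // 1 iff x + y >= 2^32
+     return r;
+   }
+*/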
+
+#ifndef __FP_FRAC_SUB_3
+#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
+ do { \
+ _FP_W_TYPE _c1, _c2; \
+ r0 = x0 - y0; \
+ _c1 = r0 > x0; \
+ r1 = x1 - y1; \
+ _c2 = r1 > x1; \
+ r1 -= _c1; \
+ _c2 |= _c1 && (y1 == x1); \
+ r2 = x2 - y2 - _c2; \
+ } while (0)
+#endif
+
+#ifndef __FP_FRAC_SUB_4
+#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
+ do { \
+ _FP_W_TYPE _c1, _c2, _c3; \
+ r0 = x0 - y0; \
+ _c1 = r0 > x0; \
+ r1 = x1 - y1; \
+ _c2 = r1 > x1; \
+ r1 -= _c1; \
+ _c2 |= _c1 && (y1 == x1); \
+ r2 = x2 - y2; \
+ _c3 = r2 > x2; \
+ r2 -= _c2; \
+ _c3 |= _c2 && (y2 == x2); \
+ r3 = x3 - y3 - _c3; \
+ } while (0)
+#endif
+
+#ifndef __FP_FRAC_DEC_3
+#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) \
+ do { \
+ UWtype _t0, _t1, _t2; \
+ _t0 = x0, _t1 = x1, _t2 = x2; \
+ __FP_FRAC_SUB_3 (x2, x1, x0, _t2, _t1, _t0, y2, y1, y0); \
+ } while (0)
+#endif
+
+#ifndef __FP_FRAC_DEC_4
+#define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) \
+ do { \
+ UWtype _t0, _t1, _t2, _t3; \
+ _t0 = x0, _t1 = x1, _t2 = x2, _t3 = x3; \
+ __FP_FRAC_SUB_4 (x3,x2,x1,x0,_t3,_t2,_t1,_t0, y3,y2,y1,y0); \
+ } while (0)
+#endif
+
+#ifndef __FP_FRAC_ADDI_4
+#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
+ do { \
+ UWtype _t; \
+ _t = ((x0 += i) < i); \
+ x1 += _t; _t = (x1 < _t); \
+ x2 += _t; _t = (x2 < _t); \
+ x3 += _t; \
+ } while (0)
+#endif
+
+/* Convert FP values between word sizes. This appears to be more
+ * complicated than I'd have expected it to be, so these might be
+ * wrong... These macros are in any case somewhat bogus because they
+ * use information about what various FRAC_n variables look like
+ * internally [eg, that 2 word vars are X_f0 and x_f1]. But so do
+ * the ones in op-2.h and op-1.h.
+ */
+#define _FP_FRAC_COPY_1_4(D, S) (D##_f = S##_f[0])
+
+#define _FP_FRAC_COPY_2_4(D, S) \
+do { \
+ D##_f0 = S##_f[0]; \
+ D##_f1 = S##_f[1]; \
+} while (0)
+
+/* Assembly/disassembly for converting to/from integral types.
+ * No shifting or overflow handled here.
+ */
+/* Put the FP value X into r, which is an integer of size rsize. */
+#define _FP_FRAC_ASSEMBLE_4(r, X, rsize) \
+ do { \
+ if (rsize <= _FP_W_TYPE_SIZE) \
+ r = X##_f[0]; \
+ else if (rsize <= 2*_FP_W_TYPE_SIZE) \
+ { \
+ r = X##_f[1]; \
+ r <<= _FP_W_TYPE_SIZE; \
+ r += X##_f[0]; \
+ } \
+ else \
+ { \
+	  /* I'm feeling lazy, so we deal with int == 3 words (implausible) \
+	     and int == 4 words as a single case.  */			    \
+ r = X##_f[3]; \
+ r <<= _FP_W_TYPE_SIZE; \
+ r += X##_f[2]; \
+ r <<= _FP_W_TYPE_SIZE; \
+ r += X##_f[1]; \
+ r <<= _FP_W_TYPE_SIZE; \
+ r += X##_f[0]; \
+ } \
+ } while (0)
+
+/* "No disassemble Number Five!" */
+/* move an integer of size rsize into X's fractional part. We rely on
+ * the _f[] array consisting of words of size _FP_W_TYPE_SIZE to avoid
+ * having to mask the values we store into it.
+ */
+#define _FP_FRAC_DISASSEMBLE_4(X, r, rsize) \
+ do { \
+ X##_f[0] = r; \
+ X##_f[1] = (rsize <= _FP_W_TYPE_SIZE ? 0 : r >> _FP_W_TYPE_SIZE); \
+ X##_f[2] = (rsize <= 2*_FP_W_TYPE_SIZE ? 0 : r >> 2*_FP_W_TYPE_SIZE); \
+ X##_f[3] = (rsize <= 3*_FP_W_TYPE_SIZE ? 0 : r >> 3*_FP_W_TYPE_SIZE); \
+  } while (0)
+
+#define _FP_FRAC_COPY_4_1(D, S) \
+do { \
+ D##_f[0] = S##_f; \
+ D##_f[1] = D##_f[2] = D##_f[3] = 0; \
+} while (0)
+
+#define _FP_FRAC_COPY_4_2(D, S) \
+do { \
+ D##_f[0] = S##_f0; \
+ D##_f[1] = S##_f1; \
+ D##_f[2] = D##_f[3] = 0; \
+} while (0)
+
+#define _FP_FRAC_COPY_4_4(D,S) _FP_FRAC_COPY_4(D,S)
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-8.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-8.h
new file mode 100644
index 000000000..e0612a5e6
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-8.h
@@ -0,0 +1,111 @@
+/* Software floating-point emulation.
+ Basic eight-word fraction declaration and manipulation.
+ Copyright (C) 1997,1998,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* We need just a few things from here for op-4; if we ever need some
+   other macros, they can be added. */
+#define _FP_FRAC_DECL_8(X) _FP_W_TYPE X##_f[8]
+#define _FP_FRAC_HIGH_8(X) (X##_f[7])
+#define _FP_FRAC_LOW_8(X) (X##_f[0])
+#define _FP_FRAC_WORD_8(X,w) (X##_f[w])
+
+#define _FP_FRAC_SLL_8(X,N) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _up = (N) % _FP_W_TYPE_SIZE; \
+ _down = _FP_W_TYPE_SIZE - _up; \
+ if (!_up) \
+ for (_i = 7; _i >= _skip; --_i) \
+ X##_f[_i] = X##_f[_i-_skip]; \
+ else \
+ { \
+ for (_i = 7; _i > _skip; --_i) \
+ X##_f[_i] = X##_f[_i-_skip] << _up \
+ | X##_f[_i-_skip-1] >> _down; \
+ X##_f[_i--] = X##_f[0] << _up; \
+ } \
+ for (; _i >= 0; --_i) \
+ X##_f[_i] = 0; \
+ } while (0)
+
+#define _FP_FRAC_SRL_8(X,N) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _down = (N) % _FP_W_TYPE_SIZE; \
+ _up = _FP_W_TYPE_SIZE - _down; \
+ if (!_down) \
+ for (_i = 0; _i <= 7-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip]; \
+ else \
+ { \
+ for (_i = 0; _i < 7-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip] >> _down \
+ | X##_f[_i+_skip+1] << _up; \
+ X##_f[_i++] = X##_f[7] >> _down; \
+ } \
+ for (; _i < 8; ++_i) \
+ X##_f[_i] = 0; \
+ } while (0)
+
+
+/* Right shift with sticky-lsb.
+ * What this actually means is that we do a standard right-shift,
+ * but that if any of the bits that fall off the right hand side
+ * were one then we always set the LSbit.
+ */
+#define _FP_FRAC_SRS_8(X,N,size) \
+ do { \
+ _FP_I_TYPE _up, _down, _skip, _i; \
+ _FP_W_TYPE _s; \
+ _skip = (N) / _FP_W_TYPE_SIZE; \
+ _down = (N) % _FP_W_TYPE_SIZE; \
+ _up = _FP_W_TYPE_SIZE - _down; \
+ for (_s = _i = 0; _i < _skip; ++_i) \
+ _s |= X##_f[_i]; \
+ if (!_down) \
+ for (_i = 0; _i <= 7-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip]; \
+ else \
+ { \
+ _s |= X##_f[_i] << _up; \
+ for (_i = 0; _i < 7-_skip; ++_i) \
+ X##_f[_i] = X##_f[_i+_skip] >> _down \
+ | X##_f[_i+_skip+1] << _up; \
+ X##_f[_i++] = X##_f[7] >> _down; \
+ } \
+ for (; _i < 8; ++_i) \
+ X##_f[_i] = 0; \
+ /* don't fix the LSB until the very end when we're sure f[0] is stable */ \
+ X##_f[0] |= (_s != 0); \
+ } while (0)
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-common.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-common.h
new file mode 100644
index 000000000..ef11b527b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/op-common.h
@@ -0,0 +1,1359 @@
+/* Software floating-point emulation. Common operations.
+ Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#define _FP_DECL(wc, X) \
+ _FP_I_TYPE X##_c __attribute__((unused)), X##_s, X##_e; \
+ _FP_FRAC_DECL_##wc(X)
+
+/*
+ * Finish truly unpacking a native fp value by classifying the kind
+ * of fp value and normalizing both the exponent and the fraction.
+ */
+
+#define _FP_UNPACK_CANONICAL(fs, wc, X) \
+do { \
+ switch (X##_e) \
+ { \
+ default: \
+ _FP_FRAC_HIGH_RAW_##fs(X) |= _FP_IMPLBIT_##fs; \
+ _FP_FRAC_SLL_##wc(X, _FP_WORKBITS); \
+ X##_e -= _FP_EXPBIAS_##fs; \
+ X##_c = FP_CLS_NORMAL; \
+ break; \
+ \
+ case 0: \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ X##_c = FP_CLS_ZERO; \
+ else \
+ { \
+ /* a denormalized number */ \
+ _FP_I_TYPE _shift; \
+ _FP_FRAC_CLZ_##wc(_shift, X); \
+ _shift -= _FP_FRACXBITS_##fs; \
+ _FP_FRAC_SLL_##wc(X, (_shift+_FP_WORKBITS)); \
+ X##_e -= _FP_EXPBIAS_##fs - 1 + _shift; \
+ X##_c = FP_CLS_NORMAL; \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ } \
+ break; \
+ \
+ case _FP_EXPMAX_##fs: \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ X##_c = FP_CLS_INF; \
+ else \
+ { \
+ X##_c = FP_CLS_NAN; \
+ /* Check for signaling NaN */ \
+ if (!(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ } \
+ break; \
+ } \
+} while (0)
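+
+/* Stripped of the soft-fp machinery, the classification above is the
+   standard IEEE exponent-field dispatch; for binary32 it reduces to
+   the following sketch:
+
+   #include <stdint.h>
+
+   enum cls { CLS_ZERO, CLS_DENORM, CLS_NORMAL, CLS_INF, CLS_NAN };
+
+   static enum cls classify32 (uint32_t bits)
+   {
+     uint32_t e = (bits >> 23) & 0xff;    // biased exponent field
+     uint32_t f = bits & 0x7fffff;        // fraction field
+     if (e == 0)
+       return f ? CLS_DENORM : CLS_ZERO;  // no implicit bit
+     if (e == 0xff)
+       return f ? CLS_NAN : CLS_INF;      // maximum exponent
+     return CLS_NORMAL;
+   }
+*/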
+
+/* Finish unpacking an fp value in semi-raw mode: the mantissa is
+ shifted by _FP_WORKBITS but the implicit MSB is not inserted and
+ other classification is not done. */
+#define _FP_UNPACK_SEMIRAW(fs, wc, X) _FP_FRAC_SLL_##wc(X, _FP_WORKBITS)
+
+/* A semi-raw value has overflowed.  Depending on the rounding mode and
+   sign, round it to infinity or to the largest finite value. */
+#define _FP_OVERFLOW_SEMIRAW(fs, wc, X) \
+do { \
+ if (FP_ROUNDMODE == FP_RND_NEAREST \
+ || (FP_ROUNDMODE == FP_RND_PINF && !X##_s) \
+ || (FP_ROUNDMODE == FP_RND_MINF && X##_s)) \
+ { \
+ X##_e = _FP_EXPMAX_##fs; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ } \
+ else \
+ { \
+ X##_e = _FP_EXPMAX_##fs - 1; \
+ _FP_FRAC_SET_##wc(X, _FP_MAXFRAC_##wc); \
+ } \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ FP_SET_EXCEPTION(FP_EX_OVERFLOW); \
+} while (0)
+
+/* Check for a semi-raw value being a signaling NaN and raise the
+ invalid exception if so. */
+#define _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X) \
+do { \
+ if (X##_e == _FP_EXPMAX_##fs \
+ && !_FP_FRAC_ZEROP_##wc(X) \
+ && !(_FP_FRAC_HIGH_##fs(X) & _FP_QNANBIT_SH_##fs)) \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+} while (0)
+
+/* Choose a NaN result from an operation on two semi-raw NaN
+ values. */
+#define _FP_CHOOSENAN_SEMIRAW(fs, wc, R, X, Y, OP) \
+do { \
+ /* _FP_CHOOSENAN expects raw values, so shift as required. */ \
+ _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
+ _FP_FRAC_SRL_##wc(Y, _FP_WORKBITS); \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, OP); \
+ _FP_FRAC_SLL_##wc(R, _FP_WORKBITS); \
+} while (0)
+
+/* Test whether a biased exponent is normal (not zero or maximum).
+   Since _FP_EXPMAX is one less than a power of two, ((e + 1) & _FP_EXPMAX)
+   is 0 when e == _FP_EXPMAX and 1 when e == 0, so the "> 1" test holds
+   exactly for the normal exponents in between. */
+#define _FP_EXP_NORMAL(fs, wc, X) (((X##_e + 1) & _FP_EXPMAX_##fs) > 1)
+
+/* Prepare to pack an fp value in semi-raw mode: the mantissa is
+ rounded and shifted right, with the rounding possibly increasing
+ the exponent (including changing a finite value to infinity). */
+#define _FP_PACK_SEMIRAW(fs, wc, X) \
+do { \
+ _FP_ROUND(wc, X); \
+ if (_FP_FRAC_HIGH_##fs(X) \
+ & (_FP_OVERFLOW_##fs >> 1)) \
+ { \
+ _FP_FRAC_HIGH_##fs(X) &= ~(_FP_OVERFLOW_##fs >> 1); \
+ X##_e++; \
+ if (X##_e == _FP_EXPMAX_##fs) \
+ _FP_OVERFLOW_SEMIRAW(fs, wc, X); \
+ } \
+ _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
+ if (!_FP_EXP_NORMAL(fs, wc, X) && !_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ if (X##_e == 0) \
+ FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \
+ else \
+ { \
+ if (!_FP_KEEPNANFRACP) \
+ { \
+ _FP_FRAC_SET_##wc(X, _FP_NANFRAC_##fs); \
+ X##_s = _FP_NANSIGN_##fs; \
+ } \
+ else \
+ _FP_FRAC_HIGH_RAW_##fs(X) |= _FP_QNANBIT_##fs; \
+ } \
+ } \
+} while (0)
+
+/*
+ * Before packing the bits back into the native fp result, take care
+ * of such mundane things as rounding and overflow. Also, for some
+ * kinds of fp values, the original parts may not have been fully
+ * extracted -- but that is ok, we can regenerate them now.
+ */
+
+#define _FP_PACK_CANONICAL(fs, wc, X) \
+do { \
+ switch (X##_c) \
+ { \
+ case FP_CLS_NORMAL: \
+ X##_e += _FP_EXPBIAS_##fs; \
+ if (X##_e > 0) \
+ { \
+ _FP_ROUND(wc, X); \
+ if (_FP_FRAC_OVERP_##wc(fs, X)) \
+ { \
+ _FP_FRAC_CLEAR_OVERP_##wc(fs, X); \
+ X##_e++; \
+ } \
+ _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
+ if (X##_e >= _FP_EXPMAX_##fs) \
+ { \
+ /* overflow */ \
+ switch (FP_ROUNDMODE) \
+ { \
+ case FP_RND_NEAREST: \
+ X##_c = FP_CLS_INF; \
+ break; \
+ case FP_RND_PINF: \
+ if (!X##_s) X##_c = FP_CLS_INF; \
+ break; \
+ case FP_RND_MINF: \
+ if (X##_s) X##_c = FP_CLS_INF; \
+ break; \
+ } \
+ if (X##_c == FP_CLS_INF) \
+ { \
+ /* Overflow to infinity */ \
+ X##_e = _FP_EXPMAX_##fs; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ } \
+ else \
+ { \
+ /* Overflow to maximum normal */ \
+ X##_e = _FP_EXPMAX_##fs - 1; \
+ _FP_FRAC_SET_##wc(X, _FP_MAXFRAC_##wc); \
+ } \
+ FP_SET_EXCEPTION(FP_EX_OVERFLOW); \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ } \
+ } \
+ else \
+ { \
+ /* we've got a denormalized number */ \
+ X##_e = -X##_e + 1; \
+ if (X##_e <= _FP_WFRACBITS_##fs) \
+ { \
+ _FP_FRAC_SRS_##wc(X, X##_e, _FP_WFRACBITS_##fs); \
+ _FP_ROUND(wc, X); \
+ if (_FP_FRAC_HIGH_##fs(X) \
+ & (_FP_OVERFLOW_##fs >> 1)) \
+ { \
+ X##_e = 1; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ } \
+ else \
+ { \
+ X##_e = 0; \
+ _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
+ FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \
+ } \
+ } \
+ else \
+ { \
+ /* underflow to zero */ \
+ X##_e = 0; \
+ if (!_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
+ _FP_ROUND(wc, X); \
+ _FP_FRAC_LOW_##wc(X) >>= (_FP_WORKBITS); \
+ } \
+ FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \
+ } \
+ } \
+ break; \
+ \
+ case FP_CLS_ZERO: \
+ X##_e = 0; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ break; \
+ \
+ case FP_CLS_INF: \
+ X##_e = _FP_EXPMAX_##fs; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ break; \
+ \
+ case FP_CLS_NAN: \
+ X##_e = _FP_EXPMAX_##fs; \
+ if (!_FP_KEEPNANFRACP) \
+ { \
+ _FP_FRAC_SET_##wc(X, _FP_NANFRAC_##fs); \
+ X##_s = _FP_NANSIGN_##fs; \
+ } \
+ else \
+ _FP_FRAC_HIGH_RAW_##fs(X) |= _FP_QNANBIT_##fs; \
+ break; \
+ } \
+} while (0)
+
+/* This one accepts a raw argument rather than a cooked one, and
+ * returns 1 if X is a signaling NaN.
+ */
+#define _FP_ISSIGNAN(fs, wc, X) \
+({ \
+ int __ret = 0; \
+ if (X##_e == _FP_EXPMAX_##fs) \
+ { \
+ if (!_FP_FRAC_ZEROP_##wc(X) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
+ __ret = 1; \
+ } \
+ __ret; \
+})
+
+
+
+
+
+/* Addition on semi-raw values. */
+#define _FP_ADD_INTERNAL(fs, wc, R, X, Y, OP) \
+do { \
+ if (X##_s == Y##_s) \
+ { \
+ /* Addition. */ \
+ R##_s = X##_s; \
+ int ediff = X##_e - Y##_e; \
+ if (ediff > 0) \
+ { \
+ R##_e = X##_e; \
+ if (Y##_e == 0) \
+ { \
+ /* Y is zero or denormalized. */ \
+ if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto add_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ ediff--; \
+ if (ediff == 0) \
+ { \
+ _FP_FRAC_ADD_##wc(R, X, Y); \
+ goto add3; \
+ } \
+ if (X##_e == _FP_EXPMAX_##fs) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto add_done; \
+ } \
+ goto add1; \
+ } \
+ } \
+ else if (X##_e == _FP_EXPMAX_##fs) \
+ { \
+ /* X is NaN or Inf, Y is normal. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto add_done; \
+ } \
+ \
+ /* Insert implicit MSB of Y. */ \
+ _FP_FRAC_HIGH_##fs(Y) |= _FP_IMPLBIT_SH_##fs; \
+ \
+ add1: \
+ /* Shift the mantissa of Y to the right EDIFF steps; \
+ remember to account later for the implicit MSB of X. */ \
+ if (ediff <= _FP_WFRACBITS_##fs) \
+ _FP_FRAC_SRS_##wc(Y, ediff, _FP_WFRACBITS_##fs); \
+ else if (!_FP_FRAC_ZEROP_##wc(Y)) \
+ _FP_FRAC_SET_##wc(Y, _FP_MINFRAC_##wc); \
+ _FP_FRAC_ADD_##wc(R, X, Y); \
+ } \
+ else if (ediff < 0) \
+ { \
+ ediff = -ediff; \
+ R##_e = Y##_e; \
+ if (X##_e == 0) \
+ { \
+ /* X is zero or denormalized. */ \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto add_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ ediff--; \
+ if (ediff == 0) \
+ { \
+ _FP_FRAC_ADD_##wc(R, Y, X); \
+ goto add3; \
+ } \
+ if (Y##_e == _FP_EXPMAX_##fs) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto add_done; \
+ } \
+ goto add2; \
+ } \
+ } \
+ else if (Y##_e == _FP_EXPMAX_##fs) \
+ { \
+ /* Y is NaN or Inf, X is normal. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto add_done; \
+ } \
+ \
+ /* Insert implicit MSB of X. */ \
+ _FP_FRAC_HIGH_##fs(X) |= _FP_IMPLBIT_SH_##fs; \
+ \
+ add2: \
+ /* Shift the mantissa of X to the right EDIFF steps; \
+ remember to account later for the implicit MSB of Y. */ \
+ if (ediff <= _FP_WFRACBITS_##fs) \
+ _FP_FRAC_SRS_##wc(X, ediff, _FP_WFRACBITS_##fs); \
+ else if (!_FP_FRAC_ZEROP_##wc(X)) \
+ _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
+ _FP_FRAC_ADD_##wc(R, Y, X); \
+ } \
+ else \
+ { \
+ /* ediff == 0. */ \
+ if (!_FP_EXP_NORMAL(fs, wc, X)) \
+ { \
+ if (X##_e == 0) \
+ { \
+ /* X and Y are zero or denormalized. */ \
+ R##_e = 0; \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ if (!_FP_FRAC_ZEROP_##wc(Y)) \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto add_done; \
+ } \
+ else if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto add_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_ADD_##wc(R, X, Y); \
+ if (_FP_FRAC_HIGH_##fs(R) & _FP_IMPLBIT_SH_##fs) \
+ { \
+ /* Normalized result. */ \
+ _FP_FRAC_HIGH_##fs(R) \
+ &= ~(_FP_W_TYPE)_FP_IMPLBIT_SH_##fs; \
+ R##_e = 1; \
+ } \
+ goto add_done; \
+ } \
+ } \
+ else \
+ { \
+ /* X and Y are NaN or Inf. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ R##_e = _FP_EXPMAX_##fs; \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ else if (_FP_FRAC_ZEROP_##wc(Y)) \
+ _FP_FRAC_COPY_##wc(R, X); \
+ else \
+ _FP_CHOOSENAN_SEMIRAW(fs, wc, R, X, Y, OP); \
+ goto add_done; \
+ } \
+ } \
+ /* The exponents of X and Y, both normal, are equal. The \
+ implicit MSBs will always add to increase the \
+ exponent. */ \
+ _FP_FRAC_ADD_##wc(R, X, Y); \
+ R##_e = X##_e + 1; \
+ _FP_FRAC_SRS_##wc(R, 1, _FP_WFRACBITS_##fs); \
+ if (R##_e == _FP_EXPMAX_##fs) \
+ /* Overflow to infinity (depending on rounding mode). */ \
+ _FP_OVERFLOW_SEMIRAW(fs, wc, R); \
+ goto add_done; \
+ } \
+ add3: \
+ if (_FP_FRAC_HIGH_##fs(R) & _FP_IMPLBIT_SH_##fs) \
+ { \
+ /* Overflow. */ \
+ _FP_FRAC_HIGH_##fs(R) &= ~(_FP_W_TYPE)_FP_IMPLBIT_SH_##fs; \
+ R##_e++; \
+ _FP_FRAC_SRS_##wc(R, 1, _FP_WFRACBITS_##fs); \
+ if (R##_e == _FP_EXPMAX_##fs) \
+ /* Overflow to infinity (depending on rounding mode). */ \
+ _FP_OVERFLOW_SEMIRAW(fs, wc, R); \
+ } \
+ add_done: ; \
+ } \
+ else \
+ { \
+ /* Subtraction. */ \
+ int ediff = X##_e - Y##_e; \
+ if (ediff > 0) \
+ { \
+ R##_e = X##_e; \
+ R##_s = X##_s; \
+ if (Y##_e == 0) \
+ { \
+ /* Y is zero or denormalized. */ \
+ if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto sub_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ ediff--; \
+ if (ediff == 0) \
+ { \
+ _FP_FRAC_SUB_##wc(R, X, Y); \
+ goto sub3; \
+ } \
+ if (X##_e == _FP_EXPMAX_##fs) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto sub_done; \
+ } \
+ goto sub1; \
+ } \
+ } \
+ else if (X##_e == _FP_EXPMAX_##fs) \
+ { \
+ /* X is NaN or Inf, Y is normal. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ goto sub_done; \
+ } \
+ \
+ /* Insert implicit MSB of Y. */ \
+ _FP_FRAC_HIGH_##fs(Y) |= _FP_IMPLBIT_SH_##fs; \
+ \
+ sub1: \
+ /* Shift the mantissa of Y to the right EDIFF steps; \
+ remember to account later for the implicit MSB of X. */ \
+ if (ediff <= _FP_WFRACBITS_##fs) \
+ _FP_FRAC_SRS_##wc(Y, ediff, _FP_WFRACBITS_##fs); \
+ else if (!_FP_FRAC_ZEROP_##wc(Y)) \
+ _FP_FRAC_SET_##wc(Y, _FP_MINFRAC_##wc); \
+ _FP_FRAC_SUB_##wc(R, X, Y); \
+ } \
+ else if (ediff < 0) \
+ { \
+ ediff = -ediff; \
+ R##_e = Y##_e; \
+ R##_s = Y##_s; \
+ if (X##_e == 0) \
+ { \
+ /* X is zero or denormalized. */ \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto sub_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ ediff--; \
+ if (ediff == 0) \
+ { \
+ _FP_FRAC_SUB_##wc(R, Y, X); \
+ goto sub3; \
+ } \
+ if (Y##_e == _FP_EXPMAX_##fs) \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto sub_done; \
+ } \
+ goto sub2; \
+ } \
+ } \
+ else if (Y##_e == _FP_EXPMAX_##fs) \
+ { \
+ /* Y is NaN or Inf, X is normal. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ goto sub_done; \
+ } \
+ \
+ /* Insert implicit MSB of X. */ \
+ _FP_FRAC_HIGH_##fs(X) |= _FP_IMPLBIT_SH_##fs; \
+ \
+ sub2: \
+ /* Shift the mantissa of X to the right EDIFF steps; \
+ remember to account later for the implicit MSB of Y. */ \
+ if (ediff <= _FP_WFRACBITS_##fs) \
+ _FP_FRAC_SRS_##wc(X, ediff, _FP_WFRACBITS_##fs); \
+ else if (!_FP_FRAC_ZEROP_##wc(X)) \
+ _FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
+ _FP_FRAC_SUB_##wc(R, Y, X); \
+ } \
+ else \
+ { \
+ /* ediff == 0. */ \
+ if (!_FP_EXP_NORMAL(fs, wc, X)) \
+ { \
+ if (X##_e == 0) \
+ { \
+ /* X and Y are zero or denormalized. */ \
+ R##_e = 0; \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ if (_FP_FRAC_ZEROP_##wc(Y)) \
+ R##_s = (FP_ROUNDMODE == FP_RND_MINF); \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ R##_s = Y##_s; \
+ } \
+ goto sub_done; \
+ } \
+ else if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_s = X##_s; \
+ goto sub_done; \
+ } \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_SUB_##wc(R, X, Y); \
+ R##_s = X##_s; \
+ if (_FP_FRAC_HIGH_##fs(R) & _FP_IMPLBIT_SH_##fs) \
+ { \
+ /* |X| < |Y|, negate result. */ \
+ _FP_FRAC_SUB_##wc(R, Y, X); \
+ R##_s = Y##_s; \
+ } \
+ else if (_FP_FRAC_ZEROP_##wc(R)) \
+ R##_s = (FP_ROUNDMODE == FP_RND_MINF); \
+ goto sub_done; \
+ } \
+ } \
+ else \
+ { \
+ /* X and Y are NaN or Inf, of opposite signs. */ \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, X); \
+ _FP_CHECK_SIGNAN_SEMIRAW(fs, wc, Y); \
+ R##_e = _FP_EXPMAX_##fs; \
+ if (_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ /* Inf - Inf. */ \
+ R##_s = _FP_NANSIGN_##fs; \
+ _FP_FRAC_SET_##wc(R, _FP_NANFRAC_##fs); \
+ _FP_FRAC_SLL_##wc(R, _FP_WORKBITS); \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ } \
+ else \
+ { \
+ /* Inf - NaN. */ \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ } \
+ } \
+ else \
+ { \
+ if (_FP_FRAC_ZEROP_##wc(Y)) \
+ { \
+ /* NaN - Inf. */ \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R, X); \
+ } \
+ else \
+ { \
+ /* NaN - NaN. */ \
+ _FP_CHOOSENAN_SEMIRAW(fs, wc, R, X, Y, OP); \
+ } \
+ } \
+ goto sub_done; \
+ } \
+ } \
+ /* The exponents of X and Y, both normal, are equal. The \
+ implicit MSBs cancel. */ \
+ R##_e = X##_e; \
+ _FP_FRAC_SUB_##wc(R, X, Y); \
+ R##_s = X##_s; \
+ if (_FP_FRAC_HIGH_##fs(R) & _FP_IMPLBIT_SH_##fs) \
+ { \
+ /* |X| < |Y|, negate result. */ \
+ _FP_FRAC_SUB_##wc(R, Y, X); \
+ R##_s = Y##_s; \
+ } \
+ else if (_FP_FRAC_ZEROP_##wc(R)) \
+ { \
+ R##_e = 0; \
+ R##_s = (FP_ROUNDMODE == FP_RND_MINF); \
+ goto sub_done; \
+ } \
+ goto norm; \
+ } \
+ sub3: \
+ if (_FP_FRAC_HIGH_##fs(R) & _FP_IMPLBIT_SH_##fs) \
+ { \
+ int diff; \
+ /* Carry into most significant bit of larger one of X and Y, \
+ canceling it; renormalize. */ \
+ _FP_FRAC_HIGH_##fs(R) &= _FP_IMPLBIT_SH_##fs - 1; \
+ norm: \
+ _FP_FRAC_CLZ_##wc(diff, R); \
+ diff -= _FP_WFRACXBITS_##fs; \
+ _FP_FRAC_SLL_##wc(R, diff); \
+ if (R##_e <= diff) \
+ { \
+ /* R is denormalized. */ \
+ diff = diff - R##_e + 1; \
+ _FP_FRAC_SRS_##wc(R, diff, _FP_WFRACBITS_##fs); \
+ R##_e = 0; \
+ } \
+ else \
+ { \
+ R##_e -= diff; \
+ _FP_FRAC_HIGH_##fs(R) &= ~(_FP_W_TYPE)_FP_IMPLBIT_SH_##fs; \
+ } \
+ } \
+ sub_done: ; \
+ } \
+} while (0)
+
+#define _FP_ADD(fs, wc, R, X, Y) _FP_ADD_INTERNAL(fs, wc, R, X, Y, '+')
+#define _FP_SUB(fs, wc, R, X, Y) \
+ do { \
+ if (!(Y##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(Y))) Y##_s ^= 1; \
+ _FP_ADD_INTERNAL(fs, wc, R, X, Y, '-'); \
+ } while (0)
+
+
+/*
+ * Main negation routine. FIXME -- when we care about setting exception
+ * bits reliably, this will not do. We should examine all of the fp classes.
+ */
+
+#define _FP_NEG(fs, wc, R, X) \
+ do { \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_c = X##_c; \
+ R##_e = X##_e; \
+ R##_s = 1 ^ X##_s; \
+ } while (0)
+
+
+/*
+ * Main multiplication routine. The input values should be cooked.
+ */
+
+#define _FP_MUL(fs, wc, R, X, Y) \
+do { \
+ R##_s = X##_s ^ Y##_s; \
+ switch (_FP_CLS_COMBINE(X##_c, Y##_c)) \
+ { \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NORMAL): \
+ R##_c = FP_CLS_NORMAL; \
+ R##_e = X##_e + Y##_e + 1; \
+ \
+ _FP_MUL_MEAT_##fs(R,X,Y); \
+ \
+ if (_FP_FRAC_OVERP_##wc(fs, R)) \
+ _FP_FRAC_SRS_##wc(R, 1, _FP_WFRACBITS_##fs); \
+ else \
+ R##_e--; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, '*'); \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
+ R##_s = X##_s; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_ZERO): \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_c = X##_c; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
+ R##_s = Y##_s; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ R##_c = Y##_c; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_INF): \
+ R##_s = _FP_NANSIGN_##fs; \
+ R##_c = FP_CLS_NAN; \
+ _FP_FRAC_SET_##wc(R, _FP_NANFRAC_##fs); \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ break; \
+ \
+ default: \
+ abort(); \
+ } \
+} while (0)
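+
+/* The R##_e = X##_e + Y##_e + 1 / R##_e-- dance above reflects that two
+   fractions in [1,2) multiply to something in [1,4).  A fixed-point
+   sketch with Q30 fractions (values in [1.0,2.0) stored in the top bits
+   of a 32-bit word; truncation stands in for _FP_FRAC_SRS here):
+
+   #include <stdint.h>
+
+   static void mul_norm (int *re, uint32_t *rf,
+                         int ex, uint32_t fx, int ey, uint32_t fy)
+   {
+     uint64_t p = (uint64_t) fx * fy;        // Q60 product in [1.0,4.0)
+     *re = ex + ey + 1;
+     if (p & ((uint64_t) 1 << 61))           // product >= 2.0: halve it
+       *rf = (uint32_t) (p >> 31);
+     else
+       {
+         (*re)--;                            // product already in [1,2)
+         *rf = (uint32_t) (p >> 30);
+       }
+   }
+*/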
+
+
+/*
+ * Main division routine. The input values should be cooked.
+ */
+
+#define _FP_DIV(fs, wc, R, X, Y) \
+do { \
+ R##_s = X##_s ^ Y##_s; \
+ switch (_FP_CLS_COMBINE(X##_c, Y##_c)) \
+ { \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NORMAL): \
+ R##_c = FP_CLS_NORMAL; \
+ R##_e = X##_e - Y##_e; \
+ \
+ _FP_DIV_MEAT_##fs(R,X,Y); \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
+ _FP_CHOOSENAN(fs, wc, R, X, Y, '/'); \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_c = X##_c; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R, Y); \
+ R##_c = Y##_c; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
+ R##_c = FP_CLS_ZERO; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
+ FP_SET_EXCEPTION(FP_EX_DIVZERO); \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
+ R##_c = FP_CLS_INF; \
+ break; \
+ \
+ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
+ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_ZERO): \
+ R##_s = _FP_NANSIGN_##fs; \
+ R##_c = FP_CLS_NAN; \
+ _FP_FRAC_SET_##wc(R, _FP_NANFRAC_##fs); \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ break; \
+ \
+ default: \
+ abort(); \
+ } \
+} while (0)
+
+
+/*
+ * Main differential comparison routine.  The inputs should be raw, not
+ * cooked.  The result is -1, 0 or 1 for ordered values, and the
+ * caller-supplied UN value (conventionally 2) when either operand is a NaN.
+ */
+
+#define _FP_CMP(fs, wc, ret, X, Y, un) \
+ do { \
+ /* NANs are unordered */ \
+ if ((X##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(X)) \
+ || (Y##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(Y))) \
+ { \
+ ret = un; \
+ } \
+ else \
+ { \
+ int __is_zero_x; \
+ int __is_zero_y; \
+ \
+ __is_zero_x = (!X##_e && _FP_FRAC_ZEROP_##wc(X)) ? 1 : 0; \
+ __is_zero_y = (!Y##_e && _FP_FRAC_ZEROP_##wc(Y)) ? 1 : 0; \
+ \
+ if (__is_zero_x && __is_zero_y) \
+ ret = 0; \
+ else if (__is_zero_x) \
+ ret = Y##_s ? 1 : -1; \
+ else if (__is_zero_y) \
+ ret = X##_s ? -1 : 1; \
+ else if (X##_s != Y##_s) \
+ ret = X##_s ? -1 : 1; \
+ else if (X##_e > Y##_e) \
+ ret = X##_s ? -1 : 1; \
+ else if (X##_e < Y##_e) \
+ ret = X##_s ? 1 : -1; \
+ else if (_FP_FRAC_GT_##wc(X, Y)) \
+ ret = X##_s ? -1 : 1; \
+ else if (_FP_FRAC_GT_##wc(Y, X)) \
+ ret = X##_s ? 1 : -1; \
+ else \
+ ret = 0; \
+ } \
+ } while (0)
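+
+/* For finite values the field-by-field comparison above agrees with a
+   well-known shortcut: mapped through the sign, IEEE bit patterns order
+   like integers.  A binary32 sketch (neither input a NaN; note that it
+   treats -0 as less than +0, which the macro does not):
+
+   #include <stdint.h>
+
+   static int cmp_bits32 (uint32_t a, uint32_t b)
+   {
+     uint32_t ka = (a & 0x80000000u) ? ~a : a | 0x80000000u;
+     uint32_t kb = (b & 0x80000000u) ? ~b : b | 0x80000000u;
+     return ka < kb ? -1 : ka > kb ? 1 : 0;
+   }
+*/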
+
+
+/* Simplification for strict equality. */
+
+#define _FP_CMP_EQ(fs, wc, ret, X, Y) \
+ do { \
+ /* NANs are unordered */ \
+ if ((X##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(X)) \
+ || (Y##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(Y))) \
+ { \
+ ret = 1; \
+ } \
+ else \
+ { \
+ ret = !(X##_e == Y##_e \
+ && _FP_FRAC_EQ_##wc(X, Y) \
+ && (X##_s == Y##_s || (!X##_e && _FP_FRAC_ZEROP_##wc(X)))); \
+ } \
+ } while (0)
+
+/* Version to test unordered. */
+
+#define _FP_CMP_UNORD(fs, wc, ret, X, Y) \
+ do { \
+ ret = ((X##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(X)) \
+ || (Y##_e == _FP_EXPMAX_##fs && !_FP_FRAC_ZEROP_##wc(Y))); \
+ } while (0)
+
+/*
+ * Main square root routine. The input value should be cooked.
+ */
+
+#define _FP_SQRT(fs, wc, R, X) \
+do { \
+ _FP_FRAC_DECL_##wc(T); _FP_FRAC_DECL_##wc(S); \
+ _FP_W_TYPE q; \
+ switch (X##_c) \
+ { \
+ case FP_CLS_NAN: \
+ _FP_FRAC_COPY_##wc(R, X); \
+ R##_s = X##_s; \
+ R##_c = FP_CLS_NAN; \
+ break; \
+ case FP_CLS_INF: \
+ if (X##_s) \
+ { \
+ R##_s = _FP_NANSIGN_##fs; \
+ R##_c = FP_CLS_NAN; /* NAN */ \
+ _FP_FRAC_SET_##wc(R, _FP_NANFRAC_##fs); \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ } \
+ else \
+ { \
+ R##_s = 0; \
+ R##_c = FP_CLS_INF; /* sqrt(+inf) = +inf */ \
+ } \
+ break; \
+ case FP_CLS_ZERO: \
+ R##_s = X##_s; \
+ R##_c = FP_CLS_ZERO; /* sqrt(+-0) = +-0 */ \
+ break; \
+ case FP_CLS_NORMAL: \
+ R##_s = 0; \
+ if (X##_s) \
+ { \
+ R##_c = FP_CLS_NAN; /* sNAN */ \
+ R##_s = _FP_NANSIGN_##fs; \
+ _FP_FRAC_SET_##wc(R, _FP_NANFRAC_##fs); \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ break; \
+ } \
+ R##_c = FP_CLS_NORMAL; \
+ if (X##_e & 1) \
+ _FP_FRAC_SLL_##wc(X, 1); \
+ R##_e = X##_e >> 1; \
+ _FP_FRAC_SET_##wc(S, _FP_ZEROFRAC_##wc); \
+ _FP_FRAC_SET_##wc(R, _FP_ZEROFRAC_##wc); \
+ q = _FP_OVERFLOW_##fs >> 1; \
+ _FP_SQRT_MEAT_##wc(R, S, T, X, q); \
+ } \
+ } while (0)
+
+/*
+ * Convert from FP to integer. Input is raw.
+ */
+
+/* RSIGNED can have the following values:
+ * 0:  the number is required to be 0..(2^rsize)-1; if not, the invalid
+ *     (NV) exception is raised and the result is either 0 or (2^rsize)-1,
+ *     depending on the sign.
+ * 1:  the number is required to be -(2^(rsize-1))..(2^(rsize-1))-1; if
+ *     not, NV is raised and the result is either -(2^(rsize-1)) or
+ *     (2^(rsize-1))-1, depending on the sign.
+ * -1: the number is required to be -(2^(rsize-1))..(2^rsize)-1; if not, NV
+ *     is raised and the result is either -(2^(rsize-1)) or (2^(rsize-1))-1,
+ *     depending on the sign.
+ */
+#define _FP_TO_INT(fs, wc, r, X, rsize, rsigned) \
+do { \
+ if (X##_e < _FP_EXPBIAS_##fs) \
+ { \
+ r = 0; \
+ if (X##_e == 0) \
+ { \
+ if (!_FP_FRAC_ZEROP_##wc(X)) \
+ { \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ } \
+ } \
+ else \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ } \
+ else if (X##_e >= _FP_EXPBIAS_##fs + rsize - (rsigned > 0 || X##_s) \
+ || (!rsigned && X##_s)) \
+ { \
+ /* Overflow or converting to the most negative integer. */ \
+ if (rsigned) \
+ { \
+ r = 1; \
+ r <<= rsize - 1; \
+ r -= 1 - X##_s; \
+ } \
+ else \
+ { \
+ r = 0; \
+ if (X##_s) \
+ r = ~r; \
+ } \
+ \
+ if (rsigned && X##_s && X##_e == _FP_EXPBIAS_##fs + rsize - 1) \
+ { \
+ /* Possibly converting to most negative integer; check the \
+ mantissa. */ \
+ int inexact = 0; \
+ (void)((_FP_FRACBITS_##fs > rsize) \
+ ? ({ _FP_FRAC_SRST_##wc(X, inexact, \
+ _FP_FRACBITS_##fs - rsize, \
+ _FP_FRACBITS_##fs); 0; }) \
+ : 0); \
+ if (!_FP_FRAC_ZEROP_##wc(X)) \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ else if (inexact) \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ } \
+ else \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ } \
+ else \
+ { \
+ _FP_FRAC_HIGH_RAW_##fs(X) |= _FP_IMPLBIT_##fs; \
+ if (X##_e >= _FP_EXPBIAS_##fs + _FP_FRACBITS_##fs - 1) \
+ { \
+ _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
+ r <<= X##_e - _FP_EXPBIAS_##fs - _FP_FRACBITS_##fs + 1; \
+ } \
+ else \
+ { \
+ int inexact; \
+ _FP_FRAC_SRST_##wc(X, inexact, \
+ (_FP_FRACBITS_##fs + _FP_EXPBIAS_##fs - 1 \
+ - X##_e), \
+ _FP_FRACBITS_##fs); \
+ if (inexact) \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ _FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
+ } \
+ if (rsigned && X##_s) \
+ r = -r; \
+ } \
+} while (0)
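
For rsize == 32 and rsigned == 1, the saturation policy described above
behaves like the following host-double sketch (hypothetical, for
illustration; the macro additionally raises FP_EX_INVALID on the
out-of-range paths and picks the extreme by the NaN's sign bit):

    #include <stdint.h>

    static int32_t to_int32_sat (double x)
    {
      if (x != x)                   /* NaN: macro chooses by sign bit */
        return INT32_MAX;
      if (x >= 2147483648.0)        /* >= 2^31 */
        return INT32_MAX;
      if (x < -2147483648.0)        /* < -2^31 */
        return INT32_MIN;
      return (int32_t) x;           /* in range: truncate toward zero */
    }
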
+
+/* Convert integer to fp. Output is raw. RTYPE must be the unsigned
+   type of the same width as the input, even when the input is signed. */
+#define _FP_FROM_INT(fs, wc, X, r, rsize, rtype) \
+ do { \
+ if (r) \
+ { \
+ rtype ur_; \
+ \
+ if ((X##_s = (r < 0))) \
+ r = -(rtype)r; \
+ \
+ ur_ = (rtype) r; \
+ (void)((rsize <= _FP_W_TYPE_SIZE) \
+ ? ({ \
+ int lz_; \
+ __FP_CLZ(lz_, (_FP_W_TYPE)ur_); \
+ X##_e = _FP_EXPBIAS_##fs + _FP_W_TYPE_SIZE - 1 - lz_; \
+ }) \
+ : ((rsize <= 2 * _FP_W_TYPE_SIZE) \
+ ? ({ \
+ int lz_; \
+ __FP_CLZ_2(lz_, (_FP_W_TYPE)(ur_ >> _FP_W_TYPE_SIZE), \
+ (_FP_W_TYPE)ur_); \
+ X##_e = (_FP_EXPBIAS_##fs + 2 * _FP_W_TYPE_SIZE - 1 \
+ - lz_); \
+ }) \
+ : (abort(), 0))); \
+ \
+ if (rsize - 1 + _FP_EXPBIAS_##fs >= _FP_EXPMAX_##fs \
+ && X##_e >= _FP_EXPMAX_##fs) \
+ { \
+ /* Exponent too big; overflow to infinity. (May also \
+ happen after rounding below.) */ \
+ _FP_OVERFLOW_SEMIRAW(fs, wc, X); \
+ goto pack_semiraw; \
+ } \
+ \
+ if (rsize <= _FP_FRACBITS_##fs \
+ || X##_e < _FP_EXPBIAS_##fs + _FP_FRACBITS_##fs) \
+ { \
+ /* Exactly representable; shift left. */ \
+ _FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize); \
+ _FP_FRAC_SLL_##wc(X, (_FP_EXPBIAS_##fs \
+ + _FP_FRACBITS_##fs - 1 - X##_e)); \
+ } \
+ else \
+ { \
+ /* More bits in integer than in floating type; need to \
+ round. */ \
+ if (_FP_EXPBIAS_##fs + _FP_WFRACBITS_##fs - 1 < X##_e) \
+ ur_ = ((ur_ >> (X##_e - _FP_EXPBIAS_##fs \
+ - _FP_WFRACBITS_##fs + 1)) \
+ | ((ur_ << (rsize - (X##_e - _FP_EXPBIAS_##fs \
+ - _FP_WFRACBITS_##fs + 1))) \
+ != 0)); \
+ _FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize); \
+ if ((_FP_EXPBIAS_##fs + _FP_WFRACBITS_##fs - 1 - X##_e) > 0) \
+ _FP_FRAC_SLL_##wc(X, (_FP_EXPBIAS_##fs \
+ + _FP_WFRACBITS_##fs - 1 - X##_e)); \
+ _FP_FRAC_HIGH_##fs(X) &= ~(_FP_W_TYPE)_FP_IMPLBIT_SH_##fs; \
+ pack_semiraw: \
+ _FP_PACK_SEMIRAW(fs, wc, X); \
+ } \
+ } \
+ else \
+ { \
+ X##_s = 0; \
+ X##_e = 0; \
+ _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
+ } \
+ } while (0)
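
A worked instance of the exponent computation above: for r == 6
(binary 110) on a 32-bit word, __builtin_clz (6) is 29, so the biased
exponent becomes _FP_EXPBIAS + 32 - 1 - 29, i.e. the value is
1.1b * 2^2. A quick host check of the leading-bit arithmetic:

    #include <stdio.h>

    int main (void)
    {
      unsigned r = 6;
      int e = 31 - __builtin_clz (r);    /* index of the leading 1 */
      printf ("%u has leading bit %d -> 1.x * 2^%d\n", r, e, e);
      return 0;
    }
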
+
+
+/* Extend from a narrower floating-point format to a wider one. Input
+ and output are raw. */
+#define FP_EXTEND(dfs,sfs,dwc,swc,D,S) \
+do { \
+ if (_FP_FRACBITS_##dfs < _FP_FRACBITS_##sfs \
+ || (_FP_EXPMAX_##dfs - _FP_EXPBIAS_##dfs \
+ < _FP_EXPMAX_##sfs - _FP_EXPBIAS_##sfs) \
+ || (_FP_EXPBIAS_##dfs < _FP_EXPBIAS_##sfs + _FP_FRACBITS_##sfs - 1 \
+ && _FP_EXPBIAS_##dfs != _FP_EXPBIAS_##sfs)) \
+ abort(); \
+ D##_s = S##_s; \
+ _FP_FRAC_COPY_##dwc##_##swc(D, S); \
+ if (_FP_EXP_NORMAL(sfs, swc, S)) \
+ { \
+ D##_e = S##_e + _FP_EXPBIAS_##dfs - _FP_EXPBIAS_##sfs; \
+ _FP_FRAC_SLL_##dwc(D, (_FP_FRACBITS_##dfs - _FP_FRACBITS_##sfs)); \
+ } \
+ else \
+ { \
+ if (S##_e == 0) \
+ { \
+ if (_FP_FRAC_ZEROP_##swc(S)) \
+ D##_e = 0; \
+ else if (_FP_EXPBIAS_##dfs \
+ < _FP_EXPBIAS_##sfs + _FP_FRACBITS_##sfs - 1) \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_SLL_##dwc(D, (_FP_FRACBITS_##dfs \
+ - _FP_FRACBITS_##sfs)); \
+ D##_e = 0; \
+ } \
+ else \
+ { \
+ int _lz; \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ _FP_FRAC_CLZ_##swc(_lz, S); \
+ _FP_FRAC_SLL_##dwc(D, \
+ _lz + _FP_FRACBITS_##dfs \
+ - _FP_FRACTBITS_##sfs); \
+ D##_e = (_FP_EXPBIAS_##dfs - _FP_EXPBIAS_##sfs + 1 \
+ + _FP_FRACXBITS_##sfs - _lz); \
+ } \
+ } \
+ else \
+ { \
+ D##_e = _FP_EXPMAX_##dfs; \
+ if (!_FP_FRAC_ZEROP_##swc(S)) \
+ { \
+ if (!(_FP_FRAC_HIGH_RAW_##sfs(S) & _FP_QNANBIT_##sfs)) \
+ FP_SET_EXCEPTION(FP_EX_INVALID); \
+ _FP_FRAC_SLL_##dwc(D, (_FP_FRACBITS_##dfs \
+ - _FP_FRACBITS_##sfs)); \
+ } \
+ } \
+ } \
+} while (0)
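
As a numeric check of the denormal path above: extending the smallest
single denormal (2^-149) to double gives _lz == 31 on a 32-bit word, so
D_e = 1023 - 127 + 1 + 8 - 31 = 874, which is exactly the biased double
exponent of 2^-149 (1023 - 149). A host sketch confirming the result the
macro must produce:

    #include <stdio.h>
    #include <string.h>

    int main (void)
    {
      float f = 1.401298464324817e-45f;   /* 2^-149 */
      double d = f;                       /* hardware extension */
      unsigned long long bits;
      memcpy (&bits, &d, sizeof bits);
      printf ("biased exponent = %llu\n", (bits >> 52) & 0x7ff);
      return 0;                           /* prints 874 */
    }
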
+
+/* Truncate from a wider floating-point format to a narrower one.
+ Input and output are semi-raw. */
+#define FP_TRUNC(dfs,sfs,dwc,swc,D,S) \
+do { \
+ if (_FP_FRACBITS_##sfs < _FP_FRACBITS_##dfs \
+ || (_FP_EXPBIAS_##sfs < _FP_EXPBIAS_##dfs + _FP_FRACBITS_##dfs - 1 \
+ && _FP_EXPBIAS_##sfs != _FP_EXPBIAS_##dfs)) \
+ abort(); \
+ D##_s = S##_s; \
+ if (_FP_EXP_NORMAL(sfs, swc, S)) \
+ { \
+ D##_e = S##_e + _FP_EXPBIAS_##dfs - _FP_EXPBIAS_##sfs; \
+ if (D##_e >= _FP_EXPMAX_##dfs) \
+ _FP_OVERFLOW_SEMIRAW(dfs, dwc, D); \
+ else \
+ { \
+ if (D##_e <= 0) \
+ { \
+ if (D##_e < 1 - _FP_FRACBITS_##dfs) \
+ { \
+ _FP_FRAC_SET_##swc(S, _FP_ZEROFRAC_##swc); \
+ _FP_FRAC_LOW_##swc(S) |= 1; \
+ } \
+ else \
+ { \
+ _FP_FRAC_HIGH_##sfs(S) |= _FP_IMPLBIT_SH_##sfs; \
+ _FP_FRAC_SRS_##swc(S, (_FP_WFRACBITS_##sfs \
+ - _FP_WFRACBITS_##dfs + 1 - D##_e), \
+ _FP_WFRACBITS_##sfs); \
+ } \
+ D##_e = 0; \
+ } \
+ else \
+ _FP_FRAC_SRS_##swc(S, (_FP_WFRACBITS_##sfs \
+ - _FP_WFRACBITS_##dfs), \
+ _FP_WFRACBITS_##sfs); \
+ _FP_FRAC_COPY_##dwc##_##swc(D, S); \
+ } \
+ } \
+ else \
+ { \
+ if (S##_e == 0) \
+ { \
+ D##_e = 0; \
+ if (_FP_FRAC_ZEROP_##swc(S)) \
+ _FP_FRAC_SET_##dwc(D, _FP_ZEROFRAC_##dwc); \
+ else \
+ { \
+ FP_SET_EXCEPTION(FP_EX_DENORM); \
+ if (_FP_EXPBIAS_##sfs \
+ < _FP_EXPBIAS_##dfs + _FP_FRACBITS_##dfs - 1) \
+ { \
+ _FP_FRAC_SRS_##swc(S, (_FP_WFRACBITS_##sfs \
+ - _FP_WFRACBITS_##dfs), \
+ _FP_WFRACBITS_##sfs); \
+ _FP_FRAC_COPY_##dwc##_##swc(D, S); \
+ } \
+ else \
+ { \
+ _FP_FRAC_SET_##dwc(D, _FP_ZEROFRAC_##dwc); \
+ _FP_FRAC_LOW_##dwc(D) |= 1; \
+ } \
+ } \
+ } \
+ else \
+ { \
+ D##_e = _FP_EXPMAX_##dfs; \
+ if (_FP_FRAC_ZEROP_##swc(S)) \
+ _FP_FRAC_SET_##dwc(D, _FP_ZEROFRAC_##dwc); \
+ else \
+ { \
+ _FP_CHECK_SIGNAN_SEMIRAW(sfs, swc, S); \
+ _FP_FRAC_SRL_##swc(S, (_FP_WFRACBITS_##sfs \
+ - _FP_WFRACBITS_##dfs)); \
+ _FP_FRAC_COPY_##dwc##_##swc(D, S); \
+ /* Semi-raw NaN must have all workbits cleared. */ \
+ _FP_FRAC_LOW_##dwc(D) \
+ &= ~(_FP_W_TYPE) ((1 << _FP_WORKBITS) - 1); \
+ _FP_FRAC_HIGH_##dfs(D) |= _FP_QNANBIT_SH_##dfs; \
+ } \
+ } \
+ } \
+} while (0)
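
Conversely, truncating a double below FLT_MIN lands on the D_e <= 0
branch above and denormalizes the fraction. A host illustration of the
expected outcome (a sketch, not the macro itself):

    #include <stdio.h>

    int main (void)
    {
      double d = 5.877471754111438e-39;   /* 2^-127, below FLT_MIN */
      float f = (float) d;                 /* hardware truncation */
      printf ("%a -> %a\n", d, f);         /* 0x1p-127 -> 0x1p-127 */
      return 0;
    }
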
+
+/*
+ * Helper primitives.
+ */
+
+/* Count leading zeros in a word. */
+
+#ifndef __FP_CLZ
+/* GCC 3.4 and later provide the builtins for us. */
+#define __FP_CLZ(r, x) \
+ do { \
+ if (sizeof (_FP_W_TYPE) == sizeof (unsigned int)) \
+ r = __builtin_clz (x); \
+ else if (sizeof (_FP_W_TYPE) == sizeof (unsigned long)) \
+ r = __builtin_clzl (x); \
+ else if (sizeof (_FP_W_TYPE) == sizeof (unsigned long long)) \
+ r = __builtin_clzll (x); \
+ else \
+ abort (); \
+ } while (0)
+#endif /* ndef __FP_CLZ */
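
For compilers without these builtins, a port can supply its own __FP_CLZ
before this point; a portable (if slow) fallback in the same spirit might
look like this hypothetical sketch, valid for nonzero inputs just like
the builtins:

    static int clz_fallback (_FP_W_TYPE x)
    {
      int n = 0;
      _FP_W_TYPE mask = (_FP_W_TYPE) 1 << (_FP_W_TYPE_SIZE - 1);

      while (!(x & mask))       /* x != 0, so this terminates */
        {
          n++;
          mask >>= 1;
        }
      return n;
    }
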
+
+#define _FP_DIV_HELP_imm(q, r, n, d) \
+ do { \
+ q = n / d, r = n % d; \
+ } while (0)
+
+
+/* A restoring bit-by-bit division primitive. */
+
+#define _FP_DIV_MEAT_N_loop(fs, wc, R, X, Y) \
+ do { \
+ int count = _FP_WFRACBITS_##fs; \
+ _FP_FRAC_DECL_##wc (u); \
+ _FP_FRAC_DECL_##wc (v); \
+ _FP_FRAC_COPY_##wc (u, X); \
+ _FP_FRAC_COPY_##wc (v, Y); \
+ _FP_FRAC_SET_##wc (R, _FP_ZEROFRAC_##wc); \
+ /* Normalize U and V. */ \
+ _FP_FRAC_SLL_##wc (u, _FP_WFRACXBITS_##fs); \
+ _FP_FRAC_SLL_##wc (v, _FP_WFRACXBITS_##fs); \
+ /* First round. Since the operands are normalized, either the \
+ first or second bit will be set in the fraction. Produce a \
+ normalized result by checking which and adjusting the loop \
+ count and exponent accordingly. */ \
+ if (_FP_FRAC_GE_1 (u, v)) \
+ { \
+ _FP_FRAC_SUB_##wc (u, u, v); \
+ _FP_FRAC_LOW_##wc (R) |= 1; \
+ count--; \
+ } \
+ else \
+ R##_e--; \
+ /* Subsequent rounds. */ \
+ do { \
+ int msb = (_FP_WS_TYPE) _FP_FRAC_HIGH_##wc (u) < 0; \
+ _FP_FRAC_SLL_##wc (u, 1); \
+ _FP_FRAC_SLL_##wc (R, 1); \
+ if (msb || _FP_FRAC_GE_1 (u, v)) \
+ { \
+ _FP_FRAC_SUB_##wc (u, u, v); \
+ _FP_FRAC_LOW_##wc (R) |= 1; \
+ } \
+ } while (--count > 0); \
+ /* If there's anything left in U, the result is inexact. */ \
+ _FP_FRAC_LOW_##wc (R) |= !_FP_FRAC_ZEROP_##wc (u); \
+ } while (0)
+
+#define _FP_DIV_MEAT_1_loop(fs, R, X, Y) _FP_DIV_MEAT_N_loop (fs, 1, R, X, Y)
+#define _FP_DIV_MEAT_2_loop(fs, R, X, Y) _FP_DIV_MEAT_N_loop (fs, 2, R, X, Y)
+#define _FP_DIV_MEAT_4_loop(fs, R, X, Y) _FP_DIV_MEAT_N_loop (fs, 4, R, X, Y)
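
On a single word the loop above reduces to the following hypothetical
model: each iteration shifts one bit out of the remainder and into the
quotient, subtracting the divisor whenever it fits, and any leftover
remainder is folded into the sticky bit.

    #include <stdint.h>

    /* Divide normalized fractions u/v, producing BITS quotient bits.  */
    static uint32_t div_loop_sketch (uint32_t u, uint32_t v, int bits)
    {
      uint32_t q = 0;
      int count = bits;

      if (u >= v)               /* first round keeps the result normal */
        {
          u -= v;
          q |= 1;
          count--;
        }
      /* else the caller would decrement the exponent instead */

      while (count-- > 0)
        {
          int msb = (int32_t) u < 0;   /* bit about to shift out */
          u <<= 1;
          q <<= 1;
          if (msb || u >= v)
            {
              u -= v;
              q |= 1;
            }
        }
      return q | (u != 0);      /* sticky bit: result was inexact */
    }
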
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/quad.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/quad.h
new file mode 100644
index 000000000..c22e94402
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/quad.h
@@ -0,0 +1,271 @@
+/* Software floating-point emulation.
+ Definitions for IEEE Quad Precision.
+ Copyright (C) 1997,1998,1999,2006,2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#if _FP_W_TYPE_SIZE < 32
+#error "Here's a nickel, kid. Go buy yourself a real computer."
+#endif
+
+#if _FP_W_TYPE_SIZE < 64
+#define _FP_FRACTBITS_Q (4*_FP_W_TYPE_SIZE)
+#else
+#define _FP_FRACTBITS_Q (2*_FP_W_TYPE_SIZE)
+#endif
+
+#define _FP_FRACBITS_Q 113
+#define _FP_FRACXBITS_Q (_FP_FRACTBITS_Q - _FP_FRACBITS_Q)
+#define _FP_WFRACBITS_Q (_FP_WORKBITS + _FP_FRACBITS_Q)
+#define _FP_WFRACXBITS_Q (_FP_FRACTBITS_Q - _FP_WFRACBITS_Q)
+#define _FP_EXPBITS_Q 15
+#define _FP_EXPBIAS_Q 16383
+#define _FP_EXPMAX_Q 32767
+
+#define _FP_QNANBIT_Q \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_Q-2) % _FP_W_TYPE_SIZE)
+#define _FP_QNANBIT_SH_Q \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_Q-2+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_Q \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_Q-1) % _FP_W_TYPE_SIZE)
+#define _FP_IMPLBIT_SH_Q \
+ ((_FP_W_TYPE)1 << (_FP_FRACBITS_Q-1+_FP_WORKBITS) % _FP_W_TYPE_SIZE)
+#define _FP_OVERFLOW_Q \
+ ((_FP_W_TYPE)1 << (_FP_WFRACBITS_Q % _FP_W_TYPE_SIZE))
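
For reference, with a 64-bit word (_FP_W_TYPE_SIZE == 64 and the
_FP_WORKBITS == 3 defined in soft-fp.h), the definitions above work out
to the following values:

    /* _FP_FRACTBITS_Q  = 128    _FP_FRACXBITS_Q  = 15
       _FP_WFRACBITS_Q  = 116    _FP_WFRACXBITS_Q = 12
       implicit bit: (113-1) % 64 == 48, i.e. bit 48 of the high
       fraction word, matching _FP_IMPLBIT_Q above.  */
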
+
+typedef float TFtype __attribute__((mode(TF)));
+
+#if _FP_W_TYPE_SIZE < 64
+
+union _FP_UNION_Q
+{
+ TFtype flt;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_Q;
+ unsigned long frac3 : _FP_FRACBITS_Q - (_FP_IMPLBIT_Q != 0)-(_FP_W_TYPE_SIZE * 3);
+ unsigned long frac2 : _FP_W_TYPE_SIZE;
+ unsigned long frac1 : _FP_W_TYPE_SIZE;
+ unsigned long frac0 : _FP_W_TYPE_SIZE;
+#else
+ unsigned long frac0 : _FP_W_TYPE_SIZE;
+ unsigned long frac1 : _FP_W_TYPE_SIZE;
+ unsigned long frac2 : _FP_W_TYPE_SIZE;
+ unsigned long frac3 : _FP_FRACBITS_Q - (_FP_IMPLBIT_Q != 0)-(_FP_W_TYPE_SIZE * 3);
+ unsigned exp : _FP_EXPBITS_Q;
+ unsigned sign : 1;
+#endif /* not bigendian */
+ } bits __attribute__((packed));
+};
+
+
+#define FP_DECL_Q(X) _FP_DECL(4,X)
+#define FP_UNPACK_RAW_Q(X,val) _FP_UNPACK_RAW_4(Q,X,val)
+#define FP_UNPACK_RAW_QP(X,val) _FP_UNPACK_RAW_4_P(Q,X,val)
+#define FP_PACK_RAW_Q(val,X) _FP_PACK_RAW_4(Q,val,X)
+#define FP_PACK_RAW_QP(val,X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_4_P(Q,val,X); \
+ } while (0)
+
+#define FP_UNPACK_Q(X,val) \
+ do { \
+ _FP_UNPACK_RAW_4(Q,X,val); \
+ _FP_UNPACK_CANONICAL(Q,4,X); \
+ } while (0)
+
+#define FP_UNPACK_QP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_4_P(Q,X,val); \
+ _FP_UNPACK_CANONICAL(Q,4,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_Q(X,val) \
+ do { \
+ _FP_UNPACK_RAW_4(Q,X,val); \
+ _FP_UNPACK_SEMIRAW(Q,4,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_QP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_4_P(Q,X,val); \
+ _FP_UNPACK_SEMIRAW(Q,4,X); \
+ } while (0)
+
+#define FP_PACK_Q(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(Q,4,X); \
+ _FP_PACK_RAW_4(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_QP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(Q,4,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_4_P(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_Q(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(Q,4,X); \
+ _FP_PACK_RAW_4(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_QP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(Q,4,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_4_P(Q,val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,4,X)
+#define FP_NEG_Q(R,X) _FP_NEG(Q,4,R,X)
+#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,4,R,X,Y)
+#define FP_SUB_Q(R,X,Y) _FP_SUB(Q,4,R,X,Y)
+#define FP_MUL_Q(R,X,Y) _FP_MUL(Q,4,R,X,Y)
+#define FP_DIV_Q(R,X,Y) _FP_DIV(Q,4,R,X,Y)
+#define FP_SQRT_Q(R,X) _FP_SQRT(Q,4,R,X)
+#define _FP_SQRT_MEAT_Q(R,S,T,X,Q) _FP_SQRT_MEAT_4(R,S,T,X,Q)
+
+#define FP_CMP_Q(r,X,Y,un) _FP_CMP(Q,4,r,X,Y,un)
+#define FP_CMP_EQ_Q(r,X,Y) _FP_CMP_EQ(Q,4,r,X,Y)
+#define FP_CMP_UNORD_Q(r,X,Y) _FP_CMP_UNORD(Q,4,r,X,Y)
+
+#define FP_TO_INT_Q(r,X,rsz,rsg) _FP_TO_INT(Q,4,r,X,rsz,rsg)
+#define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,4,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_Q(X) _FP_FRAC_HIGH_4(X)
+#define _FP_FRAC_HIGH_RAW_Q(X) _FP_FRAC_HIGH_4(X)
+
+#else /* not _FP_W_TYPE_SIZE < 64 */
+union _FP_UNION_Q
+{
+ TFtype flt /* __attribute__((mode(TF))) */ ;
+ struct {
+ _FP_W_TYPE a, b;
+ } longs;
+ struct {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_Q;
+ _FP_W_TYPE frac1 : _FP_FRACBITS_Q - (_FP_IMPLBIT_Q != 0) - _FP_W_TYPE_SIZE;
+ _FP_W_TYPE frac0 : _FP_W_TYPE_SIZE;
+#else
+ _FP_W_TYPE frac0 : _FP_W_TYPE_SIZE;
+ _FP_W_TYPE frac1 : _FP_FRACBITS_Q - (_FP_IMPLBIT_Q != 0) - _FP_W_TYPE_SIZE;
+ unsigned exp : _FP_EXPBITS_Q;
+ unsigned sign : 1;
+#endif
+ } bits;
+};
+
+#define FP_DECL_Q(X) _FP_DECL(2,X)
+#define FP_UNPACK_RAW_Q(X,val) _FP_UNPACK_RAW_2(Q,X,val)
+#define FP_UNPACK_RAW_QP(X,val) _FP_UNPACK_RAW_2_P(Q,X,val)
+#define FP_PACK_RAW_Q(val,X) _FP_PACK_RAW_2(Q,val,X)
+#define FP_PACK_RAW_QP(val,X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(Q,val,X); \
+ } while (0)
+
+#define FP_UNPACK_Q(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2(Q,X,val); \
+ _FP_UNPACK_CANONICAL(Q,2,X); \
+ } while (0)
+
+#define FP_UNPACK_QP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2_P(Q,X,val); \
+ _FP_UNPACK_CANONICAL(Q,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_Q(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2(Q,X,val); \
+ _FP_UNPACK_SEMIRAW(Q,2,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_QP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_2_P(Q,X,val); \
+ _FP_UNPACK_SEMIRAW(Q,2,X); \
+ } while (0)
+
+#define FP_PACK_Q(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(Q,2,X); \
+ _FP_PACK_RAW_2(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_QP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(Q,2,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_Q(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(Q,2,X); \
+ _FP_PACK_RAW_2(Q,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_QP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(Q,2,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_2_P(Q,val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,2,X)
+#define FP_NEG_Q(R,X) _FP_NEG(Q,2,R,X)
+#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,2,R,X,Y)
+#define FP_SUB_Q(R,X,Y) _FP_SUB(Q,2,R,X,Y)
+#define FP_MUL_Q(R,X,Y) _FP_MUL(Q,2,R,X,Y)
+#define FP_DIV_Q(R,X,Y) _FP_DIV(Q,2,R,X,Y)
+#define FP_SQRT_Q(R,X) _FP_SQRT(Q,2,R,X)
+#define _FP_SQRT_MEAT_Q(R,S,T,X,Q) _FP_SQRT_MEAT_2(R,S,T,X,Q)
+
+#define FP_CMP_Q(r,X,Y,un) _FP_CMP(Q,2,r,X,Y,un)
+#define FP_CMP_EQ_Q(r,X,Y) _FP_CMP_EQ(Q,2,r,X,Y)
+#define FP_CMP_UNORD_Q(r,X,Y) _FP_CMP_UNORD(Q,2,r,X,Y)
+
+#define FP_TO_INT_Q(r,X,rsz,rsg) _FP_TO_INT(Q,2,r,X,rsz,rsg)
+#define FP_FROM_INT_Q(X,r,rs,rt) _FP_FROM_INT(Q,2,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_Q(X) _FP_FRAC_HIGH_2(X)
+#define _FP_FRAC_HIGH_RAW_Q(X) _FP_FRAC_HIGH_2(X)
+
+#endif /* not _FP_W_TYPE_SIZE < 64 */
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/single.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/single.h
new file mode 100644
index 000000000..9c3734adf
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/single.h
@@ -0,0 +1,151 @@
+/* Software floating-point emulation.
+ Definitions for IEEE Single Precision.
+ Copyright (C) 1997,1998,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#if _FP_W_TYPE_SIZE < 32
+#error "Here's a nickel kid. Go buy yourself a real computer."
+#endif
+
+#define _FP_FRACTBITS_S _FP_W_TYPE_SIZE
+
+#define _FP_FRACBITS_S 24
+#define _FP_FRACXBITS_S (_FP_FRACTBITS_S - _FP_FRACBITS_S)
+#define _FP_WFRACBITS_S (_FP_WORKBITS + _FP_FRACBITS_S)
+#define _FP_WFRACXBITS_S (_FP_FRACTBITS_S - _FP_WFRACBITS_S)
+#define _FP_EXPBITS_S 8
+#define _FP_EXPBIAS_S 127
+#define _FP_EXPMAX_S 255
+#define _FP_QNANBIT_S ((_FP_W_TYPE)1 << (_FP_FRACBITS_S-2))
+#define _FP_QNANBIT_SH_S ((_FP_W_TYPE)1 << (_FP_FRACBITS_S-2+_FP_WORKBITS))
+#define _FP_IMPLBIT_S ((_FP_W_TYPE)1 << (_FP_FRACBITS_S-1))
+#define _FP_IMPLBIT_SH_S ((_FP_W_TYPE)1 << (_FP_FRACBITS_S-1+_FP_WORKBITS))
+#define _FP_OVERFLOW_S ((_FP_W_TYPE)1 << (_FP_WFRACBITS_S))
+
+/* The implementation of _FP_MUL_MEAT_S and _FP_DIV_MEAT_S should be
+ chosen by the target machine. */
+
+typedef float SFtype __attribute__((mode(SF)));
+
+union _FP_UNION_S
+{
+ SFtype flt;
+ struct {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned sign : 1;
+ unsigned exp : _FP_EXPBITS_S;
+ unsigned frac : _FP_FRACBITS_S - (_FP_IMPLBIT_S != 0);
+#else
+ unsigned frac : _FP_FRACBITS_S - (_FP_IMPLBIT_S != 0);
+ unsigned exp : _FP_EXPBITS_S;
+ unsigned sign : 1;
+#endif
+ } bits __attribute__((packed));
+};
+
+#define FP_DECL_S(X) _FP_DECL(1,X)
+#define FP_UNPACK_RAW_S(X,val) _FP_UNPACK_RAW_1(S,X,val)
+#define FP_UNPACK_RAW_SP(X,val) _FP_UNPACK_RAW_1_P(S,X,val)
+#define FP_PACK_RAW_S(val,X) _FP_PACK_RAW_1(S,val,X)
+#define FP_PACK_RAW_SP(val,X) \
+ do { \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(S,val,X); \
+ } while (0)
+
+#define FP_UNPACK_S(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1(S,X,val); \
+ _FP_UNPACK_CANONICAL(S,1,X); \
+ } while (0)
+
+#define FP_UNPACK_SP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1_P(S,X,val); \
+ _FP_UNPACK_CANONICAL(S,1,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_S(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1(S,X,val); \
+ _FP_UNPACK_SEMIRAW(S,1,X); \
+ } while (0)
+
+#define FP_UNPACK_SEMIRAW_SP(X,val) \
+ do { \
+ _FP_UNPACK_RAW_1_P(S,X,val); \
+ _FP_UNPACK_SEMIRAW(S,1,X); \
+ } while (0)
+
+#define FP_PACK_S(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(S,1,X); \
+ _FP_PACK_RAW_1(S,val,X); \
+ } while (0)
+
+#define FP_PACK_SP(val,X) \
+ do { \
+ _FP_PACK_CANONICAL(S,1,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(S,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_S(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(S,1,X); \
+ _FP_PACK_RAW_1(S,val,X); \
+ } while (0)
+
+#define FP_PACK_SEMIRAW_SP(val,X) \
+ do { \
+ _FP_PACK_SEMIRAW(S,1,X); \
+ if (!FP_INHIBIT_RESULTS) \
+ _FP_PACK_RAW_1_P(S,val,X); \
+ } while (0)
+
+#define FP_ISSIGNAN_S(X) _FP_ISSIGNAN(S,1,X)
+#define FP_NEG_S(R,X) _FP_NEG(S,1,R,X)
+#define FP_ADD_S(R,X,Y) _FP_ADD(S,1,R,X,Y)
+#define FP_SUB_S(R,X,Y) _FP_SUB(S,1,R,X,Y)
+#define FP_MUL_S(R,X,Y) _FP_MUL(S,1,R,X,Y)
+#define FP_DIV_S(R,X,Y) _FP_DIV(S,1,R,X,Y)
+#define FP_SQRT_S(R,X) _FP_SQRT(S,1,R,X)
+#define _FP_SQRT_MEAT_S(R,S,T,X,Q) _FP_SQRT_MEAT_1(R,S,T,X,Q)
+
+#define FP_CMP_S(r,X,Y,un) _FP_CMP(S,1,r,X,Y,un)
+#define FP_CMP_EQ_S(r,X,Y) _FP_CMP_EQ(S,1,r,X,Y)
+#define FP_CMP_UNORD_S(r,X,Y) _FP_CMP_UNORD(S,1,r,X,Y)
+
+#define FP_TO_INT_S(r,X,rsz,rsg) _FP_TO_INT(S,1,r,X,rsz,rsg)
+#define FP_FROM_INT_S(X,r,rs,rt) _FP_FROM_INT(S,1,X,r,rs,rt)
+
+#define _FP_FRAC_HIGH_S(X) _FP_FRAC_HIGH_1(X)
+#define _FP_FRAC_HIGH_RAW_S(X) _FP_FRAC_HIGH_1(X)
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/soft-fp.h b/gcc-4.2.1-5666.3/gcc/config/soft-fp/soft-fp.h
new file mode 100644
index 000000000..dbf080e7f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/soft-fp.h
@@ -0,0 +1,209 @@
+/* Software floating-point emulation.
+ Copyright (C) 1997,1998,1999,2000,2002,2003,2005,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jj@ultra.linux.cz),
+ David S. Miller (davem@redhat.com) and
+ Peter Maydell (pmaydell@chiark.greenend.org.uk).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef SOFT_FP_H
+#define SOFT_FP_H
+
+#ifdef _LIBC
+#include <sfp-machine.h>
+#else
+#include "sfp-machine.h"
+#endif
+
+/* Allow sfp-machine to have its own byte order definitions. */
+#ifndef __BYTE_ORDER
+#ifdef _LIBC
+#include <endian.h>
+#else
+#error "endianness not defined by sfp-machine.h"
+#endif
+#endif
+
+#define _FP_WORKBITS 3
+#define _FP_WORK_LSB ((_FP_W_TYPE)1 << 3)
+#define _FP_WORK_ROUND ((_FP_W_TYPE)1 << 2)
+#define _FP_WORK_GUARD ((_FP_W_TYPE)1 << 1)
+#define _FP_WORK_STICKY ((_FP_W_TYPE)1 << 0)
+
+#ifndef FP_RND_NEAREST
+# define FP_RND_NEAREST 0
+# define FP_RND_ZERO 1
+# define FP_RND_PINF 2
+# define FP_RND_MINF 3
+#endif
+#ifndef FP_ROUNDMODE
+# define FP_ROUNDMODE FP_RND_NEAREST
+#endif
+
+/* By default don't care about exceptions. */
+#ifndef FP_EX_INVALID
+#define FP_EX_INVALID 0
+#endif
+#ifndef FP_EX_OVERFLOW
+#define FP_EX_OVERFLOW 0
+#endif
+#ifndef FP_EX_UNDERFLOW
+#define FP_EX_UNDERFLOW 0
+#endif
+#ifndef FP_EX_DIVZERO
+#define FP_EX_DIVZERO 0
+#endif
+#ifndef FP_EX_INEXACT
+#define FP_EX_INEXACT 0
+#endif
+#ifndef FP_EX_DENORM
+#define FP_EX_DENORM 0
+#endif
+
+#ifdef _FP_DECL_EX
+#define FP_DECL_EX \
+ int _fex = 0; \
+ _FP_DECL_EX
+#else
+#define FP_DECL_EX int _fex = 0
+#endif
+
+#ifndef FP_INIT_ROUNDMODE
+#define FP_INIT_ROUNDMODE do {} while (0)
+#endif
+
+#ifndef FP_HANDLE_EXCEPTIONS
+#define FP_HANDLE_EXCEPTIONS do {} while (0)
+#endif
+
+#ifndef FP_INHIBIT_RESULTS
+/* By default the results are always written.  sfp-machine.h may
+ * override this, e.g. to check whether certain exceptions are
+ * unmasked and inhibit the store in that case.
+ */
+#define FP_INHIBIT_RESULTS 0
+#endif
+
+#define FP_SET_EXCEPTION(ex) \
+ _fex |= (ex)
+
+#define FP_UNSET_EXCEPTION(ex) \
+ _fex &= ~(ex)
+
+#define FP_CLEAR_EXCEPTIONS \
+ _fex = 0
+
+#define _FP_ROUND_NEAREST(wc, X) \
+do { \
+ if ((_FP_FRAC_LOW_##wc(X) & 15) != _FP_WORK_ROUND) \
+ _FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \
+} while (0)
+
+#define _FP_ROUND_ZERO(wc, X) (void)0
+
+#define _FP_ROUND_PINF(wc, X) \
+do { \
+ if (!X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \
+ _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \
+} while (0)
+
+#define _FP_ROUND_MINF(wc, X) \
+do { \
+ if (X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \
+ _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \
+} while (0)
+
+#define _FP_ROUND(wc, X) \
+do { \
+ if (_FP_FRAC_LOW_##wc(X) & 7) \
+ FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ switch (FP_ROUNDMODE) \
+ { \
+ case FP_RND_NEAREST: \
+ _FP_ROUND_NEAREST(wc,X); \
+ break; \
+ case FP_RND_ZERO: \
+ _FP_ROUND_ZERO(wc,X); \
+ break; \
+ case FP_RND_PINF: \
+ _FP_ROUND_PINF(wc,X); \
+ break; \
+ case FP_RND_MINF: \
+ _FP_ROUND_MINF(wc,X); \
+ break; \
+ } \
+} while (0)
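
The nearest-even trick above works because the three work bits plus the
lowest kept bit form a 4-bit window: adding _FP_WORK_ROUND (0b100)
carries into the kept bits exactly when the discarded part is above one
half, or is exactly one half below an odd kept bit. A small host
demonstration (a sketch only):

    #include <stdio.h>

    int main (void)
    {
      /* tie below even bit, tie below odd bit, above the tie */
      unsigned cases[] = { 0x14, 0x1c, 0x15 };
      for (int i = 0; i < 3; i++)
        {
          unsigned x = cases[i];
          if ((x & 15) != 0x4)     /* 0b0100: tie with even kept bit */
            x += 0x4;              /* _FP_WORK_ROUND */
          printf ("%#x -> kept bits %#x\n", cases[i], x >> 3);
        }
      return 0;   /* 0x14 -> 0x2, 0x1c -> 0x4, 0x15 -> 0x3 */
    }
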
+
+#define FP_CLS_NORMAL 0
+#define FP_CLS_ZERO 1
+#define FP_CLS_INF 2
+#define FP_CLS_NAN 3
+
+#define _FP_CLS_COMBINE(x,y) (((x) << 2) | (y))
+
+#include "op-1.h"
+#include "op-2.h"
+#include "op-4.h"
+#include "op-8.h"
+#include "op-common.h"
+
+/* Sigh. Silly things longlong.h needs. */
+#define UWtype _FP_W_TYPE
+#define W_TYPE_SIZE _FP_W_TYPE_SIZE
+
+typedef int QItype __attribute__((mode(QI)));
+typedef int SItype __attribute__((mode(SI)));
+typedef int DItype __attribute__((mode(DI)));
+typedef unsigned int UQItype __attribute__((mode(QI)));
+typedef unsigned int USItype __attribute__((mode(SI)));
+typedef unsigned int UDItype __attribute__((mode(DI)));
+#if _FP_W_TYPE_SIZE == 32
+typedef unsigned int UHWtype __attribute__((mode(HI)));
+#elif _FP_W_TYPE_SIZE == 64
+typedef USItype UHWtype;
+#endif
+
+#define SI_BITS (__CHAR_BIT__ * (int)sizeof(SItype))
+#define DI_BITS (__CHAR_BIT__ * (int)sizeof(DItype))
+
+#ifndef umul_ppmm
+#ifdef _LIBC
+#include <stdlib/longlong.h>
+#else
+#include "longlong.h"
+#endif
+#endif
+
+#ifdef _LIBC
+#include <stdlib.h>
+#else
+extern void abort (void);
+#endif
+
+#endif
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/subdf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subdf3.c
new file mode 100644
index 000000000..3978b5299
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subdf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a - b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+DFtype __subdf3(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_D(A, a);
+ FP_UNPACK_SEMIRAW_D(B, b);
+ FP_SUB_D(R, A, B);
+ FP_PACK_SEMIRAW_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/subsf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subsf3.c
new file mode 100644
index 000000000..f1cbdd1ff
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subsf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a - b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+SFtype __subsf3(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B); FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_S(A, a);
+ FP_UNPACK_SEMIRAW_S(B, b);
+ FP_SUB_S(R, A, B);
+ FP_PACK_SEMIRAW_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/subtf3.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subtf3.c
new file mode 100644
index 000000000..7ba4c8c5e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/subtf3.c
@@ -0,0 +1,49 @@
+/* Software floating-point emulation.
+ Return a - b
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+TFtype __subtf3(TFtype a, TFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A); FP_DECL_Q(B); FP_DECL_Q(R);
+ TFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(A, a);
+ FP_UNPACK_SEMIRAW_Q(B, b);
+ FP_SUB_Q(R, A, B);
+ FP_PACK_SEMIRAW_Q(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/t-softfp b/gcc-4.2.1-5666.3/gcc/config/soft-fp/t-softfp
new file mode 100644
index 000000000..4a3f91e15
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/t-softfp
@@ -0,0 +1,108 @@
+# Copyright (C) 2006 Free Software Foundation, Inc.
+
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston MA 02110-1301, USA.
+
+# Targets using soft-fp should define the following variables:
+#
+# softfp_float_modes: a list of soft-float floating-point modes,
+# e.g. sf df
+# softfp_int_modes: a list of integer modes for which to define conversions,
+# e.g. si di
+# softfp_extensions: a list of extensions between floating-point modes,
+# e.g. sfdf
+# softfp_truncations: a list of truncations between floating-point modes,
+# e.g. dfsf
+# softfp_machine_header: the target sfp-machine.h file (relative to config/),
+# e.g. rs6000/sfp-machine.h
+#
+# Extensions and truncations should include those where only one mode
+# is a soft-float mode; for example, sftf where sf is hard-float and
+# tf is soft-float.
+#
+# If the libgcc2.c functions should not be replaced, also define:
+#
+# softfp_exclude_libgcc2 := y
+#
+# Leaving the libgcc2.c functions in place is a temporary measure for
+# targets with both hard-float and soft-float multilibs, since these
+# variables apply to all multilibs. With toplevel libgcc,
+# soft-fp can be used conditionally on the multilib instead.
+#
+# If the code should not be compiled at all for some multilibs, define:
+#
+# softfp_wrap_start: text to put at the start of wrapper source files,
+# output with echo
+# e.g. '#ifndef __powerpc64__'
+# softfp_wrap_end: text to put at the end of wrapper source files,
+# e.g. '#endif'
+#
+# This is another temporary measure.
+
+softfp_float_funcs = add$(m)3 div$(m)3 eq$(m)2 ge$(m)2 le$(m)2 mul$(m)3 \
+ neg$(m)2 sub$(m)3 unord$(m)2
+softfp_floatint_funcs = fix$(m)$(i) fixuns$(m)$(i) \
+ float$(i)$(m) floatun$(i)$(m)
+
+softfp_func_list := \
+ $(foreach m,$(softfp_float_modes), \
+ $(softfp_float_funcs) \
+ $(foreach i,$(softfp_int_modes), \
+ $(softfp_floatint_funcs))) \
+ $(foreach e,$(softfp_extensions),extend$(e)2) \
+ $(foreach t,$(softfp_truncations),trunc$(t)2)
+
+ifeq ($(softfp_exclude_libgcc2),y)
+# This list is taken from mklibgcc.in and doesn't presently allow for
+# 64-bit targets where si should become di and di should become ti.
+softfp_func_list := $(filter-out floatdidf floatdisf fixunsdfsi fixunssfsi \
+ fixunsdfdi fixdfdi fixunssfdi fixsfdi fixxfdi fixunsxfdi \
+ floatdixf fixunsxfsi fixtfdi fixunstfdi floatditf \
+ floatundidf floatundisf floatundixf floatunditf,$(softfp_func_list))
+endif
+
+ifeq ($(softfp_wrap_start),)
+softfp_file_list := \
+ $(addsuffix .c,$(addprefix $(srcdir)/config/soft-fp/,$(softfp_func_list)))
+else
+softfp_file_list := $(addsuffix .c,$(softfp_func_list))
+
+$(softfp_file_list):
+ echo $(softfp_wrap_start) > $@
+ echo '#include "config/soft-fp/$@"' >> $@
+ echo $(softfp_wrap_end) >> $@
+endif
+
+LIB2FUNCS_EXTRA += $(softfp_file_list)
+
+ifneq ($(softfp_exclude_libgcc2),y)
+# Functions in libgcc2.c are excluded for each soft-float mode (a
+# target may have both soft-float and hard-float modes), for the fixed
+# list of integer modes (si and di) for which libgcc2.c defines any
+# such functions. Depending on the target, the si and di symbols may
+# in fact define di and ti functions.
+
+LIB2FUNCS_EXCLUDE += \
+ $(addprefix _,$(foreach m,$(softfp_float_modes), \
+ $(foreach i,si di, \
+ $(softfp_floatint_funcs))))
+endif
+
+SFP_MACHINE := sfp-machine.h
+
+$(SFP_MACHINE): $(srcdir)/config/$(softfp_machine_header)
+ cp $(srcdir)/config/$(softfp_machine_header) $(SFP_MACHINE)
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/truncdfsf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/truncdfsf2.c
new file mode 100644
index 000000000..bd953912e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/truncdfsf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Truncate IEEE double into IEEE single
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "double.h"
+
+SFtype __truncdfsf2(DFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_D(A, a);
+#if _FP_W_TYPE_SIZE < _FP_FRACBITS_D
+ FP_TRUNC(S,D,1,2,R,A);
+#else
+ FP_TRUNC(S,D,1,1,R,A);
+#endif
+ FP_PACK_SEMIRAW_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfdf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfdf2.c
new file mode 100644
index 000000000..c3827b08a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfdf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Truncate IEEE quad into IEEE double
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+#include "quad.h"
+
+DFtype __trunctfdf2(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ FP_DECL_D(R);
+ DFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(A, a);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_TRUNC(D,Q,2,4,R,A);
+#else
+ FP_TRUNC(D,Q,1,2,R,A);
+#endif
+ FP_PACK_SEMIRAW_D(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfsf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfsf2.c
new file mode 100644
index 000000000..676c937e2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/trunctfsf2.c
@@ -0,0 +1,54 @@
+/* Software floating-point emulation.
+ Truncate IEEE quad into IEEE single
+ Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+#include "quad.h"
+
+SFtype __trunctfsf2(TFtype a)
+{
+ FP_DECL_EX;
+ FP_DECL_Q(A);
+ FP_DECL_S(R);
+ SFtype r;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(A, a);
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_TRUNC(S,Q,1,4,R,A);
+#else
+ FP_TRUNC(S,Q,1,2,R,A);
+#endif
+ FP_PACK_SEMIRAW_S(r, R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/unorddf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unorddf2.c
new file mode 100644
index 000000000..c83a56327
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unorddf2.c
@@ -0,0 +1,45 @@
+/* Software floating-point emulation.
+ Return 1 iff a or b is a NaN, 0 otherwise.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Joseph Myers (joseph@codesourcery.com).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "double.h"
+
+int
+__unorddf2(DFtype a, DFtype b)
+{
+ FP_DECL_D(A); FP_DECL_D(B);
+ int r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_UNORD_D(r, A, B);
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordsf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordsf2.c
new file mode 100644
index 000000000..8de756356
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordsf2.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Return 1 iff a or b is a NaN, 0 otherwise.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Joseph Myers (joseph@codesourcery.com).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "single.h"
+
+int
+__unordsf2(SFtype a, SFtype b)
+{
+ FP_DECL_S(A);
+ FP_DECL_S(B);
+ int r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_UNORD_S(r, A, B);
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordtf2.c b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordtf2.c
new file mode 100644
index 000000000..134b1d09b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/soft-fp/unordtf2.c
@@ -0,0 +1,46 @@
+/* Software floating-point emulation.
+ Return 1 iff a or b is a NaN, 0 otherwise.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Joseph Myers (joseph@codesourcery.com).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "soft-fp.h"
+#include "quad.h"
+
+int
+__unordtf2(TFtype a, TFtype b)
+{
+ FP_DECL_Q(A);
+ FP_DECL_Q(B);
+ int r;
+
+ FP_UNPACK_RAW_Q(A, a);
+ FP_UNPACK_RAW_Q(B, b);
+ FP_CMP_UNORD_Q(r, A, B);
+
+ return r;
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2-10.h b/gcc-4.2.1-5666.3/gcc/config/sol2-10.h
new file mode 100644
index 000000000..1da6bee10
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2-10.h
@@ -0,0 +1,24 @@
+/* Operating system specific defines to be used when targeting GCC for any
+ Solaris 2 system starting from Solaris 10.
+ Copyright 2006 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Solaris 10 has the float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2-6.h b/gcc-4.2.1-5666.3/gcc/config/sol2-6.h
new file mode 100644
index 000000000..d5b7dda2a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2-6.h
@@ -0,0 +1,27 @@
+/* Operating system specific defines to be used when targeting GCC for any
+ Solaris 2 system up to Solaris 2.6.
+ Copyright 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{pthreads|pthread:-D_REENTRANT -D_PTHREADS95} \
+%{!pthreads:%{!pthread:%{threads:-D_REENTRANT -D_SOLARIS_THREADS}}} \
+%{compat-bsd:-iwithprefixbefore ucbinclude -I/usr/ucbinclude} \
+"
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2-c.c b/gcc-4.2.1-5666.3/gcc/config/sol2-c.c
new file mode 100644
index 000000000..173583be2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2-c.c
@@ -0,0 +1,272 @@
+/* Solaris support needed only by C/C++ frontends.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "tm.h"
+#include "tm_p.h"
+#include "toplev.h"
+
+#include "c-format.h"
+#include "intl.h"
+
+#include "cpplib.h"
+#include "c-pragma.h"
+#include "c-common.h"
+
+/* cmn_err accepts only the "l" and "ll" length modifiers. */
+static const format_length_info cmn_err_length_specs[] =
+{
+ { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C89 },
+ { NULL, 0, 0, NULL, 0, 0 }
+};
+
+static const format_flag_spec cmn_err_flag_specs[] =
+{
+ { 'w', 0, 0, N_("field width"), N_("field width in printf format"), STD_C89 },
+ { 'L', 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 },
+ { 0, 0, 0, NULL, NULL, 0 }
+};
+
+
+static const format_flag_pair cmn_err_flag_pairs[] =
+{
+ { 0, 0, 0, 0 }
+};
+
+static const format_char_info bitfield_string_type =
+ { "b", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "", "cR", NULL };
+
+static const format_char_info cmn_err_char_table[] =
+{
+ /* C89 conversion specifiers. */
+ { "dD", 0, STD_C89, { T89_I, BADLEN, BADLEN, T89_L, T9L_LL, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "", NULL },
+ { "oOxX",0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "", NULL },
+ { "u", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "", NULL },
+ { "c", 0, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "", NULL },
+ { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "c", NULL },
+ { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "cR", NULL },
+ { "b", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "w", "", &bitfield_string_type },
+ { NULL, 0, 0, NOLENGTHS, NULL, NULL, NULL }
+};
+
+const format_kind_info solaris_format_types[] = {
+ { "cmn_err", cmn_err_length_specs, cmn_err_char_table, "", NULL,
+ cmn_err_flag_specs, cmn_err_flag_pairs,
+ FMT_FLAG_ARG_CONVERT|FMT_FLAG_EMPTY_PREC_OK,
+ 'w', 0, 0, 0, 'L',
+ &integer_type_node, &integer_type_node
+ }
+};
+
+/* Handle #pragma align ALIGNMENT (VAR [, VAR]...) */
+
+static void
+solaris_pragma_align (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree t, x;
+ enum cpp_ttype ttype;
+ HOST_WIDE_INT low;
+
+ if (pragma_lex (&x) != CPP_NUMBER
+ || pragma_lex (&t) != CPP_OPEN_PAREN)
+ {
+ warning (0, "malformed %<#pragma align%>, ignoring");
+ return;
+ }
+
+ low = TREE_INT_CST_LOW (x);
+ if (TREE_INT_CST_HIGH (x) != 0
+ || (low != 1 && low != 2 && low != 4 && low != 8 && low != 16
+ && low != 32 && low != 64 && low != 128))
+ {
+ warning (0, "invalid alignment for %<#pragma align%>, ignoring");
+ return;
+ }
+
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma align%>, ignoring");
+ return;
+ }
+
+ while (1)
+ {
+ tree decl = identifier_global_value (t);
+ if (decl && DECL_P (decl))
+ warning (0, "%<#pragma align%> must appear before the declaration of "
+ "%D, ignoring", decl);
+ else
+ solaris_pending_aligns = tree_cons (t, build_tree_list (NULL, x),
+ solaris_pending_aligns);
+
+ ttype = pragma_lex (&t);
+ if (ttype == CPP_COMMA)
+ {
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma align%>");
+ return;
+ }
+ }
+ else if (ttype == CPP_CLOSE_PAREN)
+ {
+ if (pragma_lex (&t) != CPP_EOF)
+ warning (0, "junk at end of %<#pragma align%>");
+ return;
+ }
+ else
+ {
+ warning (0, "malformed %<#pragma align%>");
+ return;
+ }
+ }
+}
+
+/* Handle #pragma init (function [, function]...) */
+
+static void
+solaris_pragma_init (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree t;
+ enum cpp_ttype ttype;
+
+ if (pragma_lex (&t) != CPP_OPEN_PAREN)
+ {
+ warning (0, "malformed %<#pragma init%>, ignoring");
+ return;
+ }
+
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma init%>, ignoring");
+ return;
+ }
+
+ while (1)
+ {
+ tree decl = identifier_global_value (t);
+ if (decl && DECL_P (decl))
+ {
+ tree init_list = build_tree_list (get_identifier ("init"),
+ NULL);
+ tree attrs = tree_cons (get_identifier ("used"), NULL, init_list);
+ decl_attributes (&decl, attrs, 0);
+ }
+ else
+ solaris_pending_inits = tree_cons (t, NULL, solaris_pending_inits);
+
+ ttype = pragma_lex (&t);
+ if (ttype == CPP_COMMA)
+ {
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma init%>");
+ return;
+ }
+ }
+ else if (ttype == CPP_CLOSE_PAREN)
+ {
+ if (pragma_lex (&t) != CPP_EOF)
+ warning (0, "junk at end of %<#pragma init%>");
+ return;
+ }
+ else
+ {
+ warning (0, "malformed %<#pragma init%>");
+ return;
+ }
+ }
+}
+
+/* Handle #pragma fini (function [, function]...) */
+
+static void
+solaris_pragma_fini (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree t;
+ enum cpp_ttype ttype;
+
+ if (pragma_lex (&t) != CPP_OPEN_PAREN)
+ {
+ warning (0, "malformed %<#pragma fini%>, ignoring");
+ return;
+ }
+
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma fini%>, ignoring");
+ return;
+ }
+
+ while (1)
+ {
+ tree decl = identifier_global_value (t);
+ if (decl && DECL_P (decl))
+ {
+ tree fini_list = build_tree_list (get_identifier ("fini"),
+ NULL);
+ tree attrs = tree_cons (get_identifier ("used"), NULL, fini_list);
+ decl_attributes (&decl, attrs, 0);
+ }
+ else
+ solaris_pending_finis = tree_cons (t, NULL, solaris_pending_finis);
+
+ ttype = pragma_lex (&t);
+ if (ttype == CPP_COMMA)
+ {
+ ttype = pragma_lex (&t);
+ if (ttype != CPP_NAME)
+ {
+ warning (0, "malformed %<#pragma fini%>");
+ return;
+ }
+ }
+ else if (ttype == CPP_CLOSE_PAREN)
+ {
+ if (pragma_lex (&t) != CPP_EOF)
+ warning (0, "junk at end of %<#pragma fini%>");
+ return;
+ }
+ else
+ {
+ warning (0, "malformed %<#pragma fini%>");
+ return;
+ }
+ }
+}
+
+/* Register Solaris-specific #pragma directives. */
+
+void
+solaris_register_pragmas (void)
+{
+ c_register_pragma_with_expansion (0, "align", solaris_pragma_align);
+ c_register_pragma (0, "init", solaris_pragma_init);
+ c_register_pragma (0, "fini", solaris_pragma_fini);
+}
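The tables above register the Solaris cmn_err format archetype with GCC's -Wformat machinery, including the %b conversion, which consumes an integer value followed by a bit-field description string (bitfield_string_type). A sketch of the user-level annotation this enables; the wrapper function is hypothetical, modeled on the kernel's cmn_err(9F):

    /* A hypothetical cmn_err-style function checked via the tables above.  */
    extern void my_cmn_err (int level, const char *fmt, ...)
      __attribute__ ((format (cmn_err, 2, 3)));

    void
    report (int unit, int status)
    {
      /* %b consumes the value and then a bit-field description string.  */
      my_cmn_err (2, "unit %d: status %b\n",
                  unit, status, "\20\3ERROR\2BUSY\1READY");
      /* With -Wformat, a mismatch such as
         my_cmn_err (2, "%s\n", unit) is diagnosed.  */
    }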
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2-protos.h b/gcc-4.2.1-5666.3/gcc/config/sol2-protos.h
new file mode 100644
index 000000000..757c6f839
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2-protos.h
@@ -0,0 +1,24 @@
+/* Operating system specific prototypes to be used when targeting GCC for any
+ Solaris 2 system.
+ Copyright 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+extern void solaris_insert_attributes (tree, tree *);
+extern void solaris_register_pragmas (void);
+extern void solaris_output_init_fini (FILE *, tree);
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2.c b/gcc-4.2.1-5666.3/gcc/config/sol2.c
new file mode 100644
index 000000000..e3d3147ca
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2.c
@@ -0,0 +1,120 @@
+/* General Solaris system support.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "toplev.h"
+#include "ggc.h"
+
+tree solaris_pending_aligns, solaris_pending_inits, solaris_pending_finis;
+
+/* Attach any pending attributes for DECL to the list in *ATTRIBUTES.
+ Pending attributes come from #pragma or _Pragma, so this code is
+ only useful in the C family front ends, but it is included in
+ all languages to avoid changing the target machine initializer
+ depending on the language. */
+
+void
+solaris_insert_attributes (tree decl, tree *attributes)
+{
+ tree *x, next;
+
+ if (solaris_pending_aligns != NULL && TREE_CODE (decl) == VAR_DECL)
+ for (x = &solaris_pending_aligns; *x; x = &TREE_CHAIN (*x))
+ {
+ tree name = TREE_PURPOSE (*x);
+ tree value = TREE_VALUE (*x);
+ if (DECL_NAME (decl) == name)
+ {
+ if (lookup_attribute ("aligned", DECL_ATTRIBUTES (decl))
+ || lookup_attribute ("aligned", *attributes))
+ warning (0, "ignoring %<#pragma align%> for explicitly "
+ "aligned %q+D", decl);
+ else
+ *attributes = tree_cons (get_identifier ("aligned"), value,
+ *attributes);
+ next = TREE_CHAIN (*x);
+ ggc_free (*x);
+ *x = next;
+ break;
+ }
+ }
+
+ if (solaris_pending_inits != NULL && TREE_CODE (decl) == FUNCTION_DECL)
+ for (x = &solaris_pending_inits; *x; x = &TREE_CHAIN (*x))
+ {
+ tree name = TREE_PURPOSE (*x);
+ if (DECL_NAME (decl) == name)
+ {
+ *attributes = tree_cons (get_identifier ("init"), NULL,
+ *attributes);
+ *attributes = tree_cons (get_identifier ("used"), NULL,
+ *attributes);
+ next = TREE_CHAIN (*x);
+ ggc_free (*x);
+ *x = next;
+ break;
+ }
+ }
+
+ if (solaris_pending_finis != NULL && TREE_CODE (decl) == FUNCTION_DECL)
+ for (x = &solaris_pending_finis; *x; x = &TREE_CHAIN (*x))
+ {
+ tree name = TREE_PURPOSE (*x);
+ if (DECL_NAME (decl) == name)
+ {
+ *attributes = tree_cons (get_identifier ("fini"), NULL,
+ *attributes);
+ *attributes = tree_cons (get_identifier ("used"), NULL,
+ *attributes);
+ next = TREE_CHAIN (*x);
+ ggc_free (*x);
+ *x = next;
+ break;
+ }
+ }
+}
+
+/* Output initializer or finalizer entries for DECL to FILE. */
+
+void
+solaris_output_init_fini (FILE *file, tree decl)
+{
+ if (lookup_attribute ("init", DECL_ATTRIBUTES (decl)))
+ {
+ fprintf (file, "\t.pushsection\t\".init\"\n");
+ ASM_OUTPUT_CALL (file, decl);
+ fprintf (file, "\t.popsection\n");
+ }
+
+ if (lookup_attribute ("fini", DECL_ATTRIBUTES (decl)))
+ {
+ fprintf (file, "\t.pushsection\t\".fini\"\n");
+ ASM_OUTPUT_CALL (file, decl);
+ fprintf (file, "\t.popsection\n");
+ }
+}
+
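Taken together, the pragma handlers in sol2-c.c queue names on the three pending lists, solaris_insert_attributes above turns them into ordinary attributes when the declarations finally appear, and solaris_output_init_fini emits the section switches. A sketch of the resulting source-level behavior (all names invented):

    #pragma align 16 (big_buffer)  /* queued on solaris_pending_aligns */
    #pragma init (setup)           /* queued on solaris_pending_inits  */
    #pragma fini (teardown)        /* queued on solaris_pending_finis  */

    static char big_buffer[4096];  /* picks up __attribute__ ((aligned (16))) */

    static void
    setup (void)       /* gets "init" and "used"; a call to it is placed */
    {                  /* in the ".init" section via .pushsection        */
    }

    static void
    teardown (void)    /* likewise, with the call placed in ".fini" */
    {
    }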
diff --git a/gcc-4.2.1-5666.3/gcc/config/sol2.h b/gcc-4.2.1-5666.3/gcc/config/sol2.h
new file mode 100644
index 000000000..5ffaf5758
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/sol2.h
@@ -0,0 +1,245 @@
+/* Operating system specific defines to be used when targeting GCC for any
+ Solaris 2 system.
+ Copyright 2002, 2003, 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* Solaris 2 (at least as of 2.5.1) uses a 32-bit wchar_t. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Solaris 2 uses a wint_t different from the default. This is required
+ by the SCD 2.4.1, p. 6-83, Figure 6-66. */
+#undef WINT_TYPE
+#define WINT_TYPE "long int"
+
+#undef WINT_TYPE_SIZE
+#define WINT_TYPE_SIZE BITS_PER_WORD
+
+#define TARGET_HANDLE_PRAGMA_REDEFINE_EXTNAME 1
+
+/* ??? Note: in order for -compat-bsd to work fully,
+ we must somehow arrange to fixincludes /usr/ucbinclude
+ and put the result in $(libsubdir)/ucbinclude. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{pthreads|pthread:-D_REENTRANT -D_PTHREADS} \
+%{!pthreads:%{!pthread:%{threads:-D_REENTRANT -D_SOLARIS_THREADS}}} \
+%{compat-bsd:-iwithprefixbefore ucbinclude -I/usr/ucbinclude} \
+"
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define TARGET_SUB_OS_CPP_BUILTINS()
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define_std ("unix"); \
+ builtin_define_std ("sun"); \
+ builtin_define ("__svr4__"); \
+ builtin_define ("__SVR4"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=svr4"); \
+ /* For C++ we need to add some additional macro \
+ definitions required by the C++ standard \
+ library. */ \
+ if (c_dialect_cxx ()) \
+ { \
+ builtin_define ("_XOPEN_SOURCE=500"); \
+ builtin_define ("_LARGEFILE_SOURCE=1"); \
+ builtin_define ("_LARGEFILE64_SOURCE=1"); \
+ builtin_define ("__EXTENSIONS__"); \
+ } \
+ TARGET_SUB_OS_CPP_BUILTINS(); \
+ } while (0)
+
+/* The system headers under Solaris 2 are C++-aware since 2.0. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* The Sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+%{fpic|fpie|fPIC|fPIE:-K PIC} \
+%(asm_cpu) \
+"
+
+/* The only reason we don't use the standard LIB_SPEC is that we don't yet support C++. */
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{compat-bsd:-lucb -lsocket -lnsl -lelf -laio} \
+ %{!shared:\
+ %{!symbolic:\
+ %{pthreads|pthread:-lpthread} \
+ %{!pthreads:%{!pthread:%{threads:-lthread}}} \
+ %{p|pg:-ldl} -lc}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* We don't use the standard svr4 STARTFILE_SPEC because it's wrong for us. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{p:mcrt1.o%s} \
+ %{!p: \
+ %{pg:gcrt1.o%s gmon.o%s} \
+ %{!pg:crt1.o%s}}}} \
+ crti.o%s %(startfile_arch) \
+ crtbegin.o%s"
+
+#undef STARTFILE_ARCH32_SPEC
+#define STARTFILE_ARCH32_SPEC "%{ansi:values-Xc.o%s} \
+ %{!ansi:values-Xa.o%s}"
+
+#undef STARTFILE_ARCH_SPEC
+#define STARTFILE_ARCH_SPEC STARTFILE_ARCH32_SPEC
+
+#undef LINK_ARCH32_SPEC_BASE
+#define LINK_ARCH32_SPEC_BASE \
+ "%{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{compat-bsd: \
+ %{!YP,*:%{p|pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
+ -R /usr/ucblib} \
+ %{!compat-bsd: \
+ %{!YP,*:%{p|pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}}"
+
+#undef LINK_ARCH32_SPEC
+#define LINK_ARCH32_SPEC LINK_ARCH32_SPEC_BASE
+
+#undef LINK_ARCH_SPEC
+#define LINK_ARCH_SPEC LINK_ARCH32_SPEC
+
+/* This should be the same as in svr4.h, except with -R added. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{h*} %{v:-V} \
+ %{b} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy %{!mimpure-text:-z text}} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %(link_arch) \
+ %{Qy:} %{!Qn:-Qy}"
+
+/* The Solaris linker doesn't understand constructor priorities. (The
+ GNU linker does support constructor priorities, so GNU ld
+ configuration files for Solaris override this setting.) */
+#undef SUPPORTS_INIT_PRIORITY
+#define SUPPORTS_INIT_PRIORITY 0
+
+/* This defines which switch letters take arguments.
+ It is as in svr4.h but with -R added. */
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z')
+
+#define STDC_0_IN_SYSTEM_HEADERS 1
+
+/*
+ * Attempt to turn on access permissions for the stack.
+ *
+ * _SC_STACK_PROT is only defined for releases after 2.6, but we want
+ * this code to run everywhere.  2.6 can change the stack protection but
+ * has no way to query it.
+ *
+ */
+
+/* sys/mman.h is not present on some non-Solaris configurations
+ that use sol2.h, so ENABLE_EXECUTE_STACK must use a magic
+ number instead of the appropriate PROT_* flags. */
+
+#define ENABLE_EXECUTE_STACK \
+ \
+/* #define STACK_PROT_RWX (PROT_READ | PROT_WRITE | PROT_EXEC) */ \
+ \
+static int need_enable_exec_stack; \
+ \
+static void check_enabling(void) __attribute__ ((constructor)); \
+static void check_enabling(void) \
+{ \
+ extern long sysconf(int); \
+ \
+ int prot = (int) sysconf(515 /* _SC_STACK_PROT */); \
+ if (prot != 7 /* STACK_PROT_RWX */) \
+ need_enable_exec_stack = 1; \
+} \
+ \
+extern void __enable_execute_stack (void *); \
+void \
+__enable_execute_stack (void *addr) \
+{ \
+ extern int mprotect(void *, size_t, int); \
+ if (!need_enable_exec_stack) \
+ return; \
+ else { \
+ long size = getpagesize (); \
+ long mask = ~(size-1); \
+ char *page = (char *) (((long) addr) & mask); \
+ char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ if (mprotect (page, end - page, 7 /* STACK_PROT_RWX */) < 0) \
+ perror ("mprotect of trampoline code"); \
+ } \
+}
+
+/* Support Solaris-specific format checking for cmn_err. */
+#define TARGET_N_FORMAT_TYPES 1
+#define TARGET_FORMAT_TYPES solaris_format_types
+
+/* #pragma init and #pragma fini are implemented on top of init and
+ fini attributes. */
+#define SOLARIS_ATTRIBUTE_TABLE \
+ { "init", 0, 0, true, false, false, NULL }, \
+ { "fini", 0, 0, true, false, false, NULL }
+
+/* This is how to declare the size of a function. For Solaris, we output
+ any .init or .fini entries here. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ solaris_output_init_fini (FILE, DECL); \
+ } \
+ while (0)
+
+/* Register the Solaris-specific #pragma directives. */
+#define REGISTER_TARGET_PRAGMAS() solaris_register_pragmas ()
+
+extern GTY(()) tree solaris_pending_aligns;
+extern GTY(()) tree solaris_pending_inits;
+extern GTY(()) tree solaris_pending_finis;
+
+/* Allow macro expansion in #pragma pack. */
+#define HANDLE_PRAGMA_PACK_WITH_EXPANSION
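ENABLE_EXECUTE_STACK above exists for GNU C nested functions: taking the address of one makes GCC materialize a trampoline on the stack, and the compiler then emits a call to __enable_execute_stack so the page holding the trampoline becomes executable. A minimal sketch of code that triggers the mechanism:

    /* Taking add_k's address forces a stack trampoline, which in turn
       calls __enable_execute_stack on targets defining the macro above.  */
    static int
    apply (int (*f) (int), int x)
    {
      return f (x);
    }

    int
    outer (int k)
    {
      int add_k (int x) { return x + k; }  /* GNU C nested function */
      return apply (add_k, 41);            /* address taken here */
    }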
diff --git a/gcc-4.2.1-5666.3/gcc/config/svr3.h b/gcc-4.2.1-5666.3/gcc/config/svr3.h
new file mode 100644
index 000000000..283ebf0cd
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/svr3.h
@@ -0,0 +1,161 @@
+/* Operating system specific defines to be used when targeting GCC for
+ generic System V Release 3 system.
+ Copyright (C) 1991, 1996, 2000, 2002, 2004 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Define a symbol indicating that we are using svr3.h. */
+#define USING_SVR3_H
+
+/* Define a symbol so that libgcc* can know what sort of operating
+ environment and assembler syntax we are targeting for. */
+#define SVR3_target
+
+/* Assembler, linker, library, and startfile spec's. */
+
+/* The .file command should always begin the output. */
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+/* We don't use ROUNDED because the standard compiler doesn't,
+ and the linker gives error messages if a common symbol
+ has more than one length value. */
+
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%lu\n", (unsigned long)(SIZE)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+/* Note that using bss_section here caused errors
+ in building shared libraries on system V.3. */
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ int align = exact_log2 (ROUNDED); \
+ if (align > 2) align = 2; \
+ switch_to_section (data_section); \
+ ASM_OUTPUT_ALIGN ((FILE), align == -1 ? 2 : align); \
+ ASM_OUTPUT_LABEL ((FILE), (NAME)); \
+ fprintf ((FILE), "\t.set .,.+%u\n", (int)(ROUNDED)); \
+ } while (0)
+
+/* Output #ident as a .ident. */
+
+#undef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t.ident \"%s\"\n", NAME);
+
+/* Use periods rather than dollar signs in special g++ assembler names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* System V Release 3 uses COFF debugging info. */
+
+#define SDB_DEBUGGING_INFO 1
+
+/* We don't want to output DBX debugging information. */
+
+#undef DBX_DEBUGGING_INFO
+
+/* Define the actual types of some ANSI-mandated types. These
+ definitions should work for most SVR3 systems. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* The prefix to add to user-visible assembler symbols.
+
+ For System V Release 3 the convention is to prepend a leading
+ underscore onto user-level symbol names. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'.
+
+ For most svr3 systems, the convention is that any symbol which begins
+ with a period is not put into the linker symbol table by the assembler. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%s%ld", LOCAL_LABEL_PREFIX, PREFIX, (long)(NUM))
+
+/* We want local labels to start with period if made with asm_fprintf. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* Support const sections and the ctors and dtors sections for g++. */
+
+/* Define a few machine-specific details of the implementation of
+ constructors.
+
+ The __CTORS_LIST__ goes in the .init section. Define CTOR_LIST_BEGIN
+ and CTOR_LIST_END to contribute to the .init section an instruction to
+ push a word containing 0 (or some equivalent of that).
+
+ Define TARGET_ASM_CONSTRUCTOR to push the address of the constructor. */
+
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+#define FINI_SECTION_ASM_OP "\t.section .fini,\"x\""
+#define DTORS_SECTION_ASM_OP FINI_SECTION_ASM_OP
+
+/* CTOR_LIST_BEGIN and CTOR_LIST_END are machine-dependent
+ because they push on the stack. */
+
+#ifndef STACK_GROWS_DOWNWARD
+
+/* Constructor list on stack is in reverse order. Go to the end of the
+ list and go backwards to call constructors in the right order. */
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *p, *beg = alloca (0); \
+ for (p = beg; *p; p++) \
+ ; \
+ while (p != beg) \
+ (*--p) (); \
+} while (0)
+
+#else
+
+/* Constructor list on stack is in correct order. Just call them. */
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *p, *beg = alloca (0); \
+ for (p = beg; *p; ) \
+ (*p++) (); \
+} while (0)
+
+#endif /* STACK_GROWS_DOWNWARD */
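In both branches above, alloca (0) is the traditional trick for obtaining the current stack pointer, which is where the .init fragments left the null-terminated list of constructor addresses. Restating the first (reverse-order) walk over an ordinary array, as a sketch rather than target code:

    typedef void (*func_ptr) (void);

    /* Mirror of the first DO_GLOBAL_CTORS_BODY: scan forward to the
       terminating null entry, then invoke the constructors back to front.  */
    static void
    run_ctors_reversed (func_ptr *beg)
    {
      func_ptr *p;
      for (p = beg; *p; p++)
        ;
      while (p != beg)
        (*--p) ();
    }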
diff --git a/gcc-4.2.1-5666.3/gcc/config/svr4.h b/gcc-4.2.1-5666.3/gcc/config/svr4.h
new file mode 100644
index 000000000..795049ce1
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/svr4.h
@@ -0,0 +1,212 @@
+/* Operating system specific defines to be used when targeting GCC for some
+ generic System V Release 4 system.
+ Copyright (C) 1991, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
+
+ To use this file, make up a line like that in config.gcc:
+
+ tm_file="$tm_file elfos.h svr4.h MACHINE/svr4.h"
+
+ where MACHINE is replaced by the name of the basic hardware that you
+ are targeting for. Then, in the file MACHINE/svr4.h, put any really
+ system-specific defines (or overrides of defines) which you find that
+ you need.
+*/
+
+/* Define a symbol indicating that we are using svr4.h. */
+#define USING_SVR4_H
+
+/* Cpp, assembler, linker, library, and startfile spec's. */
+
+/* This defines which switch letters take arguments. On svr4, most of
+ the normal cases (defined in gcc.c) apply, and we also have -h* and
+ -z* options (for the linker). Note however that there is no such
+ thing as a -T option for svr4. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'x' \
+ || (CHAR) == 'z')
+
+/* This defines which multi-letter switches take arguments. On svr4,
+ there are no such switches except those implemented by GCC itself. */
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \
+ && strcmp (STR, "Tbss"))
+
+/* Provide an ASM_SPEC appropriate for svr4. Here we try to support as
+ many of the specialized svr4 assembler options as seems reasonable,
+ given that there are certain options which we can't (or shouldn't)
+ support directly due to the fact that they conflict with other options
+ for other svr4 tools (e.g. ld) or with other options for GCC itself.
+ For example, we don't support the -o (output file) or -R (remove
+ input file) options because GCC already handles these things. We
+ also don't support the -m (run m4) option for the assembler because
+ that conflicts with the -m (produce load map) option of the svr4
+ linker. We do however allow passing arbitrary options to the svr4
+ assembler via the -Wa, option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -Ym,* or -Yd,*
+ option.
+
+ The svr4 assembler wants '-' on the command line if it's expected to
+ read its stdin.
+*/
+
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+
+#define AS_NEEDS_DASH_FOR_PIPED_INPUT
+
+/* Under svr4, the normal location of the `ld' and `as' programs is the
+ /usr/ccs/bin directory. */
+
+/* APPLE LOCAL begin mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+/* APPLE LOCAL end mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#undef MD_EXEC_PREFIX
+#define MD_EXEC_PREFIX "/usr/ccs/bin/"
+#endif
+
+/* Under svr4, the normal location of the various *crt*.o files is the
+ /usr/ccs/lib directory. */
+
+/* APPLE LOCAL begin mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+/* APPLE LOCAL end mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#undef MD_STARTFILE_PREFIX
+#define MD_STARTFILE_PREFIX "/usr/ccs/lib/"
+#endif
+
+/* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default
+ standard C library (unless we are building a shared library). */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
+
+/* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own
+ magical crtend.o file (see crtstuff.c) which provides part of the
+ support for getting C++ file-scope static object constructed before
+ entering `main', followed by the normal svr3/svr4 "finalizer" file,
+ which is either `gcrtn.o' or `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o%s}%{!pg:crtn.o%s}"
+
+/* Provide a LINK_SPEC appropriate for svr4. Here we provide support
+ for the special GCC options -static, -shared, and -symbolic which
+ allow us to link things in one of these three modes by applying the
+ appropriate combinations of options at link-time. We also provide
+ support here for as many of the other svr4 linker options as seems
+ reasonable, given that some of them conflict with options for other
+ svr4 tools (e.g. the assembler). In particular, we do support the
+ -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*, -l*,
+ -o*, -r, -s, -u*, and -L* options are directly supported by gcc.c
+ itself. We don't directly support the -m (generate load map)
+ option because that conflicts with the -m (run m4) option of the
+ svr4 assembler. We also don't directly support the svr4 linker's
+ -I* or -M* options because these conflict with existing GCC
+ options. We do however allow passing arbitrary options to the svr4
+ linker via the -Wl, option, in gcc.c. We don't support the svr4
+ linker's -a option at all because it is totally useless and because
+ it conflicts with GCC's own -a option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -YP,* option.
+
+ When the -G link option is used (-shared and -symbolic) a final link is
+ not being done. */
+
+#undef LINK_SPEC
+/* APPLE LOCAL begin mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#ifdef CROSS_DIRECTORY_STRUCTURE
+/* APPLE LOCAL end mainline 4.3 2006-12-13 CROSS_DIRECTORY_STRUCTURE 4697325 */
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{b} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{Qy:} %{!Qn:-Qy}"
+#else
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{b} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ccs/lib:/usr/lib}} \
+ %{Qy:} %{!Qn:-Qy}"
+#endif
+
+/* Gcc automatically adds in one of the files /usr/ccs/lib/values-Xc.o
+ or /usr/ccs/lib/values-Xa.o for each final link step (depending
+ upon the other gcc options selected, such as -ansi). These files
+ each contain one (initialized) copy of a special variable called
+ `_lib_version'. Each one of these files has `_lib_version' initialized
+ to a different (enum) value. The SVR4 library routines query the
+   value of `_lib_version' at run time to decide how they should behave.
+ Specifically, they decide (based upon the value of `_lib_version')
+ if they will act in a strictly ANSI conforming manner or not. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}}\
+ %{pg:gcrti.o%s}%{!pg:crti.o%s} \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi:values-Xa.o%s} \
+ crtbegin.o%s"
+
+/* The numbers used to denote specific machine registers in the System V
+ Release 4 DWARF debugging information are quite likely to be totally
+ different from the numbers used in BSD stabs debugging information
+ for the same kind of target machine. Thus, we undefine the macro
+ DBX_REGISTER_NUMBER here as an extra inducement to get people to
+ provide proper machine-specific definitions of DBX_REGISTER_NUMBER
+ (which is also used to provide DWARF registers numbers in dwarfout.c)
+ in their tm.h files which include this file. */
+
+#undef DBX_REGISTER_NUMBER
+
+/* Define the actual types of some ANSI-mandated types. (These
+ definitions should work for most SVR4 systems). */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define TARGET_POSIX_IO
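A sketch of the _lib_version mechanism the values-Xc.o/values-Xa.o comment above describes; the enum constants and the routine are invented for illustration and are not the actual SVR4 libc source:

    #include <errno.h>

    enum lib_version { STRICT_ANSI, EXTENDED };
    extern enum lib_version _lib_version;  /* one copy per values-X?.o */

    /* A libc-style routine branching on _lib_version at run time.  */
    double
    checked_op (double x)
    {
      if (x < 0.0)
        {
          if (_lib_version == STRICT_ANSI)
            errno = EDOM;  /* strictly ANSI-conforming error reporting */
          return 0.0;      /* extended mode might instead warn, etc. */
        }
      return x;
    }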
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-darwin b/gcc-4.2.1-5666.3/gcc/config/t-darwin
new file mode 100644
index 000000000..5a2dcd35c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-darwin
@@ -0,0 +1,53 @@
+# APPLE LOCAL constant CFStrings
+darwin.o: $(HASHTAB_H) toplev.h
+
+# APPLE LOCAL begin pragma fenv
+# APPLE LOCAL begin optimization pragmas 3124235/3420242
+darwin-c.o: options.h opts.h
+# APPLE LOCAL end optimization pragmas 3124235/3420242
+# APPLE LOCAL end pragma fenv
+
+darwin.o: $(srcdir)/config/darwin.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TM_H) $(RTL_H) $(REGS_H) hard-reg-set.h $(REAL_H) insn-config.h \
+ conditions.h insn-flags.h output.h insn-attr.h flags.h $(TREE_H) expr.h \
+ reload.h function.h $(GGC_H) langhooks.h $(TARGET_H) $(TM_P_H) gt-darwin.h \
+ config/darwin-sections.def
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/darwin.c
+
+darwin-c.o: $(srcdir)/config/darwin-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TM_H) $(CPPLIB_H) $(TREE_H) c-pragma.h $(C_TREE_H) toplev.h $(TM_P_H) \
+ c-incpath.h flags.h $(C_COMMON_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/darwin-c.c $(PREPROCESSOR_DEFINES)
+
+gt-darwin.h : s-gtype ; @true
+
+# APPLE LOCAL begin mainline 2007-06-14 5235474
+darwin-driver.o: $(srcdir)/config/darwin-driver.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(GCC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/darwin-driver.c
+# APPLE LOCAL end mainline 2007-06-14 5235474
+
+ # APPLE LOCAL begin 4505290
+# How to build crt3.o
+EXTRA_MULTILIB_PARTS=crt3.o
+# Pass -fno-tree-dominator-opts to work around bug 26840.
+$(T)crt3$(objext): $(srcdir)/config/darwin-crt3.c $(GCC_PASSES) \
+ $(TCONFIG_H) stmp-int-hdrs tsystem.h
+ # APPLE LOCAL use -mlongcall for large text support
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -mlongcall \
+ -fno-tree-dominator-opts $(DARWIN_EXTRA_CRT_BUILD_CFLAGS) \
+ -c $(srcdir)/config/darwin-crt3.c -o $(T)crt3$(objext)
+
+ # APPLE LOCAL end 4505290
+# Use unwind-dw2-fde-darwin
+LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde-darwin.c \
+ $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
+LIB2ADDEHDEP = unwind.inc unwind-dw2-fde.h unwind-dw2-fde.c
+
+# -pipe because there's an assembler bug, 4077127, which causes
+# it to not properly process the first # directive, causing temporary
+# file names to appear in stabs, causing the bootstrap to fail. Using -pipe
+# works around this by not having any temporary file names.
+TARGET_LIBGCC2_CFLAGS = -fPIC -pipe
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-dfprules b/gcc-4.2.1-5666.3/gcc/config/t-dfprules
new file mode 100644
index 000000000..6bf6246d8
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-dfprules
@@ -0,0 +1,10 @@
+# Use DFP_ENABLE to build decimal floating point support routines for
+# all decimal floating point types (32-bit, 64-bit and 128-bit). We
+# use `true' for clarity, but any value will do.
+#
+DFP_ENABLE = true
+
+# DFP_CFLAGS can be used to pass target-specific CFLAGS when compiling
+# dfp-bit.c. This is useful for overriding the definition of macros.
+#
+# DFP_CFLAGS = -DFOO=bar
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-freebsd b/gcc-4.2.1-5666.3/gcc/config/t-freebsd
new file mode 100644
index 000000000..211dbdf6b
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-freebsd
@@ -0,0 +1,5 @@
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = -fPIC
+
+# Compile libgcc.a with pic.
+TARGET_LIBGCC2_CFLAGS += -fPIC
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-freebsd-thread b/gcc-4.2.1-5666.3/gcc/config/t-freebsd-thread
new file mode 100644
index 000000000..6e5c64f78
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-freebsd-thread
@@ -0,0 +1,2 @@
+# This is currently needed to compile libgcc2 for threads support
+TARGET_LIBGCC2_CFLAGS += -pthread
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-gnu b/gcc-4.2.1-5666.3/gcc/config/t-gnu
new file mode 100644
index 000000000..7be5d00a7
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-gnu
@@ -0,0 +1,2 @@
+# In GNU, "/usr" is a four-letter word.
+NATIVE_SYSTEM_HEADER_DIR = /include
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-libc-ok b/gcc-4.2.1-5666.3/gcc/config/t-libc-ok
new file mode 100644
index 000000000..4dae812ae
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-libc-ok
@@ -0,0 +1 @@
+CRTSTUFF_T_CFLAGS_S=-fPIC
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-libgcc-pic b/gcc-4.2.1-5666.3/gcc/config/t-libgcc-pic
new file mode 100644
index 000000000..ff935fe1e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-libgcc-pic
@@ -0,0 +1,2 @@
+# Compile libgcc2.a with pic.
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-libunwind b/gcc-4.2.1-5666.3/gcc/config/t-libunwind
new file mode 100644
index 000000000..121ce2e47
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-libunwind
@@ -0,0 +1,12 @@
+# Use the system libunwind library.
+#
+# Override the default value from t-slibgcc-elf-ver and mention -lunwind
+# so that the resulting libgcc_s.so has the necessary DT_NEEDED entry for
+# libunwind.
+SHLIB_LC = -lunwind -lc
+LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
+ $(srcdir)/unwind-compat.c $(srcdir)/unwind-dw2-fde-compat.c
+LIB2ADDEHSTATIC = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c
+
+T_CFLAGS += -DUSE_LIBUNWIND_EXCEPTIONS
+TARGET_LIBGCC2_CFLAGS += -DUSE_GAS_SYMVER
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-libunwind-elf b/gcc-4.2.1-5666.3/gcc/config/t-libunwind-elf
new file mode 100644
index 000000000..1fa8aa252
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-libunwind-elf
@@ -0,0 +1,31 @@
+# Build libunwind for ELF with the GNU linker.
+
+# Use unwind-dw2-fde-glibc
+LIBUNWIND = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde-glibc.c
+LIBUNWINDDEP = unwind.inc unwind-dw2-fde.h unwind-dw2-fde.c
+
+SHLIBUNWIND_SOVERSION = 7
+SHLIBUNWIND_SONAME = @shlib_base_name@.so.$(SHLIBUNWIND_SOVERSION)
+
+SHLIBUNWIND_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared \
+ -nodefaultlibs -Wl,-h,$(SHLIBUNWIND_SONAME) \
+ -Wl,-z,text -Wl,-z,defs -o $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME).tmp \
+ @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ rm -f $(SHLIB_DIR)/$(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME) ]; then \
+ mv -f $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME) \
+ $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME).tmp \
+ $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME) && \
+ $(LN_S) $(SHLIBUNWIND_SONAME) $(SHLIB_DIR)/$(SHLIB_SOLINK)
+
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIBUNWIND_INSTALL = \
+ $$(SHELL) $$(srcdir)/mkinstalldirs $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIBUNWIND_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIBUNWIND_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIBUNWIND_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-linux b/gcc-4.2.1-5666.3/gcc/config/t-linux
new file mode 100644
index 000000000..f3e792bcc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-linux
@@ -0,0 +1,13 @@
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = $(CRTSTUFF_T_CFLAGS) -fPIC
+# Compile libgcc2.a with pic.
+TARGET_LIBGCC2_CFLAGS = -fPIC
+
+# Override t-slibgcc-elf-ver to export some libgcc symbols with
+# the symbol versions that glibc used.
+SHLIB_MAPFILES += $(srcdir)/config/libgcc-glibc.ver
+
+# Use unwind-dw2-fde-glibc
+LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde-glibc.c \
+ $(srcdir)/unwind-sjlj.c $(srcdir)/gthr-gnat.c $(srcdir)/unwind-c.c
+LIB2ADDEHDEP = unwind.inc unwind-dw2-fde.h unwind-dw2-fde.c
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-lynx b/gcc-4.2.1-5666.3/gcc/config/t-lynx
new file mode 100644
index 000000000..a14a53c61
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-lynx
@@ -0,0 +1,15 @@
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = -fPIC
+
+# Compile libgcc2.a with pic.
+TARGET_LIBGCC2_CFLAGS = -fPIC
+
+MULTILIB_OPTIONS = mthreads
+MULTILIB_DIRNAMES = thread
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Local Variables:
+# mode: makefile
+# End:
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-netbsd b/gcc-4.2.1-5666.3/gcc/config/t-netbsd
new file mode 100644
index 000000000..843e4100d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-netbsd
@@ -0,0 +1,2 @@
+# Always build crtstuff with PIC.
+CRTSTUFF_T_CFLAGS = -fPIC
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-openbsd b/gcc-4.2.1-5666.3/gcc/config/t-openbsd
new file mode 100644
index 000000000..2289f154e
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-openbsd
@@ -0,0 +1,2 @@
+# We don't need GCC's own include files.
+USER_H =
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-openbsd-thread b/gcc-4.2.1-5666.3/gcc/config/t-openbsd-thread
new file mode 100644
index 000000000..4b25f25a4
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-openbsd-thread
@@ -0,0 +1,5 @@
+# This is currently needed to compile libgcc2 for threads support
+TARGET_LIBGCC2_CFLAGS=-pthread
+#T_CFLAGS=-pthread
+#T_CPPFLAGS=-pthread
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-rtems b/gcc-4.2.1-5666.3/gcc/config/t-rtems
new file mode 100644
index 000000000..dfbd3afe9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-rtems
@@ -0,0 +1,7 @@
+# RTEMS always has limits.h.
+LIMITS_H_TEST = true
+
+# If we are building next to newlib, this will let us find the RTEMS
+# limits.h when building libgcc2. Otherwise, newlib must be installed
+# first.
+LIBGCC2_INCLUDES = -I$(srcdir)/../newlib/libc/sys/rtems/include
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-darwin b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-darwin
new file mode 100644
index 000000000..90491df9c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-darwin
@@ -0,0 +1,104 @@
+# APPLE LOCAL file mainline
+# Build a shared libgcc library with the darwin linker.
+SHLIB_SOVERSION = 1
+SHLIB_VERSTRING = -compatibility_version $(SHLIB_SOVERSION) -current_version $(SHLIB_SOVERSION).0
+SHLIB_EXT = .dylib
+SHLIB_SUFFIX = `if test @multilib_dir@ = ppc64 ; then echo _@multilib_dir@ ; fi`
+SHLIB_INSTALL_NAME = @shlib_base_name@$(SHLIB_SUFFIX).$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_SONAME = @shlib_base_name@.$(SHLIB_SOVERSION)$(SHLIB_EXT)
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_MAP = @shlib_map_file@
+SHLIB_OBJS = @shlib_objs@
+SHLIB_DIR = @multilib_dir@
+SHLIB_LC = -lc
+
+# Darwin only searches in /usr/lib for shared libraries, not in subdirectories,
+# so the libgcc variants have different names, not different locations.
+# Note that this version is used for the loader, not the linker; the linker
+# uses the stub versions named by $(LIBGCC).
+# APPLE LOCAL begin mainline
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -dynamiclib -nodefaultlibs \
+ -install_name $(slibdir)/$(SHLIB_INSTALL_NAME) \
+ -single_module -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp \
+ -Wl,-exported_symbols_list,$(SHLIB_MAP) \
+ $(SHLIB_VERSTRING) \
+ @multilib_flags@ $(SHLIB_OBJS) $(SHLIB_LC)
+# APPLE LOCAL end mainline
+
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir); \
+ $(INSTALL_DATA) $(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)/$(SHLIB_SONAME)
+
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MKMAP_OPTS = -v leading_underscore=1
+SHLIB_MAPFILES += $(srcdir)/libgcc-std.ver
+
+# Must use a different directive for hidden visibility in assembly sources.
+ASM_HIDDEN_OP = .private_extern
+
+# In order to support -mmacosx-version-min, you need to have multiple
+# different libgcc_s libraries that actually get linked against, one for
+# each system version supported. They are 'stub' libraries that
+# contain no code, just a list of exported symbols.
+# The actual use of the libraries is controlled by REAL_LIBGCC_SPEC.
+#
+# This assumes each multilib corresponds to a different architecture.
+libgcc_s.%.dylib : $(SHLIB_VERPFX).%.ver stmp-multilib
+ # When building multilibbed target libraries, all the required
+ # libraries are expected to exist in the multilib directory.
+ MLIBS=`$(GCC_FOR_TARGET) --print-multi-lib \
+ | sed -e 's/;.*$$//' -e '/^\.$$/d'` ; \
+ for mlib in $$MLIBS ; do \
+ rm -f $${mlib}/$@ || exit 1 ; \
+ $(LN_S) ../$@ $${mlib}/$@ || exit 1 ; \
+ done
+ MLIBS=`$(GCC_FOR_TARGET) --print-multi-lib \
+ | sed -e 's/;.*$$//' -e '/^\.$$/d'` ; \
+ for mlib in '' $$MLIBS ; do \
+ $(STRIP_FOR_TARGET) -o $(@)_T$${mlib} \
+ -s $(SHLIB_VERPFX).$(*).ver -c -u \
+ ./$${mlib}/libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT).tmp || exit 1 ; \
+ done
+ $(LIPO_FOR_TARGET) -output $@ -create $(@)_T*
+ rm $(@)_T*
+
+libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT): stmp-multilib libgcc_s.10.4.dylib \
+ libgcc_s.10.5.dylib
+ # When building multilibbed target libraries, all the required
+ # libraries are expected to exist in the multilib directory.
+ MLIBS=`$(GCC_FOR_TARGET) --print-multi-lib \
+ | sed -e 's/;.*$$//' -e '/^\.$$/d'` ; \
+ for mlib in '' $$MLIBS ; do \
+ cp ./$${mlib}/libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT).tmp \
+ ./libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T_$${mlib} || exit 1 ; \
+ done
+ $(LIPO_FOR_TARGET) -output libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT) \
+ -create libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T*
+ rm libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT)_T*
+
+# From the point-of-view of the Makefiles, libgcc is built by the 'strip'
+# and 'lipo' commands above.
+LIBGCC=libgcc_s.$(SHLIB_SOVERSION)$(SHLIB_EXT) libgcc_s.10.4.dylib \
+ libgcc_s.10.5.dylib stmp-multilib
+
+INSTALL_FILES=libgcc_s.10.4.dylib libgcc_s.10.5.dylib libgcc_s.1.dylib
+
+install-darwin-libgcc-stubs : $(INSTALL_FILES) installdirs
+ for d in $(INSTALL_FILES) ; do \
+ $(INSTALL_DATA) $$d $(DESTDIR)$(slibdir)/$$d || exit 1 ; \
+ done
+ if [ -f $(DESTDIR)$(slibdir)/libgcc_s_ppc64.1.dylib ]; then \
+ rm -f $(DESTDIR)$(slibdir)/libgcc_s_ppc64.1.dylib; \
+ else true; fi
+ $(LN_S) libgcc_s.1.dylib \
+ $(DESTDIR)$(slibdir)/libgcc_s_ppc64.1.dylib
+ if [ -f $(DESTDIR)$(slibdir)/libgcc_s_x86_64.1.dylib ]; then \
+ rm -f $(DESTDIR)$(slibdir)/libgcc_s_x86_64.1.dylib; \
+ else true; fi
+ $(LN_S) libgcc_s.1.dylib \
+ $(DESTDIR)$(slibdir)/libgcc_s_x86_64.1.dylib
+
+INSTALL_LIBGCC = install-darwin-libgcc-stubs install-multilib
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-elf-ver b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-elf-ver
new file mode 100644
index 000000000..346143c5c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-elf-ver
@@ -0,0 +1,36 @@
+# Build a shared libgcc library for ELF with symbol versioning
+# with the GNU linker.
+
+SHLIB_EXT = .so
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_SOVERSION = 1
+SHLIB_SONAME = @shlib_base_name@.so.$(SHLIB_SOVERSION)
+SHLIB_MAP = @shlib_map_file@
+SHLIB_OBJS = @shlib_objs@
+SHLIB_DIR = @multilib_dir@
+SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
+SHLIB_LC = -lc
+
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,--soname=$(SHLIB_SONAME) \
+ -Wl,--version-script=$(SHLIB_MAP) \
+ -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp @multilib_flags@ \
+ $(SHLIB_OBJS) $(SHLIB_LC) && \
+ rm -f $(SHLIB_DIR)/$(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_DIR)/$(SHLIB_SONAME) ]; then \
+ mv -f $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $(SHLIB_DIR)/$(SHLIB_SONAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_DIR)/$(SHLIB_SONAME).tmp $(SHLIB_DIR)/$(SHLIB_SONAME) && \
+ $(LN_S) $(SHLIB_SONAME) $(SHLIB_DIR)/$(SHLIB_SOLINK)
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
+SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-nolc-override b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-nolc-override
new file mode 100644
index 000000000..959d2cc2a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-nolc-override
@@ -0,0 +1 @@
+SHLIB_LC =
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-sld b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-sld
new file mode 100644
index 000000000..02d21c6de
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-slibgcc-sld
@@ -0,0 +1,32 @@
+# Build a shared libgcc library with the Solaris linker.
+
+SHLIB_EXT = .so
+SHLIB_SOLINK = @shlib_base_name@.so
+SHLIB_SONAME = @shlib_base_name@.so.1
+SHLIB_MAP = @shlib_map_file@
+SHLIB_OBJS = @shlib_objs@
+SHLIB_DIR = @multilib_dir@
+SHLIB_SLIBDIR_QUAL = @shlib_slibdir_qual@
+
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-h,$(SHLIB_SONAME) -Wl,-z,text -Wl,-z,defs \
+ -Wl,-M,$(SHLIB_MAP) -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp \
+ @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ rm -f $(SHLIB_DIR)/$(SHLIB_SOLINK) && \
+ if [ -f $(SHLIB_DIR)/$(SHLIB_SONAME) ]; then \
+ mv -f $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $(SHLIB_DIR)/$(SHLIB_SONAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_DIR)/$(SHLIB_SONAME).tmp $(SHLIB_DIR)/$(SHLIB_SONAME) && \
+ $(LN_S) $(SHLIB_SONAME) $(SHLIB_DIR)/$(SHLIB_SOLINK)
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL); \
+ $(INSTALL_DATA) $(SHLIB_DIR)/$(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SONAME); \
+ rm -f $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK); \
+ $(LN_S) $(SHLIB_SONAME) \
+ $$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
+SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-sol2 b/gcc-4.2.1-5666.3/gcc/config/t-sol2
new file mode 100644
index 000000000..2339a0bc9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-sol2
@@ -0,0 +1,12 @@
+# Solaris-specific format checking and pragmas
+sol2-c.o: $(srcdir)/config/sol2-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ tree.h c-format.h intl.h $(CPPLIB_H) c-pragma.h $(TM_H) $(TM_P_H) \
+ toplev.h $(C_COMMON_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/sol2-c.c
+
+# Solaris-specific attributes
+sol2.o: $(srcdir)/config/sol2.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ tree.h $(TM_H) $(TM_P_H) toplev.h $(GGC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/sol2.c
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-svr4 b/gcc-4.2.1-5666.3/gcc/config/t-svr4
new file mode 100644
index 000000000..52c9b28fc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-svr4
@@ -0,0 +1,11 @@
+# We need to use -fPIC when we are using gcc to compile the routines in
+# crtstuff.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fPIC when compiling the
+# routines in crtstuff.c. Likewise for libgcc2.c.
+
+CRTSTUFF_T_CFLAGS = -fPIC
+TARGET_LIBGCC2_CFLAGS = -fPIC
+
+# See all the declarations.
+FIXPROTO_DEFINES = -D_XOPEN_SOURCE
diff --git a/gcc-4.2.1-5666.3/gcc/config/t-vxworks b/gcc-4.2.1-5666.3/gcc/config/t-vxworks
new file mode 100644
index 000000000..677b3b454
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/t-vxworks
@@ -0,0 +1,27 @@
+# Don't run fixproto.
+STMP_FIXPROTO =
+
+# Build libgcc using the multilib mechanism
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# No special flags needed for libgcc.a
+TARGET_LIBGCC2_CFLAGS =
+
+# Don't build libgcc.a with debug info
+LIBGCC2_DEBUG_CFLAGS =
+
+# Extra libgcc2 module used by gthr-vxworks.h functions
+LIB2FUNCS_EXTRA = $(srcdir)/config/vxlib.c
+
+# Some runtime modules need these. Can't set extra_headers in config.gcc
+# because the paths are always made absolute to the cpu config dir.
+EXTRA_HEADERS += $(srcdir)/gthr-vxworks.h gthr-default.h
+
+# This ensures that the correct target headers are used; some
+# VxWorks system headers have names that collide with GCC's
+# internal (host) headers, e.g. regs.h.
+# FIXME: May not be necessary anymore.
+LIBGCC2_INCLUDES="-I$(SYSTEM_HEADER_DIR)"
+
+EXTRA_MULTILIB_PARTS =
diff --git a/gcc-4.2.1-5666.3/gcc/config/tm-dwarf2.h b/gcc-4.2.1-5666.3/gcc/config/tm-dwarf2.h
new file mode 100644
index 000000000..d08646ecc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/tm-dwarf2.h
@@ -0,0 +1,4 @@
+/* Enable Dwarf2 debugging and make it the default */
+#define DWARF2_DEBUGGING_INFO 1
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
diff --git a/gcc-4.2.1-5666.3/gcc/config/udivmod.c b/gcc-4.2.1-5666.3/gcc/config/udivmod.c
new file mode 100644
index 000000000..1395e9cc9
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/udivmod.c
@@ -0,0 +1,14 @@
+long udivmodsi4 ();
+
+long
+__udivsi3 (long a, long b)
+{
+ return udivmodsi4 (a, b, 0);
+}
+
+long
+__umodsi3 (long a, long b)
+{
+ return udivmodsi4 (a, b, 1);
+}
+
diff --git a/gcc-4.2.1-5666.3/gcc/config/udivmodsi4.c b/gcc-4.2.1-5666.3/gcc/config/udivmodsi4.c
new file mode 100644
index 000000000..83c2340c2
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/udivmodsi4.c
@@ -0,0 +1,24 @@
+unsigned long
+udivmodsi4(unsigned long num, unsigned long den, int modwanted)
+{
+ unsigned long bit = 1;
+ unsigned long res = 0;
+
+ while (den < num && bit && !(den & (1L<<31)))
+ {
+ den <<=1;
+ bit <<=1;
+ }
+ while (bit)
+ {
+ if (num >= den)
+ {
+ num -= den;
+ res |= bit;
+ }
+ bit >>=1;
+ den >>=1;
+ }
+ if (modwanted) return num;
+ return res;
+}
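udivmodsi4 above is classic shift-and-subtract division: den is shifted left (with bit tracking the corresponding quotient bit) until it would pass num or its top bit is set, then each step back down subtracts where possible; modwanted selects the remainder or the quotient. A small hypothetical harness:

    #include <stdio.h>

    extern unsigned long udivmodsi4 (unsigned long, unsigned long, int);

    int
    main (void)
    {
      printf ("100 / 7 = %lu\n", udivmodsi4 (100, 7, 0));  /* prints 14 */
      printf ("100 %% 7 = %lu\n", udivmodsi4 (100, 7, 1)); /* prints 2  */
      return 0;
    }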
diff --git a/gcc-4.2.1-5666.3/gcc/config/usegas.h b/gcc-4.2.1-5666.3/gcc/config/usegas.h
new file mode 100644
index 000000000..8e1b47a90
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/usegas.h
@@ -0,0 +1,21 @@
+/* Copyright (C) 2001 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* Just set a single flag we can test for it inside other files. */
+#define USE_GAS 1
diff --git a/gcc-4.2.1-5666.3/gcc/config/vx-common.h b/gcc-4.2.1-5666.3/gcc/config/vx-common.h
new file mode 100644
index 000000000..cb8d7a080
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/vx-common.h
@@ -0,0 +1,88 @@
+/* Target-independent configuration for VxWorks and VxWorks AE.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+   Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* VxWorks headers are C++-aware. */
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C
+
+/* Most of these will probably be overridden by subsequent headers. We
+ undefine them here just in case, and define VXWORKS_ versions of each,
+ to be used in port-specific vxworks.h. */
+#undef LIB_SPEC
+#undef LINK_SPEC
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC VXWORKS_LIBGCC_SPEC
+#undef STARTFILE_SPEC
+#undef ENDFILE_SPEC
+
+/* Most of these macros are overridden in "config/vxworks.h" or
+ "config/vxworksae.h" and are here merely for documentation
+ purposes. */
+#define VXWORKS_ADDITIONAL_CPP_SPEC ""
+#define VXWORKS_LIB_SPEC ""
+#define VXWORKS_LINK_SPEC ""
+#define VXWORKS_LIBGCC_SPEC ""
+#define VXWORKS_STARTFILE_SPEC ""
+#define VXWORKS_ENDFILE_SPEC ""
+
+/* VxWorks cannot have dots in constructor labels, because it uses a
+ mutant variation of collect2 that generates C code instead of
+ assembly. Thus each constructor label must be a legitimate C
+ symbol. FIXME: Have VxWorks use real collect2 instead. */
+#undef NO_DOLLAR_IN_LABEL
+#define NO_DOT_IN_LABEL
+
+/* VxWorks uses wchar_t == unsigned short (UCS2) on all architectures. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* Dwarf2 unwind info is not supported. */
+#define DWARF2_UNWIND_INFO 0
+
+/* VxWorks uses DWARF2. */
+#define DWARF2_DEBUGGING_INFO 1
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* None of these other formats is supported. */
+#undef DWARF_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#undef SDB_DEBUGGING_INFO
+#undef XCOFF_DEBUGGING_INFO
+#undef VMS_DEBUGGING_INFO
+
+/* Kernel mode doesn't have ctors/dtors, but RTP mode does. */
+#define TARGET_HAVE_CTORS_DTORS false
+#define VXWORKS_OVERRIDE_OPTIONS /* empty */
+
+/* No math library needed. */
+#define MATH_LIBRARY ""
+
+/* No profiling. */
+#define VXWORKS_FUNCTION_PROFILER(FILE, LABELNO) do \
+{ \
+ sorry ("profiler support for VxWorks"); \
+} while (0)
+
+/* We occasionally need to distinguish between the VxWorks variants. */
+#define VXWORKS_KIND_NORMAL 1
+#define VXWORKS_KIND_AE 2
diff --git a/gcc-4.2.1-5666.3/gcc/config/vxlib.c b/gcc-4.2.1-5666.3/gcc/config/vxlib.c
new file mode 100644
index 000000000..8625981a3
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/vxlib.c
@@ -0,0 +1,386 @@
+/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+ These are out-of-line routines called from gthr-vxworks.h. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "gthr.h"
+
+#if defined(__GTHREADS)
+#include <vxWorks.h>
+#ifndef __RTP__
+#include <vxLib.h>
+#endif
+#include <taskLib.h>
+#ifndef __RTP__
+#include <taskHookLib.h>
+#else
+# include <errno.h>
+#endif
+
+/* Init-once operation.
+
+ This would be a clone of the implementation from gthr-solaris.h,
+ except that we have a bootstrap problem - the whole point of this
+ exercise is to prevent double initialization, but if two threads
+ are racing with each other, once->mutex is liable to be initialized
+ by both. Then each thread will lock its own mutex, and proceed to
+ call the initialization routine.
+
+ So instead we use a bare atomic primitive (vxTas()) to handle
+ mutual exclusion. Threads losing the race then busy-wait, calling
+ taskDelay() to yield the processor, until the initialization is
+ completed. Inefficient, but reliable. */
+
+int
+__gthread_once (__gthread_once_t *guard, void (*func)(void))
+{
+ if (guard->done)
+ return 0;
+
+#ifdef __RTP__
+ __gthread_lock_library ();
+#else
+ while (!vxTas ((void *)&guard->busy))
+ taskDelay (1);
+#endif
+
+ /* Only one thread at a time gets here. Check ->done again, then
+ go ahead and call func() if no one has done it yet. */
+ if (!guard->done)
+ {
+ func ();
+ guard->done = 1;
+ }
+
+#ifdef __RTP__
+ __gthread_unlock_library ();
+#else
+ guard->busy = 0;
+#endif
+ return 0;
+}
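+
+/* Illustrative usage sketch; `table_guard', `build_table' and
+   `lookup' are hypothetical names, not part of this interface:
+
+       static __gthread_once_t table_guard = __GTHREAD_ONCE_INIT;
+       static int table[256];
+
+       static void build_table (void)
+       {
+         int i;
+         for (i = 0; i < 256; i++)
+           table[i] = i * i;
+       }
+
+       int lookup (int i)
+       {
+         __gthread_once (&table_guard, build_table);
+         return table[i];
+       }
+
+   However many tasks race into lookup(), build_table() runs exactly
+   once; late arrivals wait (busy-waiting in the kernel case) until
+   the initialization has completed.  */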
+
+/* Thread-local storage.
+
+ We reserve a field in the TCB to point to a dynamically allocated
+ array which is used to store TLS values. A TLS key is simply an
+ offset in this array. The exact location of the TCB field is not
+ known to this code nor to vxlib.c -- all access to it indirects
+ through the routines __gthread_get_tls_data and
+ __gthread_set_tls_data, which are provided by the VxWorks kernel.
+
+ There is also a global array which records which keys are valid and
+ which have destructors.
+
+ A task delete hook is installed to execute key destructors. The
+ routines __gthread_enter_tls_dtor_context and
+ __gthread_leave_tls_dtor_context, which are also provided by the
+ kernel, ensure that it is safe to call free() on memory allocated
+ by the task being deleted. (This is a no-op on VxWorks 5, but
+ a major undertaking on AE.)
+
+ The task delete hook is only installed when at least one thread
+ has TLS data. This is a necessary precaution that allows this
+ module to be unloaded; a module with a hook cannot be removed.
+
+ Since this interface is used to allocate only a small number of
+ keys, the table size is small and static, which simplifies the
+ code quite a bit. Revisit this if and when it becomes necessary. */
+
+#define MAX_KEYS 4
+
+/* This is the structure pointed to by the pointer returned
+ by __gthread_get_tls_data. */
+struct tls_data
+{
+ int *owner;
+ void *values[MAX_KEYS];
+ unsigned int generation[MAX_KEYS];
+};
+
+/* To make sure we only delete TLS data associated with this object,
+ include a pointer to a local variable in the TLS data object. */
+static int self_owner;
+
+/* The number of threads for this module which have active TLS data.
+ This is protected by tls_lock. */
+static int active_tls_threads;
+
+/* Kernel-provided routines.  */
+extern void *__gthread_get_tls_data (void);
+extern void __gthread_set_tls_data (void *data);
+
+extern void __gthread_enter_tls_dtor_context (void);
+extern void __gthread_leave_tls_dtor_context (void);
+
+
+/* This is a global structure which records all of the active keys.
+
+ A key is potentially valid (i.e. has been handed out by
+ __gthread_key_create) iff its generation count in this structure is
+ even. In that case, the matching entry in the dtors array is a
+ routine to be called when a thread terminates with a valid,
+ non-NULL specific value for that key.
+
+ A key is actually valid in a thread T iff the generation count
+ stored in this structure is equal to the generation count stored in
+ T's specific-value structure. */
+
+typedef void (*tls_dtor) (void *);
+
+struct tls_keys
+{
+ tls_dtor dtor[MAX_KEYS];
+ unsigned int generation[MAX_KEYS];
+};
+
+#define KEY_VALID_P(key) (!(tls_keys.generation[key] & 1))
+
+/* Note: if MAX_KEYS is increased, this initializer must be updated
+ to match. All the generation counts begin at 1, which means no
+ key is valid. */
+static struct tls_keys tls_keys =
+{
+ { 0, 0, 0, 0 },
+ { 1, 1, 1, 1 }
+};
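+
+/* Worked example of the generation scheme: key 2 starts at
+   generation 1 (odd, so invalid).  __gthread_key_create bumps it to
+   2 (even) and hands out key 2.  __gthread_setspecific then copies
+   generation 2 into the calling thread's tls_data.  If the key is
+   later deleted (generation 3) and recreated (generation 4), that
+   thread's stored generation 2 no longer matches, so its stale value
+   reads back as unset rather than leaking into the new key's
+   lifetime.  */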
+
+/* This lock protects the tls_keys structure. */
+static __gthread_mutex_t tls_lock;
+
+static __gthread_once_t tls_init_guard = __GTHREAD_ONCE_INIT;
+
+/* Internal routines. */
+
+/* The task TCB has just been deleted. Call the destructor
+ function for each TLS key that has both a destructor and
+ a non-NULL specific value in this thread.
+
+ This routine does not need to take tls_lock; the generation
+ count protects us from calling a stale destructor. It does
+ need to read tls_keys.dtor[key] atomically. */
+
+static void
+tls_delete_hook (void *tcb ATTRIBUTE_UNUSED)
+{
+ struct tls_data *data = __gthread_get_tls_data ();
+ __gthread_key_t key;
+
+ if (data && data->owner == &self_owner)
+ {
+ __gthread_enter_tls_dtor_context ();
+ for (key = 0; key < MAX_KEYS; key++)
+ {
+ if (data->generation[key] == tls_keys.generation[key])
+ {
+ tls_dtor dtor = tls_keys.dtor[key];
+
+ if (dtor)
+ dtor (data->values[key]);
+ }
+ }
+ free (data);
+
+ /* We can't handle an error here, so just leave the thread
+ marked as loaded if one occurs. */
+ if (__gthread_mutex_lock (&tls_lock) != ERROR)
+ {
+ active_tls_threads--;
+ if (active_tls_threads == 0)
+ taskDeleteHookDelete ((FUNCPTR)tls_delete_hook);
+ __gthread_mutex_unlock (&tls_lock);
+ }
+
+ __gthread_set_tls_data (0);
+ __gthread_leave_tls_dtor_context ();
+ }
+}
+
+/* Initialize global data used by the TLS system. */
+static void
+tls_init (void)
+{
+ __GTHREAD_MUTEX_INIT_FUNCTION (&tls_lock);
+}
+
+static void tls_destructor (void) __attribute__ ((destructor));
+static void
+tls_destructor (void)
+{
+#ifdef __RTP__
+ /* All threads but this one should have exited by now. */
+ tls_delete_hook (NULL);
+#else
+ /* Unregister the hook forcibly. The counter of active threads may
+ be incorrect, because constructors (like the C++ library's) and
+ destructors (like this one) run in the context of the shell rather
+ than in a task spawned from this module. */
+ taskDeleteHookDelete ((FUNCPTR)tls_delete_hook);
+#endif
+
+ if (tls_init_guard.done && __gthread_mutex_lock (&tls_lock) != ERROR)
+ semDelete (tls_lock);
+}
+
+/* External interface */
+
+/* Store in KEYP a value which can be passed to __gthread_setspecific/
+ __gthread_getspecific to store and retrieve a value which is
+ specific to each calling thread. If DTOR is not NULL, it will be
+ called when a thread terminates with a non-NULL specific value for
+ this key, with the value as its sole argument. */
+
+int
+__gthread_key_create (__gthread_key_t *keyp, tls_dtor dtor)
+{
+ __gthread_key_t key;
+
+ __gthread_once (&tls_init_guard, tls_init);
+
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return errno;
+
+ for (key = 0; key < MAX_KEYS; key++)
+ if (!KEY_VALID_P (key))
+ goto found_slot;
+
+ /* no room */
+ __gthread_mutex_unlock (&tls_lock);
+ return EAGAIN;
+
+ found_slot:
+ tls_keys.generation[key]++; /* making it even */
+ tls_keys.dtor[key] = dtor;
+ *keyp = key;
+ __gthread_mutex_unlock (&tls_lock);
+ return 0;
+}
+
+/* Invalidate KEY; it can no longer be used as an argument to
+ setspecific/getspecific. Note that this does NOT call destructor
+ functions for any live values for this key. */
+int
+__gthread_key_delete (__gthread_key_t key)
+{
+ if (key >= MAX_KEYS)
+ return EINVAL;
+
+ __gthread_once (&tls_init_guard, tls_init);
+
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return errno;
+
+ if (!KEY_VALID_P (key))
+ {
+ __gthread_mutex_unlock (&tls_lock);
+ return EINVAL;
+ }
+
+ tls_keys.generation[key]++; /* making it odd */
+ tls_keys.dtor[key] = 0;
+
+ __gthread_mutex_unlock (&tls_lock);
+ return 0;
+}
+
+/* Retrieve the thread-specific value for KEY. If it has never been
+ set in this thread, or KEY is invalid, returns NULL.
+
+ It does not matter if this function races with key_create or
+ key_delete; the worst that can happen is you get a value other than
+ the one that a serialized implementation would have provided. */
+
+void *
+__gthread_getspecific (__gthread_key_t key)
+{
+ struct tls_data *data;
+
+ if (key >= MAX_KEYS)
+ return 0;
+
+ data = __gthread_get_tls_data ();
+
+ if (!data)
+ return 0;
+
+ if (data->generation[key] != tls_keys.generation[key])
+ return 0;
+
+ return data->values[key];
+}
+
+/* Set the thread-specific value for KEY. If KEY is invalid, or
+ memory allocation fails, returns -1, otherwise 0.
+
+ The generation count protects this function against races with
+ key_create/key_delete; the worst thing that can happen is that a
+ value is successfully stored into a dead generation (and then
+ immediately becomes invalid). However, we do have to make sure
+ to read tls_keys.generation[key] atomically. */
+
+int
+__gthread_setspecific (__gthread_key_t key, void *value)
+{
+ struct tls_data *data;
+ unsigned int generation;
+
+ if (key >= MAX_KEYS)
+ return EINVAL;
+
+ data = __gthread_get_tls_data ();
+ if (!data)
+ {
+ if (__gthread_mutex_lock (&tls_lock) == ERROR)
+ return ENOMEM;
+ if (active_tls_threads == 0)
+ taskDeleteHookAdd ((FUNCPTR)tls_delete_hook);
+ active_tls_threads++;
+ __gthread_mutex_unlock (&tls_lock);
+
+ data = malloc (sizeof (struct tls_data));
+ if (!data)
+ return ENOMEM;
+
+ memset (data, 0, sizeof (struct tls_data));
+ data->owner = &self_owner;
+ __gthread_set_tls_data (data);
+ }
+
+ generation = tls_keys.generation[key];
+
+ if (generation & 1)
+ return EINVAL;
+
+ data->generation[key] = generation;
+ data->values[key] = value;
+
+ return 0;
+}
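+
+/* Illustrative end-to-end sketch of this interface; `buf_key',
+   `buf_once', `make_key' and `get_buffer' are hypothetical names:
+
+       static __gthread_key_t buf_key;
+       static __gthread_once_t buf_once = __GTHREAD_ONCE_INIT;
+
+       static void make_key (void)
+       {
+         __gthread_key_create (&buf_key, free);
+       }
+
+       char *get_buffer (void)
+       {
+         char *buf;
+         __gthread_once (&buf_once, make_key);
+         buf = __gthread_getspecific (buf_key);
+         if (!buf)
+           {
+             buf = malloc (128);
+             __gthread_setspecific (buf_key, buf);
+           }
+         return buf;
+       }
+
+   Each task sees its own 128-byte buffer, and the free() destructor
+   runs from tls_delete_hook when a task holding one is deleted.  */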
+#endif /* __GTHREADS */
diff --git a/gcc-4.2.1-5666.3/gcc/config/vxworks.h b/gcc-4.2.1-5666.3/gcc/config/vxworks.h
new file mode 100644
index 000000000..70345e246
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/vxworks.h
@@ -0,0 +1,103 @@
+/* Common VxWorks target definitions for GNU compiler.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+ Contributed by Wind River Systems.
+ Rewritten by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* In kernel mode, VxWorks provides all the libraries itself, as well as
+ the functionality of startup files, etc. In RTP mode, it behaves more
+ like a traditional Unix, with more external files. Most of our specs
+ must be aware of the difference. */
+
+/* The directory containing the VxWorks target headers. */
+#define VXWORKS_TARGET_DIR "/home/tornado/base6/target"
+
+/* Since we provide a default -isystem, expand -isystem on the command
+ line early. */
+#undef VXWORKS_ADDITIONAL_CPP_SPEC
+#define VXWORKS_ADDITIONAL_CPP_SPEC " \
+ %{!nostdinc:%{isystem*}} \
+ %{mrtp: -D__RTP__=1 \
+ %{!nostdinc:-isystem " VXWORKS_TARGET_DIR "/usr/h}} \
+ %{!mrtp:-D_WRS_KERNEL=1 \
+ %{!nostdinc:-isystem " VXWORKS_TARGET_DIR "/h}}"
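+
+/* Illustrative expansion of the spec above (assuming no -nostdinc):
+   with -mrtp it contributes roughly
+       -D__RTP__=1 -isystem /home/tornado/base6/target/usr/h
+   while a kernel mode compile instead gets
+       -D_WRS_KERNEL=1 -isystem /home/tornado/base6/target/h
+   and -nostdinc suppresses the default -isystem in either mode.  */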
+
+/* The references to __init and __fini will be satisfied by
+ libc_internal.a. */
+#undef VXWORKS_LIB_SPEC
+#define VXWORKS_LIB_SPEC \
+"%{mrtp:%{shared:-u " USER_LABEL_PREFIX "__init -u " USER_LABEL_PREFIX "__fini} \
+ %{!shared:%{non-static:-u " USER_LABEL_PREFIX "_STI__6__rtld -ldl} \
+ --start-group -lc -lgcc -lc_internal -lnet -ldsi \
+ --end-group}}"
+
+/* The no-op spec for "-shared" below is present because otherwise GCC
+ will treat it as an unrecognized option. */
+#undef VXWORKS_LINK_SPEC
+#define VXWORKS_LINK_SPEC \
+"%{!mrtp:-r} \
+ %{!shared: \
+ %{mrtp:-q %{h*} \
+ %{R*} %{!Wl,-T*: %{!T*: %(link_start) }} \
+ %(link_target) %(link_os)}} \
+ %{v:-V} \
+ %{shared:-shared} \
+ %{Bstatic:-Bstatic} \
+ %{Bdynamic:-Bdynamic} \
+ %{!Xbind-lazy:-z now} \
+ %{Xbind-now:%{Xbind-lazy: \
+ %e-Xbind-now and -Xbind-lazy are incompatible}} \
+ %{mrtp:%{!shared:%{!non-static:-static} \
+ %{non-static:--force-dynamic --export-dynamic}}}"
+
+/* For VxWorks, the system provides libc_internal.a. This is a superset
+ of libgcc.a; we want to use it. Make sure not to dynamically export
+ any of its symbols, though. Always look for libgcc.a first so that
+ we get the latest versions of the GNU intrinsics during our builds. */
+#undef VXWORKS_LIBGCC_SPEC
+#define VXWORKS_LIBGCC_SPEC \
+ "-lgcc %{mrtp:--exclude-libs=libc_internal,libgcc -lc_internal}"
+
+#undef VXWORKS_STARTFILE_SPEC
+#define VXWORKS_STARTFILE_SPEC "%{mrtp:%{!shared:crt0.o%s}}"
+#define VXWORKS_ENDFILE_SPEC ""
+
+/* We can use .ctors/.dtors sections only in RTP mode.
+ Unfortunately this must be an integer constant expression;
+ fix up in override_options. */
+#undef VXWORKS_OVERRIDE_OPTIONS
+#define VXWORKS_OVERRIDE_OPTIONS do { \
+ targetm.have_ctors_dtors = TARGET_VXWORKS_RTP; \
+} while (0)
+
+/* The VxWorks runtime uses a clever trick to get the sentinel entry
+ (-1) inserted at the beginning of the .ctors segment. This trick
+ will not work if we ever generate any entries in plain .ctors
+ sections; we must always use .ctors.PRIORITY. */
+#define ALWAYS_NUMBER_CTORS_SECTIONS 1
+
+/* The name of the symbol for the table of GOTs in a particular
+ RTP. */
+#define VXWORKS_GOTT_BASE "__GOTT_BASE__"
+/* The name of the symbol for the index into the table of GOTs for the
+ GOT associated with the current shared library. */
+#define VXWORKS_GOTT_INDEX "__GOTT_INDEX__"
+
+#define VXWORKS_KIND VXWORKS_KIND_NORMAL
diff --git a/gcc-4.2.1-5666.3/gcc/config/vxworks.opt b/gcc-4.2.1-5666.3/gcc/config/vxworks.opt
new file mode 100644
index 000000000..98a44f57a
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/vxworks.opt
@@ -0,0 +1,32 @@
+; Processor-independent options for VxWorks.
+;
+; Copyright (C) 2005 Free Software Foundation, Inc.
+; Contributed by CodeSourcery, LLC.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 2, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING. If not, write to the Free
+; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+; 02110-1301, USA.
+
+mrtp
+Target Report RejectNegative Mask(VXWORKS_RTP) Condition(VXWORKS_KIND == VXWORKS_KIND_NORMAL)
+Assume the VxWorks RTP environment
+
+; VxWorks AE has two modes: kernel mode and vThreads mode. In
+; general, back ends do not actually need to know which mode they're
+; in, so we do not have to set any flags.
+mvthreads
+Target RejectNegative Condition(VXWORKS_KIND == VXWORKS_KIND_AE)
+Assume the VxWorks vThreads environment
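+
+; Illustrative note: Mask(VXWORKS_RTP) ties -mrtp to the
+; MASK_VXWORKS_RTP bit in target_flags, so target code can test
+; TARGET_VXWORKS_RTP, as VXWORKS_OVERRIDE_OPTIONS in vxworks.h does
+; when enabling ctors/dtors for RTP mode.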
diff --git a/gcc-4.2.1-5666.3/gcc/config/vxworksae.h b/gcc-4.2.1-5666.3/gcc/config/vxworksae.h
new file mode 100644
index 000000000..b63192ecb
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/vxworksae.h
@@ -0,0 +1,58 @@
+/* Common VxWorks AE target definitions for GNU compiler.
+ Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* This header should be included after vx-common.h.  */
+
+/* Most of the definitions below this point are versions of the
+ vxworks.h definitions, without the -mrtp bits. */
+
+/* The directory containing the VxWorks AE target headers. */
+#define VXWORKSAE_TARGET_DIR \
+ "/home/tornado/vxworks-ae/latest/target"
+
+/* Include target/vThreads/h or target/h (depending on the compilation
+ mode), and then target/val/h (in either mode). The macros defined
+ are in the user's namespace, but the VxWorks headers require
+ them. */
+#undef VXWORKS_ADDITIONAL_CPP_SPEC
+#define VXWORKS_ADDITIONAL_CPP_SPEC " \
+ %{!nostdinc:%{isystem*}} \
+ %{mvthreads:-DVTHREADS=1 \
+ %{!nostdinc:-isystem " VXWORKSAE_TARGET_DIR "/vThreads/h}} \
+ %{!mvthreads:-DAE653_BUILD=1 \
+ %{!nostdinc:-isystem " VXWORKSAE_TARGET_DIR "/h}} \
+ %{!nostdinc:-isystem " VXWORKSAE_TARGET_DIR "/val/h}"
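+
+/* Illustrative expansion of the spec above (assuming no -nostdinc):
+   with -mvthreads it contributes roughly
+       -DVTHREADS=1
+       -isystem /home/tornado/vxworks-ae/latest/target/vThreads/h
+       -isystem /home/tornado/vxworks-ae/latest/target/val/h
+   while the default (AE653) mode instead gets
+       -DAE653_BUILD=1
+       -isystem /home/tornado/vxworks-ae/latest/target/h
+       -isystem /home/tornado/vxworks-ae/latest/target/val/h  */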
+
+#undef VXWORKS_LIB_SPEC
+#define VXWORKS_LIB_SPEC ""
+
+#undef VXWORKS_LINK_SPEC
+#define VXWORKS_LINK_SPEC \
+ "-r %{v:-V}"
+
+#undef VXWORKS_LIBGCC_SPEC
+#define VXWORKS_LIBGCC_SPEC \
+ "-lgcc"
+
+#undef VXWORKS_STARTFILE_SPEC
+#define VXWORKS_STARTFILE_SPEC ""
+
+#define VXWORKS_KIND VXWORKS_KIND_AE
diff --git a/gcc-4.2.1-5666.3/gcc/config/windiss.h b/gcc-4.2.1-5666.3/gcc/config/windiss.h
new file mode 100644
index 000000000..0c1eae1de
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/windiss.h
@@ -0,0 +1,38 @@
+/* Support for GCC using WindISS simulator.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+
+/* WindISS uses wchar_t == unsigned short (UCS2) on all architectures.  */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* WindISS has wint_t == int.  */
+#undef WINT_TYPE
+#define WINT_TYPE "int"
+
+/* No profiling. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ sorry ("profiler support for WindISS"); \
+}
diff --git a/gcc-4.2.1-5666.3/gcc/config/x-darwin b/gcc-4.2.1-5666.3/gcc/config/x-darwin
new file mode 100644
index 000000000..c2ffd7d5c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/x-darwin
@@ -0,0 +1,3 @@
+host-darwin.o : $(srcdir)/config/host-darwin.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h toplev.h config/host-darwin.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
diff --git a/gcc-4.2.1-5666.3/gcc/config/x-hpux b/gcc-4.2.1-5666.3/gcc/config/x-hpux
new file mode 100644
index 000000000..fa0c5553c
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/x-hpux
@@ -0,0 +1,4 @@
+host-hpux.o : $(srcdir)/config/host-hpux.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h hosthooks.h hosthooks-def.h $(HOOKS_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/host-hpux.c
diff --git a/gcc-4.2.1-5666.3/gcc/config/x-interix b/gcc-4.2.1-5666.3/gcc/config/x-interix
new file mode 100644
index 000000000..e5393316d
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/x-interix
@@ -0,0 +1,3 @@
+# The existing CC/GCC may not define __INTERIX, so we need to define it here.
+# Since we configure with _ALL_SOURCE, we need to build that way too.
+X_CFLAGS= -D__INTERIX -D_ALL_SOURCE
diff --git a/gcc-4.2.1-5666.3/gcc/config/x-linux b/gcc-4.2.1-5666.3/gcc/config/x-linux
new file mode 100644
index 000000000..e4aa040bc
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/x-linux
@@ -0,0 +1,4 @@
+host-linux.o : $(srcdir)/config/host-linux.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h hosthooks.h hosthooks-def.h $(HOOKS_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/host-linux.c
diff --git a/gcc-4.2.1-5666.3/gcc/config/x-solaris b/gcc-4.2.1-5666.3/gcc/config/x-solaris
new file mode 100644
index 000000000..59c7bf61f
--- /dev/null
+++ b/gcc-4.2.1-5666.3/gcc/config/x-solaris
@@ -0,0 +1,4 @@
+host-solaris.o : $(srcdir)/config/host-solaris.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h hosthooks.h hosthooks-def.h $(HOOKS_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/host-solaris.c